| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d48b98c5bc5c10dfb688d580ddef94ddd8f71891
|
df015a639d78b2c5c40463356c93d67effca2f4c
|
/algo/ten.py
|
e759915d1597f9d54aec23ceb371b09ae5be835c
|
[] |
no_license
|
opklnm102/python-tutorial
|
5b53cd234b1b519230776cf4308ff1c41f62a2ae
|
cb400d5e318c59d42703ce1f8a12c5693bea07c8
|
refs/heads/master
| 2020-12-29T02:38:32.172485
| 2017-03-31T23:34:12
| 2017-03-31T23:34:12
| 48,275,041
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# Count how many times a given letter appears in a sentence
string = input("input string: ")
alpha = input("input search alphabet: ")
count = 0
for i in string:
    if i == alpha:
        count += 1
print(count)
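# Note: for a single-character search, Python's built-in str.count
# gives the same answer in one call:
#
#     count = string.count(alpha)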
|
[
"opklnm102@gmail.com"
] |
opklnm102@gmail.com
|
a882e8c2d54e99f64a919245322746ff4a2e8004
|
5ee0f0f8d96f6666f4f0a065c754e9469924d2d4
|
/xyz_web/contributions/views.py
|
7ab84f7447599825f60357220fcbfbe6153cb633
|
[
"MIT"
] |
permissive
|
amunso/xyz_web
|
81e1533fd403ba45064a6de5c33593113a11d29c
|
756e217c55ccbd22be8de8228f758cdb5581227a
|
refs/heads/master
| 2022-12-20T10:37:17.551520
| 2020-09-23T17:34:07
| 2020-09-23T17:34:07
| 298,033,987
| 0
| 0
| null | 2020-09-23T16:45:16
| 2020-09-23T16:45:15
| null |
UTF-8
|
Python
| false
| false
| 2,437
|
py
|
from django.forms import ModelForm, ModelChoiceField, RadioSelect, CheckboxSelectMultiple, Form, ModelMultipleChoiceField
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, FormView
from .models import Contribution, Vote
from django.utils.html import mark_safe
from django.urls import reverse_lazy
from django.shortcuts import redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import logout


def confirm(request, token):
    # TODO: handle object not found
    Vote.objects.get(confirmation_token=token).confirm()
    return redirect('vote')


class MainView(ListView):
    model = Contribution


class ContributionPlayerLabelMixin:
    def label_from_instance(self, obj):
        return mark_safe(obj.video_player())


class RadioSelectCustomOption(RadioSelect):
    """RadioSelect widget where order of label and input in template is reversed."""
    option_template_name = 'contributions/input_option_reversed.html'


class VoteChoiceField(ContributionPlayerLabelMixin, ModelChoiceField):
    widget = RadioSelectCustomOption

    def __init__(self, *args, **kwargs):
        # TODO: There has to be a more elegant way to do this.
        # This is a hack.
        kwargs['queryset'] = kwargs['queryset'].filter(approved=True)
        super().__init__(*args, **kwargs)


class ContributionApproveField(ContributionPlayerLabelMixin, ModelMultipleChoiceField):
    widget = CheckboxSelectMultiple


class VoteForm(ModelForm):
    class Meta:
        model = Vote
        fields = ['username', 'contribution']
        field_classes = {'contribution': VoteChoiceField}


class VoteView(CreateView):
    form_class = VoteForm
    success_url = reverse_lazy('vote')
    model = Vote


class ContributionApproveForm(Form):
    contributions = ContributionApproveField(
        queryset=Contribution.objects.filter(approved=False))


class ApproveContributionView(LoginRequiredMixin, FormView):
    form_class = ContributionApproveForm
    success_url = "/approve/"
    template_name = "contributions/contribution_approve.html"

    def form_valid(self, form):
        approved_contributions = form.cleaned_data['contributions']
        for contribution in approved_contributions:
            contribution.approved = True
            contribution.save()
        # Log out user
        logout(self.request)
        return super().form_valid(form)
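# A possible alternative to the queryset hack in VoteChoiceField (an
# untested sketch, not from the original repo): keep the field generic
# and narrow the queryset where the form is instantiated, e.g.
#
#     class VoteForm(ModelForm):
#         def __init__(self, *args, **kwargs):
#             super().__init__(*args, **kwargs)
#             self.fields['contribution'].queryset = (
#                 self.fields['contribution'].queryset.filter(approved=True))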
|
[
"thorvalb@stud.ntnu.no"
] |
thorvalb@stud.ntnu.no
|
7229d4da19911690a42c9a6785e092cf7365c860
|
818325a5f0d7c2a105e81ce67bc9b8608b835f03
|
/app.py
|
8f19ac7cfb635f27d48260be767be5565e2f0e5c
|
[] |
no_license
|
seladb/simple-analytics2
|
134121e1195e6539ff7106637e8eadb12e687a13
|
ec8d71af9785c6634ddabe9366ff75f4c64ff7b6
|
refs/heads/master
| 2023-05-08T14:15:23.167377
| 2021-06-02T01:12:53
| 2021-06-02T01:12:53
| 372,994,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
import logging
# https://docs.python.org/3/library/queue.html
import threading
import queue

from flask import Flask, request
from flask.logging import default_handler

from services.enrich_geo_location import EnrichGeoLocation
from services.request_anomaly_ranking import AnomalyRanking
from services.db_service import DBService
from common.record import Record

app = Flask(__name__)

root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(default_handler)


@app.route('/')
def serve_requests():
    enrich_geo = EnrichGeoLocation()
    enriched_data = enrich_geo.enrich(request.remote_addr)
    anomaly_rank = AnomalyRanking()
    request_rank = anomaly_rank.rank_request(request.headers)
    db_service = DBService()
    db_service.write_to_db(
        Record(
            geo_location=enriched_data, rank=request_rank
        )
    )
    return "Hello world"


if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
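# threading and queue are imported above but never used; presumably the
# DB write was meant to move off the request thread. A minimal sketch of
# that idea (an assumption, not part of the original service):
#
#     records = queue.Queue()
#
#     def db_writer():
#         db_service = DBService()
#         while True:
#             db_service.write_to_db(records.get())
#
#     threading.Thread(target=db_writer, daemon=True).start()
#     # ...and serve_requests() would call records.put(Record(...)) instead.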
|
[
"pcapplusplus@gmail.com"
] |
pcapplusplus@gmail.com
|
6f1f73cfc55c33a0424408de4d246780a15a3935
|
d4ea02450749cb8db5d8d557a4c2616308b06a45
|
/students/luyao_xu/lesson05/mailroom3.py
|
f320ecd0b76e8fb1819cb61f48cc0ffa9f73bbc0
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/Self_Paced-Online
|
75421a5bdd6233379443fc310da866ebfcd049fe
|
e298b1151dab639659d8dfa56f47bcb43dd3438f
|
refs/heads/master
| 2021-06-16T15:41:07.312247
| 2019-07-17T16:02:47
| 2019-07-17T16:02:47
| 115,212,391
| 13
| 160
| null | 2019-11-13T16:07:35
| 2017-12-23T17:52:41
|
Python
|
UTF-8
|
Python
| false
| false
| 4,972
|
py
|
#!/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
from pathlib import Path


def initialize_db():
    don_1 = {'name': 'Amy Walker', 'donation amount': [18900.90, 4500]}
    don_2 = {'name': 'Peter Thomson', 'donation amount': [9999.99, 500, 6000.88]}
    don_3 = {'name': 'July Jensen', 'donation amount': [5300.00, 200]}
    don_4 = {'name': 'Paul Allen', 'donation amount': [320.57]}
    don_5 = {'name': 'Jenny Palmer', 'donation amount': [66.89]}
    data = [don_1, don_2, don_3, don_4, don_5]
    return data


donations = initialize_db()


def donate_amount(donor, donation_amount):
    donation_list = donor['donation amount']
    donation_list.append(donation_amount)
    donor['donation amount'] = donation_list
    return donor


def donate_sum(donor):
    return sum(donor['donation amount'])


def donate(donor):
    # Once a name has been selected, prompt for a donation amount
    try:
        donation_amount = float(input('Enter the amount you want to donate: $'))
        donor = donate_amount(donor, donation_amount)
        while True:
            more = input('Is there another gift amount you want to enter? (y/n) ')
            if more[0].lower() == 'y':
                donation_amount = float(input('That\'s so nice of you! Enter the amount you want to donate: $'))
                # Once an amount has been given, add that amount to the donation history of the selected user
                donor = donate_amount(donor, donation_amount)
            # Use string formatting to compose an email thanking the donor for their generous donation.
            elif more.lower() == 'n':
                print(letter(donor))
                break
    except ValueError:
        print("please enter a valid number")
        return donate(donor)
    return donor


def create_donor(name):
    return {'name': name, 'donation amount': []}


def send_thank():
    while True:
        # If the user types 'list', show them a list of the donor names and re-prompt
        user_input = input(
            'Please enter a full name, type "list" to see the list of donors, type "back" to go back: ')
        if user_input.lower() == 'list':
            for name in donations:
                print(name['name'])
            continue
        elif user_input.lower() == 'back':
            # back to the main menu
            return
        # If the user types a name in the list, use it.
        elif user_input.lower() in [d['name'].lower() for d in donations]:
            for d in donations:
                if d['name'] == user_input:
                    donate(d)
                    # exit for loop on name match
                    break
        else:
            # If the user types a name not in the list, add that name to the data structure and use it
            d = create_donor(user_input)
            donations.append(donate(d))
            break


def sort_total_given(k):
    return sum(k['donation amount'])


def report_string(d):
    result = '{:<26} | {:^11} | {:^8} | {:>12}\n'.format('Donor Name', 'Total Given', 'Num Gifts', 'Average Gift')
    result += '-' * len(result) + '\n'
    for i in d:
        name = i['name']
        total_donation = sum(i['donation amount'])
        gift_number = len(i['donation amount'])
        average_donation = total_donation / gift_number
        result += '{:<27} ${:>11.2f} {:>12} ${:>13.2f}\n'.format(name, total_donation, gift_number, average_donation)
    return result


def create_report():
    donations.sort(key=sort_total_given, reverse=True)
    # print a list of your donors, sorted by total historical donation amount
    print(report_string(donations))


def letter(donor):
    letter = 'Dear {}, we want to thank you for your total donation amount of ${}. Have a nice day!'.format(
        donor['name'], donate_sum(donor))
    return letter


def send_to_everyone():
    for donor in donations:
        filename = Path('./letters/' + donor['name'] + '.txt')
        filename.parent.mkdir(parents=True, exist_ok=True)
        with filename.open('w') as outfile:
            outfile.write(letter(donor))


def menu_selection(prompt, dispatch_dict):
    while True:
        response = input(prompt)
        result = dispatch_dict[response]()
        if result == "exit":
            break


# quit menu function
def quit_program():
    print('Bye.')
    return "exit"


if __name__ == "__main__":
    main_prompt = ("Please choose one of the actions in menu:\n1. Send a Thank you.\n2. Create a report.\n"
                   "3. Send thank you letter to everyone.\n4. Quit.\nEnter the number of the action you want to make: ")
    main_dispatch = {'1': send_thank,
                     '2': create_report,
                     '3': send_to_everyone,
                     '4': quit_program}
    try:
        menu_selection(main_prompt, main_dispatch)
    except KeyError:
        print("The action you take is not valid.")
        menu_selection(main_prompt, main_dispatch)
|
[
"xuluyao19931213@gmail.com"
] |
xuluyao19931213@gmail.com
|
c80f65a316cce749b4402378910cf0b906c8d8fa
|
5d2a2887bafa327492022fc0826738cc2d3dbae0
|
/day_01.py
|
31651d03616bdc7ff6b183dffed597696ff81db9
|
[] |
no_license
|
mullevik/advent-of-code-2019
|
c32a09a8674bef4f828f4cc0d3624b714c21a16a
|
876ceb350db7e0241015740e093b99cd8d7a7485
|
refs/heads/master
| 2020-09-22T12:30:27.159006
| 2019-12-13T19:43:38
| 2019-12-13T19:43:38
| 225,195,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
import fileinput


def calculate_fuel(mass: int) -> int:
    return (mass // 3) - 2


if __name__ == "__main__":
    total_fuel_requirement = 0
    for line in fileinput.input():
        module_mass = int(line)
        fuel_requirements = []
        fuel_requirement = calculate_fuel(module_mass)
        while fuel_requirement > 0:
            fuel_requirements.append(fuel_requirement)
            fuel_requirement = calculate_fuel(fuel_requirement)
        total_fuel_requirement += sum(fuel_requirements)
    print(total_fuel_requirement)
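# The while loop above answers part two of Advent of Code 2019 day 1
# (fuel itself needs fuel); part one would simply be (same input
# assumed):
#
#     total = sum(calculate_fuel(int(line)) for line in fileinput.input())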
|
[
"muller.viktor.muller@gmail.com"
] |
muller.viktor.muller@gmail.com
|
3e122dfa2ff830a984a694cea5458b47c7620559
|
1e74402cd0db4e283e9b274f59035d98344cc83c
|
/setup.py
|
eab885a3b4b7911e77ee3a7e6a2781c36909c3fa
|
[
"MIT"
] |
permissive
|
MarkMurillo/python_ctype_structure_example
|
025d7f8c38b21333dc0a2bc287f92691adb8dd9a
|
9e889cc4cbdeab8433c396262f086071bb961e13
|
refs/heads/master
| 2020-03-13T21:49:37.620364
| 2018-10-14T18:59:58
| 2018-10-14T18:59:58
| 131,304,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
""" setup.py
Builds the test c-extension python module.
Author: Mark Murillo
"""
from distutils.core import setup, Extension
testModule = Extension(
'testMod',
include_dir=[],
libraries=[],
library_dirs=[],
sources=['test.c']
)
setup(
name='testMod',
version='1.0',
description='A library to test the c extension capabilities',
ext_modules=[testModule]
)
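# Typical build and smoke test (note distutils is deprecated since
# Python 3.10; setuptools provides the same setup/Extension API):
#
#     python setup.py build_ext --inplace
#     python -c "import testMod"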
|
[
"mark.sj.murillo@gmail.com"
] |
mark.sj.murillo@gmail.com
|
cd95b19f6d4f93b14d14a0883f75efb49067cd20
|
0ba2e5061577f6286ff9265ef1df9aca96769445
|
/sort/python/Piegon Sort.py
|
cc6205f804dcef84994b2ca7406186657254e296
|
[
"CC0-1.0"
] |
permissive
|
ZoranPandovski/al-go-rithms
|
68d5d02f80a61de9baf8e50a81a52e7d0b3983a0
|
4ae6ba54e90af14af236e03e435eb0402dcac787
|
refs/heads/master
| 2023-09-04T16:04:04.321676
| 2023-06-06T15:22:16
| 2023-06-06T15:22:16
| 93,438,176
| 1,421
| 2,445
|
CC0-1.0
| 2023-06-15T14:24:28
| 2017-06-05T19:20:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
"""
This is an implementation of Pigeon Hole Sort.
For doctests run following command:
python3 -m doctest -v pigeon_sort.py
or
python -m doctest -v pigeon_sort.py
For manual testing run:
python pigeon_sort.py
"""
def pigeon_sort(array):
"""
Implementation of pigeon hole sort algorithm
:param array: Collection of comparable items
:return: Collection sorted in ascending order
>>> pigeon_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> pigeon_sort([])
[]
>>> pigeon_sort([-2, -5, -45])
[-45, -5, -2]
"""
if len(array) == 0:
return array
# Manually finds the minimum and maximum of the array.
min = array[0]
max = array[0]
for i in range(len(array)):
if array[i] < min:
min = array[i]
elif array[i] > max:
max = array[i]
# Compute the variables
holes_range = max - min + 1
holes = [0 for _ in range(holes_range)]
holes_repeat = [0 for _ in range(holes_range)]
# Make the sorting.
for i in range(len(array)):
index = array[i] - min
if holes[index] != array[i]:
holes[index] = array[i]
holes_repeat[index] += 1
else:
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
index = 0
for i in range(holes_range):
while holes_repeat[i] > 0:
array[index] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
user_input = input("Enter numbers separated by comma:\n")
unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
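# Pigeonhole sort runs in O(n + k) time and O(k) extra space, where
# k = max - min + 1; it only pays off when the value range k is
# comparable to the number of items n.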
|
[
"zoran.pandovski@gmail.com"
] |
zoran.pandovski@gmail.com
|
032e16ace2a8d54eed495da1838d6c543d7890c8
|
a907c2cdcf2587dad8cafeef3b851addaef127de
|
/fnndsc.py
|
1a903afea89822bc8c2fcef87ef4b1e41de6b72f
|
[] |
no_license
|
FNNDSC/scripts
|
10d443612fcf8a206fa33c13dbc1f5aa17d2e2c9
|
fafa74576d7f37b901cdc8c28e6343141df648eb
|
refs/heads/master
| 2023-04-17T01:46:09.155442
| 2023-04-11T02:15:37
| 2023-04-11T02:15:37
| 2,512,788
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,127
|
py
|
#!/usr/bin/env python

import os
import sys
import string
import getopt
import argparse
import csv

from _common import systemMisc as misc
from _common import crun

import error
import message
import stage


class FNNDSC():
    '''
    The 'FNNDSC' class provides the base infrastructure for batched pipelined
    processing. It provides the basic level of services and abstractions
    needed to run staged (serial) pipelined analysis streams.

    Sub-classes of this base provide experiment-specific specializations.
    '''

    def log(self, *args):
        '''
        get/set the internal pipeline log message object.

        Caller can further manipulate the log object with object-specific
        calls.
        '''
        if len(args):
            self._log = args[0]
        else:
            return self._log

    def name(self, *args):
        '''
        get/set the descriptive name text of this object.
        '''
        if len(args):
            self.__name = args[0]
        else:
            return self.__name

    def pipeline(self, *args):
        if len(args):
            self._pipeline = args[0]
        else:
            return self._pipeline

    def verbosity(self, *args):
        if len(args):
            self._verbosity = args[0]
            self._log.verbosity(args[0])
            self._pipeline._log.verbosity(args[0])
        else:
            return self._verbosity

    def vprintf(self, alevel, format, *args):
        '''
        A verbosity-aware print.
        '''
        if self._verbosity and self._verbosity <= alevel:
            sys.stdout.write(format % args)

    def __init__(self, **kwargs):
        '''
        Basic constructor. Checks on named input args, checks that files
        exist and creates directories.
        '''
        self.__name = 'FNNDSC-base'
        self._verbosity = 0

        self._log = message.Message()
        self._log.tee(True)
        self._log.syslog(True)

        self._pipeline = stage.Pipeline(name=self.__name)
        self._pipeline.log(self._log)

        self._str_subjectDir = ''
        self._b_debugMode = False

        for key, value in kwargs.items():
            if key == 'syslog': self._log.syslog(value)
            if key == 'logTo': self._log.to(value)
            if key == 'logTee': self._log.tee(value)

    def initialize(self):
        '''
        This method provides some "post-constructor" initialization. It is
        typically called after the constructor and after other class flags
        have been set (or reset).
        '''

    def run(self):
        '''
        The main 'engine' of the class.
        '''
        self._log('Starting %s...\n' % self.__name)
        self._pipeline.execute()
        self._log('Finished %s\n' % self.__name)

    def stage_add(self, stage):
        self._pipeline.stage_add(stage)
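# The class docstring says sub-classes provide experiment-specific
# specializations. A minimal sketch of that pattern (hypothetical
# experiment name and Stage constructor, not from the original repo):
#
#     class TractographyExperiment(FNNDSC):
#         def __init__(self, **kwargs):
#             FNNDSC.__init__(self, **kwargs)
#             self.name('tractography')
#             self.stage_add(stage.Stage(name='recon'))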
|
[
"rudolph.pienaar@gmail.com"
] |
rudolph.pienaar@gmail.com
|
910c40a79a455b4bbc008bbdbb80be0f86e7421c
|
fbea0f58191fd13c5fecc86d24b02032761699bc
|
/users/urls.py
|
c8ca8686832ef10215ce800de49d5dc25dbc720f
|
[] |
no_license
|
HOTGU/nbnb-clone
|
ef29b339032a2fc3d07035ba5c1e456933cb5434
|
fae3a4f2aa5b3c5ed983c2fc34b223e8d0814703
|
refs/heads/master
| 2023-07-14T05:01:07.506626
| 2021-08-23T07:52:16
| 2021-08-23T07:52:16
| 383,764,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
from django.urls import path

from . import views

app_name = "users"

urlpatterns = [
    path("login/", views.LoginView.as_view(), name="login"),
    path("login/github/", views.github_login, name="github-login"),
    path("login/github/callback/", views.github_callback, name="github-callback"),
    path("login/kakao/", views.kakao_login, name="kakao-login"),
    path("login/kakao/callback/", views.kakao_callback, name="kakao-callback"),
    path("logout/", views.log_out, name="logout"),
    path("signup/", views.SignUpView.as_view(), name="signup"),
    path("update-profile/", views.UpdateProfileView.as_view(), name="update"),
    path(
        "update-password/", views.UpdatePasswordView.as_view(), name="update-password"
    ),
    path("switch-hosting/", views.switch_hosting, name="switch-hosting"),
    path("<int:pk>/", views.UserProfileView.as_view(), name="profile"),
]
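# With app_name = "users" these routes are reversed through the
# namespace (standard Django API; the URL prefix depends on how the
# project's root urls.py includes this module):
#
#     from django.urls import reverse
#     reverse("users:profile", kwargs={"pk": 1})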
|
[
"gksrn42@gmail.com"
] |
gksrn42@gmail.com
|
f6415420e3a9b8fff87cdc63145da81445c0f1e5
|
e6b9527ca455149f2a257e7fa8936c3b67bbb108
|
/ruby/test_ruby_bindings.py
|
18c9ccd7df236f44ba2d2cc72079059627631092
|
[] |
no_license
|
theoweiss/generators
|
ae269659e794ca24c49b5ff4fc52aaaf795b0eb6
|
13d1f8f2ed70aaa85ee001f57852f9233064c748
|
refs/heads/master
| 2020-04-22T18:15:24.371245
| 2019-02-13T17:08:29
| 2019-02-13T17:08:29
| 170,570,998
| 0
| 0
| null | 2019-02-13T20:00:41
| 2019-02-13T20:00:41
| null |
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Ruby Bindings Tester
Copyright (C) 2012-2014, 2017-2018 Matthias Bolte <matthias@tinkerforge.com>

test_ruby_bindings.py: Tests the Ruby bindings

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""

import sys
import os

sys.path.append(os.path.split(os.getcwd())[0])

import common


class RubyTester(common.Tester):
    def __init__(self, root_dir, extra_paths):
        common.Tester.__init__(self, 'ruby', '.rb', root_dir, subdirs=['examples', 'source'], extra_paths=extra_paths)

    def test(self, cookie, path, extra):
        args = ['ruby',
                '-wc',
                path]

        retcode, output = common.check_output_and_error(args)
        output = output.strip('\r\n')
        success = retcode == 0 and len(output.split('\n')) == 1 and 'Syntax OK' in output

        self.handle_result(cookie, output, success)


def run(root_dir):
    extra_paths = [os.path.join(root_dir, '../../weather-station/write_to_lcd/ruby/weather_station.rb'),
                   os.path.join(root_dir, '../../hardware-hacking/remote_switch/ruby/remote_switch.rb'),
                   os.path.join(root_dir, '../../hardware-hacking/smoke_detector/ruby/smoke_detector.rb')]

    return RubyTester(root_dir, extra_paths).run()


if __name__ == '__main__':
    run(os.getcwd())
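# Note: 'ruby -wc' only parses each file (syntax check with warnings
# enabled); it never executes the examples, which is why a single
# 'Syntax OK' output line counts as success.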
|
[
"matthias@tinkerforge.com"
] |
matthias@tinkerforge.com
|
ffdc58bdbc38eae855b064d75da1b278d2b35797
|
825ab7f2431bf040259bc6f725054152d8bf6da3
|
/Projeto Naval Final/TABULEIRO_Brincando.py
|
6cf470e3cfccc064eb008b39a1ada2e293d94db1
|
[] |
no_license
|
leandro1989/Projetos
|
f70856be008ac1e59ddea339d0d74b8512e52353
|
dc8931e1fc7caf1ab03414d8f07f7373f0790ad3
|
refs/heads/master
| 2021-01-19T23:24:57.826068
| 2017-04-21T11:14:12
| 2017-04-21T11:14:12
| 88,975,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,615
|
py
|
from random import *
import sys
from termcolor import colored


def resultado():
    print('')
    print('The results were:')
    print('')
    for i in range(len(nomes_jogador)):
        print(nomes_jogador[i], '--->', acertos_jogador[i])


def cria_grelha(matriz):
    '''Creates a 10x10 matrix.'''
    for i in range(10):
        matriz.append([])
        for j in range(10):
            matriz[i].append(colored('~', 'blue'))


def interface(matriz):
    '''Shows the board on which the ships and the shots fired appear.'''
    print(' | %i | %i | %i | %i | %i | %i | %i | %i | %i | %i |' % (linha_numero[0], linha_numero[1], linha_numero[2], linha_numero[3], linha_numero[4],
                                                                    linha_numero[5], linha_numero[6], linha_numero[7], linha_numero[8], linha_numero[9]))
    print(' ---------------------------------------------------------------')
    for k in range(10):
        print(' %s | %s %s %s %s %s %s %s %s %s %s | ' % (letras[k], matriz[k][0], matriz[k][1], matriz[k][2],
                                                          matriz[k][3], matriz[k][4], matriz[k][5], matriz[k][6],
                                                          matriz[k][7], matriz[k][8], matriz[k][9]))
        print(' ---------------------------------------------------------------')
    print(" Sea of my ENEMY's ships ")


def verifica_tiro(numero, letra_tiro):
    '''Checks whether matriz_aliado holds a ship at the target and marks the result on matriz_inimigo, the opponent's board.'''
    if matriz_aliado[letras.find(letra_tiro)][numero - 1] == 1:
        # A hit is marked with a red 'X'
        X = colored('X', 'red')
        matriz_inimigo[letras.find(letra_tiro)][numero - 1] = X
    else:
        matriz_inimigo[letras.find(letra_tiro)][numero - 1] = 'O'
        print('Shot into the open sea. Fire again!')


def coloca_barco(linha, coluna, sentido_do_barco, matriz_aliado, barco, i):
    '''Places the ship at the desired position and puts 'a' around the ships so that adjacent ships cannot be placed.'''
    a = 'x'  # The character assigned to 'a' is placed around the ships
    if i == 0:
        # This part puts 'a' around the ship when ships are placed horizontally.
        if sentido_do_barco == 1:
            matriz_aux0 = []
            for v in range(coluna, 10):
                matriz_aux0.append(v)
            if 0 < coluna < 9 and 0 < linha < 9:
                matriz_aliado[linha][coluna - 1] = a
                if len(barco) < matriz_aux0.index(matriz_aux0[-1]) + 1:
                    # Checks whether the end of the ship touches the board edge, to decide whether to add 'a'.
                    matriz_aliado[linha][coluna + len(barco)] = a
                    for v in range(coluna - 1, coluna + len(barco) + 1):
                        matriz_aliado[linha - 1][v] = a
                        matriz_aliado[linha + 1][v] = a
                else:
                    matriz_aliado[linha][coluna - 1] = a
                    for v in range(coluna - 1, coluna + len(barco)):
                        matriz_aliado[linha - 1][v] = a
                        matriz_aliado[linha + 1][v] = a
                matriz_aux0.clear()
            if 0 < coluna < 9 and linha == 0:
                matriz_aliado[linha][coluna - 1] = a
                if coluna + len(barco) <= 9:
                    # If there are cells past the last part of the ship, add 'a'.
                    matriz_aliado[linha][coluna + len(barco)] = a
                if coluna + len(barco) + 1 <= 10:
                    for v in range(coluna - 1, coluna + len(barco) + 1):
                        matriz_aliado[linha + 1][v] = a
                else:
                    for v in range(coluna - 1, coluna + len(barco)):
                        matriz_aliado[linha + 1][v] = a
            elif 0 < coluna < 9 and linha == 9:
                matriz_aliado[linha][coluna - 1] = a
                if coluna + len(barco) <= 9:
                    matriz_aliado[linha][coluna + len(barco)] = a
                if coluna + len(barco) + 1 <= 10:
                    for v in range(coluna - 1, coluna + len(barco) + 1):
                        matriz_aliado[linha - 1][v] = a
                else:
                    for v in range(coluna - 1, coluna + len(barco)):
                        matriz_aliado[linha - 1][v] = a
            elif coluna == 0 and 0 < linha < 9:
                matriz_aliado[linha][coluna + len(barco)] = a
                for v in range(coluna, coluna + len(barco) + 1):
                    matriz_aliado[linha - 1][v] = a
                    matriz_aliado[linha + 1][v] = a
            elif coluna == 9 and 0 < linha < 9:
                matriz_aliado[linha][coluna - 1] = a
                for v in range(coluna - 1, coluna + len(barco)):
                    matriz_aliado[linha - 1][v] = a
                    matriz_aliado[linha + 1][v] = a
            elif coluna == 0 and linha == 0:
                matriz_aliado[linha][coluna + len(barco)] = a
                for v in range(coluna, coluna + len(barco) + 1):
                    matriz_aliado[linha + 1][v] = a
            elif coluna == 9 and linha == 0:
                matriz_aliado[linha][coluna - 1] = a
                for v in range(coluna - 1, coluna + len(barco)):
                    matriz_aliado[linha + 1][v] = a
            elif coluna == 0 and linha == 9:
                matriz_aliado[linha][coluna + len(barco)] = a
                for v in range(coluna, coluna + len(barco) + 1):
                    matriz_aliado[linha - 1][v] = a
            elif coluna == 9 and linha == 9:
                matriz_aliado[linha][coluna - 1] = a
                for v in range(coluna - 1, coluna + len(barco)):
                    matriz_aliado[linha - 1][v] = a
        if sentido_do_barco == 2:
            # This part puts 'a' around ships placed vertically.
            j = len(barco)
            matriz_aux = []
            for r in range(linha, 10):
                matriz_aux.append(matriz_aliado[r][coluna])
            if linha != 0 and linha != 9 and coluna != 0 and coluna != 9 and (len(barco) != len(matriz_aux)):
                matriz_aliado[linha - 1][coluna] = a
                matriz_aliado[linha + j][coluna] = a
                for r in range(linha - 1, linha + len(barco) + 1):
                    matriz_aliado[r][coluna - 1] = a
                    matriz_aliado[r][coluna + 1] = a
            elif (0 < linha < 9) and coluna == 9 and (len(barco) != len(matriz_aux)):
                matriz_aliado[linha - 1][coluna] = a
                matriz_aliado[linha + j][coluna] = a
                for r in range(linha - 1, linha + len(barco) + 1):
                    matriz_aliado[r][coluna - 1] = a
            elif (0 < linha < 9) and coluna == 0 and (len(barco) != len(matriz_aux)):
                matriz_aliado[linha - 1][coluna] = a
                matriz_aliado[linha + j][coluna] = a
                for r in range(linha - 1, linha + len(barco) + 1):
                    matriz_aliado[r][coluna + 1] = a
            elif linha == 0 and 0 < coluna < 9:
                matriz_aliado[j][coluna] = a
                for r in range(linha, linha + j + 1):
                    matriz_aliado[r][coluna - 1] = a
                    matriz_aliado[r][coluna + 1] = a
            elif linha == 9 and 0 < coluna < 9:
                matriz_aliado[linha - 1][coluna] = a
                for r in range(linha - 1, linha + len(barco)):
                    matriz_aliado[r][coluna - 1] = a
                    matriz_aliado[r][coluna + 1] = a
            elif linha == 0 and coluna == 0:
                matriz_aliado[j][coluna] = a
                for r in range(linha, linha + j + 1):
                    matriz_aliado[r][coluna + 1] = a
            elif linha == 0 and coluna == 9:
                matriz_aliado[j][coluna] = a
                for r in range(linha, linha + j + 1):
                    matriz_aliado[r][coluna - 1] = a
            elif len(barco) == len(matriz_aux):
                if coluna == 0:
                    matriz_aliado[linha - 1][coluna] = a
                    for r in range(linha - 1, linha + len(barco)):
                        matriz_aliado[r][coluna + 1] = a
                elif coluna == 9:
                    matriz_aliado[linha - 1][coluna] = a
                    for r in range(linha - 1, linha + len(barco)):
                        matriz_aliado[r][coluna - 1] = a
                elif 0 < coluna < 9:
                    matriz_aliado[linha - 1][coluna] = a
                    for r in range(linha - 1, linha + len(barco)):
                        matriz_aliado[r][coluna - 1] = a
                        matriz_aliado[r][coluna + 1] = a
            matriz_aux.clear()
    if i == len(barco):
        return matriz_aliado
    matriz_aliado[linha][coluna] = barco[i]
    # Place ship horizontally
    if sentido_do_barco == 1:
        return coloca_barco(linha, coluna + 1, sentido_do_barco, matriz_aliado, barco, i + 1)
    # Place ship vertically
    else:
        return coloca_barco(linha + 1, coluna, sentido_do_barco, matriz_aliado, barco, i + 1)
# End of coloca_barco


def corrige(barco, linha, coluna):
    '''Checks whether the ship position is valid within the limits of matriz_aliado.'''
    # Horizontal check
    if len(matriz_aliado[linha][coluna:]) < len(barco) and sentido_do_barco == 1:
        return False
    cont = 1
    for i in range(linha, 10):
        cont += 1
    # Vertical check
    if cont < len(barco) and sentido_do_barco == 2:
        return False
    else:
        return True


def verifica_coloca_barco(barco, linha, coluna, sentido_do_barco):
    '''Checks whether a ship is being placed on top of another; sentido_do_barco == 1 (horizontal), sentido_do_barco == 2 (vertical).'''
    a = '~'
    if sentido_do_barco == 1:
        try:
            for p in range(len(barco)):
                # Checks whether any 'x' or '1' occupies the cells the ship would take,
                # or whether the ship exceeds the board limit, allowing the placement or not.
                if 'x' not in matriz_aliado[linha][coluna:coluna + len(barco)] and 1 not in matriz_aliado[linha][coluna:coluna + len(barco)]:
                    return True
        except Exception:
            return False
        cont_h = 0
        for p in range(coluna - 1, coluna + len(barco)):
            # Prevents any part of the ship from being replaced by 'a', blocking the placement.
            # The element matriz_aliado[linha][p] may be part of a ship placed earlier.
            if a != matriz_aliado[linha][p]:
                cont_h += 1
        if cont_h == 0:
            return True
        else:
            return False
    elif sentido_do_barco == 2:
        # Checks whether the ship can be placed vertically at the requested position.
        cont_v = 0
        for p in range(linha, linha + len(barco)):
            try:
                if matriz_aliado[p][coluna] == a:
                    cont_v += 1
            except IndexError:
                return False
        if cont_v >= len(barco):
            return True
        else:
            return False


print('\n Remember some rules of Battleship: \n '
      '\n 1 - A ship cannot be placed next to another ship; '
      '\n 2 - The board limits must be respected, i.e. no part of a ship may lie outside '
      '\n the board; if that happens a new position will be requested; '
      '\n 3 - A ship cannot be placed on a position already occupied by part of another ship;'
      '\n 4 - When a ship is hit, the number 1 appears at the shot location; otherwise the letter "x" is shown;'
      '\n 5 - The player who first sinks all ships wins;'
      '\n 6 - When choosing a position for a ship, its first part is placed at the selected position,'
      ' \n and the remaining parts follow the direction (horizontal or vertical) you choose;'
      '\n 7 - Game board: ')
print('')
print('Greetings, Captain! Welcome aboard! To start the battle, let us position our ships: ')
print('')
resp = 'y'
nomes_jogador = []
acertos_jogador = []
while resp == 'y':
    letras = 'ABCDEFGHIJ'
    linha_numero = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    matriz_aliado = []
    matriz_inimigo = []
    barcos = [[1], [1, 1], [1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1, 1]]
    cria_grelha(matriz_aliado)
    cria_grelha(matriz_inimigo)
    interface(matriz_inimigo)
    print('')
    jogador = input('What is your name, Sir? And future CAPTAIN!? ')
    nomes_jogador.append(jogador)
    while True:
        try:
            nivel_dificuldade = int(input('Which level does the CAPTAIN want to play( 1 - Easy(miss 25 shots), 2 - Normal(miss 20 shots), '
                                          '3 - Hard(miss 15 shots) or 4 - Nearly Impossible(miss 10 shots)? '))
            if nivel_dificuldade not in [1, 2, 3, 4]:
                print('Invalid number. Enter a number from 1 to 4.')
                continue
            break
        except ValueError:
            print('Invalid character. Enter a number from 1 to 4.')
    print('')
    n = []
    for i in range(len(barcos)):
        # Picks the position and direction for each ship and calls the functions that check whether the position is valid.
        while True:
            # Prevents the same ship from being placed more than once on the board.
            x = randint(0, 4)
            if x not in n:
                n.append(x)
                break
        while True:
            # Randomly positions the ship.
            sentido_do_barco = randint(1, 2)
            linha = randint(0, 9)
            coluna = randint(0, 9)
            if corrige(barcos[x], linha, coluna) == True and verifica_coloca_barco(barcos[x], linha, coluna, sentido_do_barco) == True:
                # Calls the functions above to check whether the ship can be placed at the requested position.
                coloca_barco(linha, coluna, sentido_do_barco, matriz_aliado, barcos[x], 0)
                break
            else:
                continue
    # End of the for loop
    print('\n Now the naval battle begins! Tell me the position we should fire at, Captain!')
    print('')
    while True:
        # Asks for the position the player will shoot at.
        while True:
            c = input('Enter the letter of the row to fire at: ')
            letra_tiro = c.upper()
            if letra_tiro not in letras:
                print('Invalid letter. Enter an uppercase letter from A to J.')
                print('')
                continue
            else:
                break
        while True:
            try:
                numero = int(input('Enter the number of the column to fire at: '))
                if numero < 1 or numero > 10:
                    print('Invalid value. Enter a number from 1 to 10.')
                    continue
                else:
                    print('')
                    break
            except ValueError:
                print('Invalid value. Enter a number from 1 to 10.')
                continue
        verifica_tiro(numero, letra_tiro)
        interface(matriz_inimigo)
        print('')
        # Here begins the counting of hits and misses, which determines when the game ends.
        contador_de_erros = 0
        contador_de_seus_acertos = 0
        for y in range(0, 10):
            # Counts the missed shots ('O' markers).
            contador_de_erros += matriz_inimigo[y].count('O')
        if nivel_dificuldade == 1:
            if contador_de_erros == 25:
                interface(matriz_aliado)
                print('We are OUT of AMMO, Captain!!')
                for y in range(0, 10):
                    # Checks whether you won the battle by counting the red 'X' hit markers.
                    contador_de_seus_acertos += matriz_inimigo[y].count(colored('X', 'red'))
                acertos_jogador.append(contador_de_seus_acertos)
                while True:
                    resp = input('Would you like to ARM the CANNONS and FIRE again (y or n)? ')
                    if resp == 'n':
                        resultado()
                    elif resp not in ['y', 'n']:
                        print('Enter y to play again or n to stop playing!')
                        continue
                    break
                break
            else:
                print('Which position will you fire at next, Captain??')
        elif nivel_dificuldade == 2:
            if contador_de_erros == 20:
                interface(matriz_aliado)
                print('We are OUT of AMMO, Captain!!')
                for y in range(0, 10):
                    # Checks whether you won the battle by counting the red 'X' hit markers.
                    contador_de_seus_acertos += matriz_inimigo[y].count(colored('X', 'red'))
                acertos_jogador.append(contador_de_seus_acertos)
                while True:
                    resp = input('Would you like to ARM the CANNONS and FIRE again (y or n)? ')
                    if resp == 'n':
                        resultado()
                    elif resp not in ['y', 'n']:
                        print('Enter y to play again or n to stop playing!')
                        continue
                    break
                break
            else:
                print('Which position will you fire at next, Captain??')
        elif nivel_dificuldade == 3:
            if contador_de_erros == 15:
                interface(matriz_aliado)
                print('We are OUT of AMMO, Captain!!')
                for y in range(0, 10):
                    # Checks whether you won the battle by counting the red 'X' hit markers.
                    contador_de_seus_acertos += matriz_inimigo[y].count(colored('X', 'red'))
                acertos_jogador.append(contador_de_seus_acertos)
                while True:
                    resp = input('Would you like to ARM the CANNONS and FIRE again (y or n)? ')
                    if resp == 'n':
                        resultado()
                    elif resp not in ['y', 'n']:
                        print('Enter y to play again or n to stop playing!')
                        continue
                    break
                break
            else:
                print('Which position will you fire at next, Captain??')
        elif nivel_dificuldade == 4:
            if contador_de_erros == 10:
                interface(matriz_aliado)
                print('We are OUT of AMMO, Captain!!')
                for y in range(0, 10):
                    # Checks whether you won the battle by counting the red 'X' hit markers.
                    contador_de_seus_acertos += matriz_inimigo[y].count(colored('X', 'red'))
                acertos_jogador.append(contador_de_seus_acertos)
                while True:
                    resp = input('Would you like to ARM the CANNONS and FIRE again (y or n)? ')
                    if resp == 'n':
                        resultado()
                    elif resp not in ['y', 'n']:
                        print('Enter y to play again or n to stop playing!')
                        continue
                    break
                break
            else:
                print('Which position will you fire at next, Captain??')
|
[
"leandrolinken@gmail.com"
] |
leandrolinken@gmail.com
|
ba8682459d95aafa9bb23a4013ed976df4ed69dd
|
289e3289897f7e794e3f6b281c727f74185ab5ff
|
/deid/dicom/validate.py
|
061406f407b7b40643fa3c4188ca3e56c76cc461
|
[
"MIT"
] |
permissive
|
W7CAM/deid
|
fd3df596e0c48d243b838c1a31ed5376f02eaf61
|
808bbf6355b923f26694d8a73dc21d35a857a145
|
refs/heads/master
| 2020-03-30T02:12:11.790957
| 2018-09-19T15:47:57
| 2018-09-19T15:47:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,932
|
py
|
'''
validate.py: helper functions for validating dicom files

Copyright (c) 2017-2018 Vanessa Sochat

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''

from deid.logger import bot
from pydicom import read_file
import sys


def validate_dicoms(dcm_files, force=False):
    '''validate dicoms will test opening one or more dicom files, and return a list
    of valid files.

    :param dcm_files: one or more dicom files to test'''
    if not isinstance(dcm_files, list):
        dcm_files = [dcm_files]

    valids = []

    bot.debug("Checking %s dicom files for validation." % (len(dcm_files)))
    for dcm_file in dcm_files:
        try:
            with open(dcm_file, 'rb') as filey:
                dataset = read_file(filey, force=force)
            valids.append(dcm_file)
        except Exception:
            bot.warning('Cannot read input file {0!s}, skipping.'.format(dcm_file))

    bot.info("Found %s valid dicom files" % (len(valids)))
    return valids
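# Usage sketch (hypothetical file names, matching the signature above):
#
#     from deid.dicom.validate import validate_dicoms
#     valids = validate_dicoms(['scan1.dcm', 'scan2.dcm'], force=True)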
|
[
"vsochat@stanford.edu"
] |
vsochat@stanford.edu
|
9e166518f56b8cab93f0dc4a84da5f13cc0fbc28
|
0d2668b04d29e52972e9c2c71df8a597e9e3b06f
|
/collectData.py
|
cafb864218497d186bd3de75e1197a849a10cb75
|
[] |
no_license
|
mhenstell/cb-collect
|
dceb96062b50e9eecdf85656ae75c3c36a458b86
|
e2295ce5c2047d2f9caedfd93e74929ed7c1ff35
|
refs/heads/master
| 2020-05-17T17:02:52.307848
| 2014-02-28T01:35:45
| 2014-02-28T01:35:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
from pycitibike import Citibike
import time
import psycopg2
import sys
import signal


def signal_handler(signal, frame):
    print('You pressed Ctrl+C!')
    con.close()
    sys.exit(0)


con = None

try:
    con = psycopg2.connect(host='localhost', database='citibike', user='citibike', password=sys.argv[1])
    cur = con.cursor()
    con.autocommit = True
    cur.execute('SELECT version()')
    ver = cur.fetchone()
    print(ver)
except psycopg2.DatabaseError as e:
    print('Error %s' % e)
    sys.exit(1)

client = Citibike()
stations = {}
oldStations = {}
lastTime = 0

signal.signal(signal.SIGINT, signal_handler)

while True:
    start = time.time()
    try:
        stations = client.stations()
    except Exception as e:
        print(e)
        continue

    timestamp = psycopg2.TimestampFromTicks(time.time())
    print("Updating %s" % timestamp)

    for station in stations:
        sid = station['id']
        if sid not in oldStations:
            pass
        elif station['availableDocks'] != oldStations[sid]['availableDocks'] or station['availableBikes'] != oldStations[sid]['availableBikes']:
            sql = "insert into log(stationid, timestamp, availableBikes, availableDocks) values (%i, %s, %i, %i)" % (sid, timestamp, station['availableBikes'], station['availableDocks'])
            # print(sql)
            cur.execute(sql)
        oldStations[sid] = station

    end = time.time()
    runlength = int(end - start)
    cur.execute("insert into runlog(runlength) values(%i)" % runlength)
    time.sleep(30)
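# Note: psycopg2 can bind parameters itself, which sidesteps the quoting
# and injection pitfalls of building SQL with the % operator above:
#
#     cur.execute(
#         "insert into log(stationid, timestamp, availableBikes, availableDocks) "
#         "values (%s, %s, %s, %s)",
#         (sid, timestamp, station['availableBikes'], station['availableDocks']))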
|
[
"max@kapamaki.net"
] |
max@kapamaki.net
|
7ac11c9aec24b2e8c50fd89a65f84931e2d16081
|
1cdc31a292e51419988604e0f2284f802c03fc80
|
/WSDAN_model/acc.py
|
e45eeffb0c04e8f9038fd0fb9a292d870379fe68
|
[] |
no_license
|
liuhy14/CS231N_project
|
b27692b9e8c762111d53aed361eeb85e995e4502
|
882d70aba4f507bae653c050d455b41f44bf74b4
|
refs/heads/master
| 2020-05-22T02:13:36.719970
| 2019-06-05T18:20:32
| 2019-06-05T18:20:32
| 186,194,025
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,530
|
py
|
from dataset import *
from models import *
import torch
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from utils import accuracy
import torch.nn as nn
import os
import numpy as np
import csv
import pickle

os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

data_root = os.path.join('..', '..', 'dataset')
train_file = os.path.join('..', '..', 'dataset', 'train2019.json')
val_file = os.path.join('..', '..', 'dataset', 'val2019.json')
test_file = os.path.join('..', '..', 'dataset', 'test2019.json')

image_size = (512, 512)
num_attentions = 32
feature_net = inception_v3(pretrained=True)
num_classes = 1010
batch = 64

net = WSDAN(num_classes=num_classes, M=num_attentions, net=feature_net)

# ckpt = '../../backup_main/latest.ckpt'
ckpt = '../../backup_balanced/latest.ckpt'
# ckpt = '../../backup_balanced/best_top1_val_acc.ckpt'

checkpoint = torch.load(ckpt)
state_dict = checkpoint['state_dict']
cudnn.benchmark = True
net.load_state_dict(state_dict)
net.to('cuda')
net = nn.DataParallel(net)

validate_dataset = INAT(data_root, val_file, image_size, is_train=False)
validate_loader = DataLoader(validate_dataset, batch_size=batch, shuffle=False, num_workers=4, pin_memory=True)

top1 = np.zeros(num_classes)
top3 = np.zeros(num_classes)
top5 = np.zeros(num_classes)
total = np.zeros(num_classes)

net.eval()
with torch.no_grad():
    for i, (X, _, y, _) in enumerate(validate_loader):
        n = y.size(0)
        X = X.to('cuda')
        y = y.to('cuda')
        y_pred, feature_matrix, attention_map = net(X)
        values, indices = y_pred.max(1)
        _, top_3_pred = y_pred.topk(3, 1, True, True)
        _, top_5_pred = y_pred.topk(5, 1, True, True)
        print('Batch:', i)
        top_1 = indices.data.cpu().numpy()
        top_3 = top_3_pred.data.cpu().numpy()
        top_5 = top_5_pred.data.cpu().numpy()
        y_targ = y.data.cpu().numpy()
        # print('Accuracy: ', np.sum(top_1 == y_targ) / n)
        for j in range(n):
            total[y_targ[j]] += 1
            if y_targ[j] == top_1[j]:
                top1[y_targ[j]] += 1
            if y_targ[j] in top_3[j]:
                top3[y_targ[j]] += 1
            if y_targ[j] in top_5[j]:
                top5[y_targ[j]] += 1

pickle.dump(top1, open("top1.pkl", "wb"))
pickle.dump(top3, open("top3.pkl", "wb"))
pickle.dump(top5, open("top5.pkl", "wb"))
# pickle.dump(total, open("total.pkl", "wb"))

print("Top 1 Accuracy", sum(top1) / sum(total))
print("Top 3 Accuracy", sum(top3) / sum(total))
print("Top 5 Accuracy", sum(top5) / sum(total))
|
[
"google-dl-platform@googlegroups.com"
] |
google-dl-platform@googlegroups.com
|
c6be021a4d77d891dc84989797588b97e80fd579
|
a230e4c7f4cd862de09201da08babc5e8165052b
|
/wishlist/migrations/0006_remove_wishlist_price.py
|
c11c42285c90680222083a72fd58f547b35077c9
|
[] |
no_license
|
Code-Institute-Submissions/richard-ui-b_fitness_store_SepResub
|
c172e7f2fbaee24093168f4c3d7721a29a708392
|
4dbb32c790d0013a07c0912abeed0b9bab3a5571
|
refs/heads/master
| 2023-08-21T13:16:16.700565
| 2021-09-22T10:47:53
| 2021-09-22T10:47:53
| 409,634,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Generated by Django 3.2 on 2021-08-18 11:57
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('wishlist', '0005_auto_20210817_1707'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='wishlist',
            name='price',
        ),
    ]
|
[
"richard-jones96@hotmail.co.uk"
] |
richard-jones96@hotmail.co.uk
|
60b68041d2d0675f6dc9214fa697608944bf01d7
|
125082a3987e95df7eeb0f181dc7125276d761dd
|
/src/test/javaFileReaderTestPackage/Orange.py
|
3ba54aee734c7cf71ad1db7e747b5c648bf2fb42
|
[] |
no_license
|
EvanQuan/SENG300G1
|
a2568a9d3cbd99e5b006c6a7ae3053f20e9f2af0
|
a1328481680bf49b2307296e30c63d3ea898680b
|
refs/heads/master
| 2021-03-19T13:42:38.443321
| 2018-04-30T01:42:17
| 2018-04-30T01:42:17
| 123,995,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
# Test python file
print("Hello world!")
|
[
"slchan@XIV.com"
] |
slchan@XIV.com
|
dbde6670e5fecf5794b21381026175dcc3c768de
|
ede97a8afbf35b325a0ada346b45566b7b88312d
|
/app/.~c9_invoke_vwc6aL.py
|
c5d8e94047bbe69c17fd9f49e86638d6716d73fc
|
[] |
no_license
|
loni-mitch/Photogram
|
2ca2a4cd165dcca4d10159d54c5a448ad057d0da
|
ecbdc54949a0b04caa990c07f5bdbd356a1b473e
|
refs/heads/master
| 2020-03-14T09:55:46.236533
| 2018-07-18T17:46:48
| 2018-07-18T17:46:48
| 131,555,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,662
|
py
|
"""
Flask Documentation: http://flask.pocoo.org/docs/
Jinja2 Documentation: http://jinja.pocoo.org/2/documentation/
Werkzeug Documentation: http://werkzeug.pocoo.org/documentation/
This file creates your application.
"""
import os
from app import app,db,filefolder,token_key
from flask import render_template, request, redirect, url_for, flash, g,jsonify,session,abort
import os, datetime
from flask_login import login_user, logout_user, current_user, login_required
from forms import SignUpForm,LoginForm,PostForm
from functools import wraps
from models import Users, Posts, Likes, Follows
from werkzeug.utils import secure_filename
from app.models import Users,Posts, Follows, Likes
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
from functools import wraps
###
# Routing for your application.
###
#***************************************FRONT END ROUTES************************************
#---------------------------------------AUTHENTICATION-----------------------------------------
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.headers.get('Authorization', None)
if not auth:
return jsonify({'code': 'authorization_header_missing', 'description': 'Authorization header is expected'}), 401
parts = auth.split()
if parts[0].lower() != 'bearer':
return jsonify({'code': 'invalid_header', 'description': 'Authorization header must start with Bearer'}), 401
elif len(parts) == 1:
return jsonify({'code': 'invalid_header', 'description': 'Token not found'}), 401
elif len(parts) > 2:
return jsonify({'code': 'invalid_header', 'description': 'Authorization header must be Bearer + \s + token'}), 401
token = parts[1]
try:
payload = jwt.decode(token, token_key)
get_user = Users.query.filter_by(id=payload['user_id']).first()
except jwt.ExpiredSignature:
return jsonify({'code': 'token_expired', 'description': 'token is expired'}), 401
except jwt.DecodeError:
return jsonify({'code': 'token_invalid_signature', 'description': 'Token signature is invalid'}), 401
g.current_user = user = get_user
return f(*args, **kwargs)
return decorated
#----------------------------------------INDEX---------------------------------------------
@app.route('/')
def index():
initial_form = SignUpForm()
"""Render website's initial page and let VueJS take over."""
return render_template('index.html', initial_form = initial_form)
#-----------------------------------------LOGIN-------------------------------------------
@app.route('/api/auth/login', methods=["POST"])
def login():
form = LoginForm()
if request.method == "POST" and form.validate_on_submit():
username = request.form['username']
password = request.form['password']
user = Users.query.filter_by(username = username).first()
if user and check_password_hash(user.password, password):
payload = {'user_id' : user.id}
token = jwt.encode(payload, token_key)
session['userid'] = user.id;
return jsonify(data={'token': token, 'userid': user.id}, message="User successfully logged in")
else:
return jsonify(errorm="Incorrect username or password")
error_msgs = form_errors(form)
error = [{'errors': error_msgs}]
return jsonify(errors=error)
#----------------------------------------LOGOUT------------------------------------------
@app.route('/api/auth/logout', methods = ['GET'])
@requires_auth
def logout():
g.current_user = None
session.pop('userid', None)
return jsonify(message = "User successfully logged out")
#----------------------------------------REGISTER------------------------------------------
@app.route('/api/users/register', methods = ['POST'])
def register():
form = SignUpForm()
if request.method == 'POST' and form.validate_on_submit():
firstname = request.form['firstname']
lastname = request.form['lastname']
username = request.form['username']
password = generate_password_hash(request.form['password'])
location = request.form['location']
email = request.form['email']
bio = request.form['biography']
image = request.files['image']
profile_created_on = datetime.datetime.now()
filename = secure_filename(image.filename)
image.save(os.path.join(filefolder, filename))
user = Users(username = username, password = password, firstname = firstname, lastname = lastname, email = email,location = location, biography = bio, image = filename, joined_on = profile_created_on)
db.session.add(user)
db.session.commit()
response = [{'message': 'User has been successfully registered'}]
return jsonify(result = response)
error_msgs = form_errors(form)
error = [{'errors': error_msgs}]
return jsonify(errors =error)
#--------------------------------------------POSTS-------------------------------------------
@app.route('/api/posts/', methods=["GET"])
@requires_auth
def show_all_posts():
all_posts = Posts.query.order_by(Posts.created_on.desc()).all()
postlist = []
for post in all_posts:
user = Users.query.filter_by(id = post.user_id).first()
likes = Likes.query.filter_by(post_id = post.id).all()
likescount = [];
for like in likes:
count = {'test': "counted"}
likescount.append(count)
liked_post = Likes.query.filter_by(user_id = session['userid'], post_id= post.id).first()
if(liked_post is None):
likeflag = False
else:
likeflag = True
postdate= post.created_on.strftime("%d %b %Y");
posted = {"postid":post.id,"userid": post.user_id, "username": user.username, "profilephoto": user.image, "photo": post.postimage, "caption": post.caption, "created_on": postdate, "likes": likescount, "likeflag": likeflag}
postlist.append(posted)
return jsonify(data = postlist)
#------------------------------------USER POSTS----------------------------------------------
@app.route('/api/users/<user_id>/posts',methods=["GET","POST"])
@requires_auth
def add_post(user_id):
form = PostForm()
if request.method == "POST":
if form.validate_on_submit():
userid = user_id
caption = request.form['caption']
photo = request.files['postimage']
post_date = datetime.datetime.now()
post_photo = secure_filename(photo.filename)
post = Posts(user_id = userid, postimage = post_photo, caption = caption, created_on = post_date)
db.session.add(post)
db.session.commit()
photo.save(os.path.join(filefolder, post_photo))
return jsonify({'message':"Successfully created a new post"})
elif request.method == "GET":
user = Users.query.filter_by(id = user_id).first()
if not user:
return jsonify({'message': "no user found"})
user_posts = Posts.query.filter_by(user_id = user_id).all()
userposts = []
for user_post in user_posts:
post_data = {'id':user_post.id,'user_id': user_post.user_id,'postimage': user_post.post_photo,'caption': user_post.caption,'created_on': user_post.post_date}
userposts.append(post_data)
return jsonify(data = userposts)
error_msgs = form_errors(form)
error = [{'errors': error_msgs}]
return jsonify(errors = error)
#------------------------------------------USER----------------------------------------------
@app.route('/api/users/<user_id>/', methods=["GET"])
@requires_auth
def get_user(user_id):
user = Users.query.filter_by(id = user_id).first()
userlist = []
if (int(user_id) == session['userid']):
date_join = user.joined_on.strftime("%B %Y");
user_info = {"userid": user.id, "username": user.username, "firstname": user.firstname, "lastname": user.lastname, "email": user.email, "location": user.location, "biography": user.biography,"photo": user.image, "joined_on": date_join}
userlist.append(user_info)
return jsonify(profile = userlist, isuser =True)
date_join = user.joined_on.strftime("%B %Y");
user_info = {"userid": user.id, "username": user.username, "firstname": user.firstname, "lastname": user.lastname, "email": user.email, "location": user.location, "biography": user.biography,"photo": user.image, "joined_on": date_join}
userlist.append(user_info)
return jsonify(profile = user_list)
#-------------------------------------------LIKE--------------------------------------------
@app.route('/api/users/<post_id>/like',methods=["POST"])
@requires_auth
def like_post(post_id):
like_check = Likes.query.filter_by(user_id = session['userid'], post_id = post_id).first()
if(like_check is None):
like = Likes(user_id = session['userid'], post_id = post_id)
db.session.add(like)
db.session.commit()
return jsonify (message= 'Post liked!')
return jsonify (DB = 'Cannot like post again!')
#------------------------------------FOLLOW--------------------------------------------------
@app.route('/api/users/<user_id>/followers',methods=["GET"])
@requires_auth
def followers(user_id):
followers_count = Follows.query.filter_by(user_id = user_id).all()
number_of_followers = []
for count in followers_count:
num = {'test': "counted"}
number_of_followers.append(num)
return jsonify (follower = number_of_followers)
@app.route('/api/users/<user_id>/following',methods=["GET"])
@requires_auth
def followers_check(user_id):
follow_check = Follows.query.filter_by(user_id = user_id, follower_id = session['userid']).first()
if(follow_check is None):
return jsonify (following = False)
return jsonify (following = True)
@app.route('/api/users/<user_id>/follow',methods=["POST"])
@requires_auth
def follow_user(user_id):
follow = Follows(user_id = user_id, follower_id = session['userid'])
db.session.add(follow)
db.session.commit()
return jsonify (message = 'you are now following this user!')
#-------------------------------OTHER USEFUL ROUTES--------------------------------------
# Here we define a function to collect form errors from Flask-WTF
# which we can later use
def form_errors(form):
error_messages = []
"""Collects form errors"""
for field, errors in form.errors.items():
for error in errors:
message = u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
)
error_messages.append(message)
return error_messages
###
# The functions below should be applicable to all Flask apps.
###
def dictify(data_object):
"""
Returns a dictionary containing the attributes and thier values
for an object returned from a DB query
"""
key_value_pairs = data_object.__dict__.items()
object_dictionary = {}
for key,value in key_value_pairs:
if not key == '_sa_instance_state':
#All db ojects will have this but we do not need it here
# for example: ('_sa_instance_state', <sqlalchemy.orm.state.InstanceState object at 0x7f6696d831d0>)
object_dictionary[key] = value
return object_dictionary
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error), 'danger')
@app.route('/<file_name>.txt')
def send_text_file(file_name):
"""Send your static text file."""
file_dot_text = file_name + '.txt'
return app.send_static_file(file_dot_text)
@app.after_request
def add_header(response):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also tell the browser not to cache the rendered page. If we wanted
to we could change max-age to 600 seconds which would be 10 minutes.
"""
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
@app.errorhandler(404)
def page_not_found(error):
"""Custom 404 page."""
return render_template('404.html'), 404
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0", port="8080")
|
[
"tajsivers@live.com"
] |
tajsivers@live.com
|
76eff1ed6e5549116e7a44b5f0bee37b329388b9
|
ab4fd9eed607a25f214d830d12ebc6149b4a7bc4
|
/Page1/Euler23.py
|
daadd0369717292b5bfd9a67051694034e77f6f0
|
[] |
no_license
|
lj8175/Euler
|
422b31cef99b31030d91f5b49c92016e2a923d2f
|
9a48771754fe4af964a5f8384eb65edde238620f
|
refs/heads/master
| 2021-01-23T13:22:38.151411
| 2013-12-24T02:51:06
| 2013-12-24T02:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
'''
Created on 2013-5-26
@author: lj8175
'''
import math
def listDivisors(x):
    """Return the proper divisors of x (excluding x itself)."""
    ret = []
    if x > 1: ret.append(1)
    for i in range(2, int(math.sqrt(x)) + 1):
        if x % i == 0:
            ret.append(i)
            # integer division so the code also runs on Python 3
            if i != x // i: ret.append(x // i)
    return ret
def isAbundant(x):
    return sum(listDivisors(x)) > x
def isSumOfTwoAbundant(x):
    for i in range(x // 2 + 1):
        if isAbundant(i) and isAbundant(x - i):
            return True
    return False
if __name__ == '__main__':
    ret = []
    for i in range(28123 + 1):
        if not isSumOfTwoAbundant(i):
            ret.append(i)
    print(sum(ret))
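# A much faster sketch (same answer, but not the original approach): precompute the
# abundant numbers once, then test x - a for each abundant a up to x // 2.
# abundant = [n for n in range(1, 28124) if isAbundant(n)]
# abundant_set = set(abundant)
# answer = sum(x for x in range(28124)
#              if not any((x - a) in abundant_set
#                         for a in abundant if a <= x // 2))
# print(answer)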
|
[
"lj8175@gmail.com"
] |
lj8175@gmail.com
|
439d4e0e01a0dbd199dfabeb358cc8ce71c00c81
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Non_Linear_Finite_Element_Analysis_of_Solids_and_Structures_Borst/pyfem-1.0/pyfem/util/kinematics.py
|
62c043d65dd3620dc582fc2b0ff225e5bda9f2e4
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
############################################################################
# This Python file is part of PyFEM-1.0, released on Aug. 29, 2012. #
# The PyFEM code accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# R. de Borst, M.A. Crisfield, J.J.C. Remmers and C.V. Verhoosel #
# John Wiley and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by J.J.C. Remmers, C.V. Verhoosel and R. de Borst. #
# Comments and suggestions can be sent to: #
# PyFEM-support@tue.nl #
# #
# The latest version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
from numpy import zeros
class Kinematics:
def __init__( self , nDim , nStr ):
self.F = zeros( shape=( nDim , nDim ) )
self.E = zeros( shape=( nDim , nDim ) )
self.strain = zeros( nStr )
|
[
"me@yomama.com"
] |
me@yomama.com
|
2062ec0c1307e51392ea5eab1970a6f67bf27a58
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02699/s393112489.py
|
3018e717e89c8ac744d09ee4877d1e238df00bde
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
nums = [int(e) for e in input().split()]
Sheep = nums[0]
Wolves = nums[1]
if Sheep > Wolves:
print("safe")
else:
print("unsafe")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
62aeb3577cd61fb36517bd3075a4b2748176647b
|
1d3235c4818de4ed5abd683381ccf2768b8cf202
|
/examples/qm9/train_qm9.py
|
a1d28788f92edd4f10bcefede07c712bdaae9038
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
corochann/chainer-chemistry
|
af9284815b4982123f22f1302ff959fd6577adaa
|
8e918e557fe9bce865d9d543ea2864d027827941
|
refs/heads/master
| 2021-06-01T13:14:08.313857
| 2017-12-01T10:03:04
| 2017-12-01T10:03:04
| 112,725,110
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,749
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
try:
    from sklearn.preprocessing import StandardScaler
except ImportError:
    print('You need to install scikit-learn to run this example, '
          'please run `pip install -U scikit-learn`')
try:
import matplotlib
matplotlib.use('Agg')
except ImportError:
pass
import chainer
from chainer import functions as F, cuda, Variable
from chainer import iterators as I
from chainer import links as L
from chainer import optimizers as O
from chainer import training
from chainer.datasets import split_dataset_random
from chainer.training import extensions as E
import numpy
from chainerchem import datasets as D
from chainerchem.models import MLP, NFP, GGNN, SchNet, WeaveNet
from chainerchem.dataset.converters import concat_mols
from chainerchem.dataset.preprocessors import preprocess_method_dict
from chainerchem.datasets import NumpyTupleDataset
class GraphConvPredictor(chainer.Chain):
def __init__(self, graph_conv, mlp):
"""
Args:
graph_conv: graph convolution network to obtain molecule feature
representation
mlp: multi layer perceptron, used as final connected layer
"""
super(GraphConvPredictor, self).__init__()
with self.init_scope():
self.graph_conv = graph_conv
self.mlp = mlp
def __call__(self, atoms, adjs):
x = self.graph_conv(atoms, adjs)
x = self.mlp(x)
return x
def _predict(self, atoms, adjs):
with chainer.no_backprop_mode(), chainer.using_config('train', False):
x = self.__call__(atoms, adjs)
return F.sigmoid(x)
def predict(self, *args, batchsize=32, device=-1):
if device >= 0:
chainer.cuda.get_device_from_id(device).use()
self.to_gpu() # Copy the model to the GPU
        # TODO: not tested yet; check behavior
data = args[0]
y_list = []
for i in range(0, len(data), batchsize):
#adj, atom_types = concat_examples(data[i:i + batchsize], device=device)
atoms, adjs = concat_mols(data[i:i + batchsize], device=device)[:2]
y = self._predict(atoms, adjs)
y_list.append(cuda.to_cpu(y.data))
y_array = numpy.concatenate(y_list, axis=0)
return y_array
def main():
# Supported preprocessing/network list
method_list = ['nfp', 'ggnn', 'schnet', 'weavenet']
label_names = ['A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2',
'zpve', 'U0', 'U', 'H', 'G', 'Cv']
parser = argparse.ArgumentParser(
description='Regression with QM9.')
parser.add_argument('--method', '-m', type=str, choices=method_list,
default='nfp')
parser.add_argument('--label', '-l', type=str, choices=label_names,
default='', help='target label for regression, '
'empty string means to predict all '
'property at once')
parser.add_argument('--scale', type=str, default='standardize',
help='Label scaling method')
parser.add_argument('--conv_layers', '-c', type=int, default=4)
parser.add_argument('--batchsize', '-b', type=int, default=128)
parser.add_argument('--gpu', '-g', type=int, default=-1)
parser.add_argument('--out', '-o', type=str, default='result')
parser.add_argument('--epoch', '-e', type=int, default=20)
parser.add_argument('--unit_num', '-u', type=int, default=16)
args = parser.parse_args()
seed = 777
train_data_ratio = 0.7
method = args.method
if args.label:
labels = args.label
cache_dir = os.path.join('input', '{}_{}'.format(method, labels))
class_num = len(labels) if isinstance(labels, list) else 1
else:
labels = None
cache_dir = os.path.join('input', '{}_all'.format(method))
class_num = len(D.get_qm9_label_names())
# Dataset preparation
dataset = None
#cache_dir = os.path.join('input', '{}'.format(method))
if os.path.exists(cache_dir):
print('load from cache {}'.format(cache_dir))
dataset = NumpyTupleDataset.load(os.path.join(cache_dir, 'data.npz'))
if dataset is None:
print('preprocessing dataset...')
preprocessor = preprocess_method_dict[method]()
dataset = D.get_qm9(preprocessor, labels=labels)
os.makedirs(cache_dir)
NumpyTupleDataset.save(os.path.join(cache_dir, 'data.npz'), dataset)
if args.scale == 'standardize':
# Standard Scaler for labels
ss = StandardScaler()
labels = ss.fit_transform(dataset._datasets[-1])
dataset = NumpyTupleDataset(*dataset._datasets[:-1], labels)
train_data_size = int(len(dataset) * train_data_ratio)
train, val = split_dataset_random(dataset, train_data_size, seed)
# Network
if method == 'nfp':
print('Train NFP model...')
n_unit = args.unit_num
model = GraphConvPredictor(NFP(n_unit, n_unit, args.conv_layers),
MLP(n_unit, class_num))
elif method == 'ggnn':
print('Train GGNN model...')
n_unit = args.unit_num
model = GraphConvPredictor(GGNN(n_unit, n_unit, args.conv_layers),
MLP(n_unit, class_num))
elif method == 'schnet':
print('Train SchNet model...')
model = SchNet(out_dim=class_num)
elif method == 'weavenet':
print('Train WeaveNet model...')
# TODO: Review default parameter
n_unit = args.unit_num
n_atom = 20
k_unit = 10
n_layer = 1
n_sub_layer = 1
fully_channels = [5]
n_output = class_num
model = GraphConvPredictor(WeaveNet(n_output, n_atom, [k_unit]*n_layer,
fully_channels, n_sub_layer),
MLP(n_unit, class_num))
else:
print('[ERROR] Invalid mode')
exit()
train_iter = I.SerialIterator(train, args.batchsize)
val_iter = I.SerialIterator(val, args.batchsize,
repeat=False, shuffle=False)
def scaled_abs_error(x0, x1):
if isinstance(x0, Variable):
x0 = cuda.to_cpu(x0.data)
if isinstance(x1, Variable):
x1 = cuda.to_cpu(x1.data)
scaled_x0 = ss.inverse_transform(cuda.to_cpu(x0))
scaled_x1 = ss.inverse_transform(cuda.to_cpu(x1))
return numpy.mean(numpy.absolute(scaled_x0 - scaled_x1), axis=0)[0]
classifier = L.Classifier(model, lossfun=F.mean_squared_error,
accfun=scaled_abs_error)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
classifier.to_gpu()
optimizer = O.Adam()
optimizer.setup(classifier)
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu,
converter=concat_mols)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(E.Evaluator(val_iter, classifier, device=args.gpu,
converter=concat_mols))
trainer.extend(E.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(E.LogReport())
trainer.extend(E.PrintReport(['epoch', 'main/loss', 'main/accuracy',
'validation/main/loss',
'validation/main/accuracy',
'elapsed_time']))
trainer.extend(E.ProgressBar())
trainer.run()
if __name__ == '__main__':
main()
|
[
"nakago@preferred.jp"
] |
nakago@preferred.jp
|
0189ddf0d26e6d06e0b79df3d3273c4c0c1f694a
|
61f12e69b3d7a11f9e0e6cf9cedb43c9af07c245
|
/Demo/Dijkstra's Algorithm.py
|
d5d67db1c533113b8ba3caf4eddfe0d5c418e877
|
[] |
no_license
|
ananabh/Graph_Algorithm
|
7bc6c35031fb82638239adb394fa41d27a632e14
|
c9d3c4b5ee720701d76d65eace00eeab484d4187
|
refs/heads/master
| 2021-07-24T13:40:21.312361
| 2020-01-03T10:19:08
| 2020-01-03T10:19:08
| 101,926,480
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,778
|
py
|
import priority_dict
from Adjecency_Matrix import *
def build_distance_table(graph, source):
    # Maps each vertex to a (shortest distance from source, preceding vertex) pair.
    distance_table = {}
    for i in range(graph.numVertices):
        distance_table[i] = (None, None)
    distance_table[source] = (0, source)
    priority_queue = priority_dict.priority_dict()
    priority_queue[source] = 0
    while len(priority_queue.keys()) > 0:
        current_vertex = priority_queue.pop_smallest()
        current_distance = distance_table[current_vertex][0]
        for neighbour in graph.get_adjacent_vertices(current_vertex):
            distance = current_distance + graph.get_edge_weight(current_vertex, neighbour)
            neighbour_distance = distance_table[neighbour][0]
            # Relax the edge if we found a shorter route to the neighbour.
            if neighbour_distance is None or neighbour_distance > distance:
                distance_table[neighbour] = (distance, current_vertex)
                priority_queue[neighbour] = distance
    return distance_table
def shortest_path(graph, source, destination):
    distance_table = build_distance_table(graph, source)
    path = [destination]
    previous_vertex = distance_table[destination][1]
    # Compare with != rather than `is not`: identity checks on integers are unreliable.
    while previous_vertex is not None and previous_vertex != source:
        path = [previous_vertex] + path
        previous_vertex = distance_table[previous_vertex][1]
    if previous_vertex is None:
        print("There is no path from %d to %d" % (source, destination))
    else:
        path = [source] + path
        print("Shortest path is : ", path)
g = AdjacencyMatrixGraph(8, directed=False)
g.add_edge(0, 1, 1)
g.add_edge(1, 2, 2)
g.add_edge(1, 3, 6)
g.add_edge(2, 3, 2)
g.add_edge(1, 4, 3)
g.add_edge(3, 5, 1)
g.add_edge(5, 4, 5)
g.add_edge(3, 6, 1)
g.add_edge(6, 7, 1)
g.add_edge(0, 7, 8)
shortest_path(g, 0, 6)
shortest_path(g, 4, 7)
shortest_path(g, 7, 0)
|
[
"abhianan@users.noreply.github.com"
] |
abhianan@users.noreply.github.com
|
120095782bf255595410ef8fa35a4d001f991f29
|
b4ba59a2fce3a3a69989e5326e437310254f9a51
|
/1/day16/json模块.py
|
f4abcf5ff6999177f9e77c37c21bd7c8d963c0b0
|
[] |
no_license
|
tianshang486/python
|
afaa206067715732ab828aad7f6afc8e9a475268
|
6414390d98dbf9c4428f1f54519f6c134cbd69b9
|
refs/heads/master
| 2022-08-31T05:49:15.711924
| 2020-05-22T03:08:14
| 2020-05-22T03:08:14
| 265,782,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
# JSON (JavaScript Object Notation)
# has become a simple, widely used data-exchange format.
# Serialization and deserialization:
# converting in-memory data into a byte string, so it can be saved to a file or
# sent over the network, is called serialization; deserialization converts it back.
# serialization / deserialization
# # json converts data to strings for storage or network transfer
import json
# a = json.dumps([1, 2, 3])
# print(a, type(a))
# # After serialization a tuple becomes a list, but the result is still a string
# b = json.dumps((4, 5, 6))
# print(b, type(b))
# # Dicts behave the same way as lists
# c = json.dumps({'name': 'zxl', 'age': 18})
# print(c, type(c))
# # Anything except sets can be serialized
#
# # 'at' opens the file in text append mode
#
# # Serialization
# # Write the JSON result to a file
# with open('b.txt', mode='at', encoding='utf-8') as f1:
#     s = json.dump([1, 2, 3, 4], f1)
#     print(s, type(s))
# # Deserialization
# res = json.dumps([1, 2, 3])
# lst = json.loads(res)
# print(lst, type(lst))
# # After deserialization a tuple is still a list
# res1 = json.dumps((1, 2, 3))
# lst1 = json.loads(res1)
# print(lst1, type(lst1))
#
# # Deserialize from a file
# with open('b.txt', encoding='utf-8') as f1:
#     res = json.load(f1)
#     print(res, type(res))
'''----------------------------------------------'''
# A JSON file is usually written once and read once.
# With a different approach we can write and read many times:
# serialize each object separately and append the resulting JSON
# strings to the file with the file's write method, one per line.
with open('json.txt', mode='at', encoding='utf-8') as f2:
    f2.write(json.dumps([1, 2, 3, 4, 5, 6]) + '\n')
    f2.write(json.dumps([1, 2, 3, 4, 5, 6]) + '\n')
# After serializing several times, deserialize the lines back one by one
with open('json.txt', mode='rt', encoding='utf-8') as f3:
    # res = json.loads(f3.readline().strip())
    # print(res, type(res))
    # res1 = json.loads(f3.readline().strip())
    # print(res1, type(res1))
    for i in f3:
        print(json.loads(i.strip()))
# json is an incomplete form of serialization: it serializes to strings only
|
[
"tianshang486@users.noreply.github.com"
] |
tianshang486@users.noreply.github.com
|
b5449d99fb517d8b9c52684e5f6997cde5af9cda
|
c7c1830f23d99806c3532b9a929c08ca0736ad58
|
/tests/distributions/test_1d_dependencies.py
|
22e5547558473d687ada797d9025dbc3d979cfa1
|
[
"MIT"
] |
permissive
|
jonathf/chaospy
|
8a92df59fd83e39bb64921586e7971c03791eea4
|
b5959a24e0bd9b214c292485919d7ce58795f5dc
|
refs/heads/master
| 2023-08-15T16:04:55.764743
| 2023-06-03T11:35:53
| 2023-06-03T11:35:53
| 22,848,758
| 405
| 87
|
MIT
| 2023-05-18T11:52:46
| 2014-08-11T17:54:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
"""Test dependent distributions with 1-D components."""
from pytest import raises
import numpy
import chaospy
DIST1 = chaospy.Uniform(1, 2)
DIST2 = chaospy.Gamma(DIST1)
JOINT1 = chaospy.J(DIST1, DIST2)
JOINT2 = chaospy.J(DIST2, DIST1)
def test_1d_stochastic_dependencies():
"""Ensure ``stochastic_dependencies`` behaves as expected for dependent 1-D distributions."""
assert not DIST1.stochastic_dependent
assert DIST2.stochastic_dependent
assert JOINT1.stochastic_dependent
assert JOINT2.stochastic_dependent
def test_1d_dependent_bounds():
"""Ensure lower and upper bounds works for dependent 1-D distributions."""
assert numpy.isclose(DIST2.lower, 0)
assert numpy.isclose(DIST2.upper, 35.84367486)
assert numpy.allclose(JOINT1.lower, [1, 0])
assert numpy.allclose(JOINT1.upper, [2, 35.84367486])
assert numpy.allclose(JOINT2.lower, [0, 1])
assert numpy.allclose(JOINT2.upper, [35.84367486, 2])
def test_1d_dependent_mapping():
"""Ensure inverse and forward behaves as expected for dependent 1-D distributions."""
grid = numpy.array([[0, 0, 1, 1], [0, 1, 0, 1]])
inv_map1 = numpy.array([[1, 1, 2, 2], [0, 32.2369909, 0, 35.84367486]])
inv_map2 = numpy.array([[0, 0, 32.2369909, 35.84367486], [1, 2, 1, 2]])
assert numpy.allclose(JOINT1.inv(grid), inv_map1)
assert numpy.allclose(JOINT2.inv(grid), inv_map2)
assert numpy.allclose(JOINT1.fwd(inv_map1), grid)
assert numpy.allclose(JOINT2.fwd(inv_map2), grid)
def test_1d_dependent_density():
"""Ensure probability density function behaves as expected for dependent 1-D distributions."""
x_loc1 = numpy.array([0.8, 1.8, 1.2, 1.8])
x_loc2 = numpy.array([2, 4, 6, 8])
y_loc1 = numpy.array([0, 1, 1, 1])
y_loc2 = numpy.array([0.1011967, 0.05961306, 0.00386314, 0.00190102])
assert numpy.allclose(
JOINT1.pdf([x_loc1, x_loc2], decompose=True, allow_approx=False),
[y_loc1, y_loc2]
)
assert numpy.allclose(
JOINT2.pdf([x_loc2, x_loc1], decompose=True, allow_approx=False),
[y_loc2, y_loc1]
)
|
[
"noreply@github.com"
] |
jonathf.noreply@github.com
|
b676b205a6acf36a4e4bf6a4dc8048857dcfd928
|
81cac5d646fc14e52b3941279d59fdd957b10f7e
|
/homeassistant/components/nest/sensor_sdm.py
|
a574e92791a4a190e6d2d5a35b3918a851ecf84f
|
[
"Apache-2.0"
] |
permissive
|
arsaboo/home-assistant
|
6b6617f296408a42874a67a71ad9bc6074acd000
|
554e51017e7b1b6949783d9684c4a0e8ca21e466
|
refs/heads/dev
| 2023-07-27T20:56:52.656891
| 2022-01-19T19:30:57
| 2022-01-19T19:30:57
| 207,046,472
| 2
| 0
|
Apache-2.0
| 2019-09-08T01:35:16
| 2019-09-08T01:35:16
| null |
UTF-8
|
Python
| false
| false
| 3,871
|
py
|
"""Support for Google Nest SDM sensors."""
from __future__ import annotations
import logging
from google_nest_sdm.device import Device
from google_nest_sdm.device_traits import HumidityTrait, TemperatureTrait
from google_nest_sdm.exceptions import ApiException
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_SUBSCRIBER, DOMAIN
from .device_info import NestDeviceInfo
_LOGGER = logging.getLogger(__name__)
DEVICE_TYPE_MAP = {
"sdm.devices.types.CAMERA": "Camera",
"sdm.devices.types.DISPLAY": "Display",
"sdm.devices.types.DOORBELL": "Doorbell",
"sdm.devices.types.THERMOSTAT": "Thermostat",
}
async def async_setup_sdm_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the sensors."""
subscriber = hass.data[DOMAIN][DATA_SUBSCRIBER]
try:
device_manager = await subscriber.async_get_device_manager()
except ApiException as err:
_LOGGER.warning("Failed to get devices: %s", err)
raise PlatformNotReady from err
entities: list[SensorEntity] = []
for device in device_manager.devices.values():
if TemperatureTrait.NAME in device.traits:
entities.append(TemperatureSensor(device))
if HumidityTrait.NAME in device.traits:
entities.append(HumiditySensor(device))
async_add_entities(entities)
class SensorBase(SensorEntity):
"""Representation of a dynamically updated Sensor."""
    _attr_should_poll = False
_attr_state_class = SensorStateClass.MEASUREMENT
def __init__(self, device: Device) -> None:
"""Initialize the sensor."""
self._device = device
self._device_info = NestDeviceInfo(device)
self._attr_unique_id = f"{device.name}-{self.device_class}"
self._attr_device_info = self._device_info.device_info
async def async_added_to_hass(self) -> None:
"""Run when entity is added to register update signal handler."""
self.async_on_remove(
self._device.add_update_listener(self.async_write_ha_state)
)
class TemperatureSensor(SensorBase):
"""Representation of a Temperature Sensor."""
_attr_device_class = SensorDeviceClass.TEMPERATURE
_attr_native_unit_of_measurement = TEMP_CELSIUS
@property
def name(self) -> str:
"""Return the name of the sensor."""
return f"{self._device_info.device_name} Temperature"
@property
def native_value(self) -> float:
"""Return the state of the sensor."""
trait: TemperatureTrait = self._device.traits[TemperatureTrait.NAME]
# Round for display purposes because the API returns 5 decimal places.
# This can be removed if the SDM API issue is fixed, or a frontend
# display fix is added for all integrations.
return float(round(trait.ambient_temperature_celsius, 1))
class HumiditySensor(SensorBase):
"""Representation of a Humidity Sensor."""
_attr_device_class = SensorDeviceClass.HUMIDITY
_attr_native_unit_of_measurement = PERCENTAGE
@property
def name(self) -> str:
"""Return the name of the sensor."""
return f"{self._device_info.device_name} Humidity"
@property
def native_value(self) -> int:
"""Return the state of the sensor."""
trait: HumidityTrait = self._device.traits[HumidityTrait.NAME]
# Cast without loss of precision because the API always returns an integer.
return int(trait.ambient_humidity_percent)
|
[
"noreply@github.com"
] |
arsaboo.noreply@github.com
|
963f0a6fa90b3ce87aa8172d17f135d79aa2b0dc
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/request/MybankCreditSceneprodFinanceConsultRequest.py
|
5fb1cae90a610a37d48aa1ca0226fff708a1ac50
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,009
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MybankCreditSceneprodFinanceConsultModel import MybankCreditSceneprodFinanceConsultModel
class MybankCreditSceneprodFinanceConsultRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, MybankCreditSceneprodFinanceConsultModel):
self._biz_content = value
else:
self._biz_content = MybankCreditSceneprodFinanceConsultModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'mybank.credit.sceneprod.finance.consult'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
5d545d7a7feb7adf03ff88f55614ff258a9f8342
|
184f13269249b08e5b62444ece10af8a3a35c9a5
|
/stop_starting_start_stopping/json_to_pandas/002_json_to_pandas.py
|
d99fc291b4c8ed7c70d85051cc1d0ae8a529d7c4
|
[
"MIT"
] |
permissive
|
bflaven/BlogArticlesExamples
|
3decf588098897b104d429054b8e44efec796557
|
aca40bb33f1ad4e140ddd67d6bb39bdd029ef266
|
refs/heads/master
| 2023-09-04T16:57:57.498673
| 2023-09-01T13:14:12
| 2023-09-01T13:14:12
| 42,390,873
| 9
| 4
|
MIT
| 2023-03-02T22:39:06
| 2015-09-13T09:48:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 796
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
[env]
To activate this environment, use :: conda activate pandas_ga_1
To deactivate an active environment, use :: conda deactivate
# you have created a env with all the required packages
source activate pandas_ga_1
[path]
cd /Users/brunoflaven/Documents/01_work/blog_articles/stop_starting_start_stopping/json_to_pandas
[file]
python 002_json_to_pandas.py
[source]
https://chrisalbon.com/code/python/data_wrangling/load_json_file_into_pandas/
'''
import numpy as np
import json
import pandas as pd
FILE_JSON_SOURCE = 'data/news_category_dataset_light_20k_v4.json'
with open(FILE_JSON_SOURCE) as project_file:
data = json.load(project_file)
df = pd.json_normalize(data)
# View the first ten rows (head() just returns a frame, so print it in a script)
print(df.head(10))
|
[
"bflaven@gmail.com"
] |
bflaven@gmail.com
|
4ecb5a23d59eb2dd2e99f7d121543dffa0cbe848
|
8a70a0b13083fa1405da0b3c5e1f2a4ea961a0b6
|
/tests/unit/states/test_boto_iam_role.py
|
ea9eca19a406f54653f5985affdf15e3d692ec7e
|
[
"Apache-2.0"
] |
permissive
|
major0/salt-aws-extension
|
2940037302e8bfcb86c1460624154a087248e7f2
|
ba3b2456a9829af47e0488157d29fd3188ce12df
|
refs/heads/master
| 2023-04-10T11:53:42.045542
| 2021-03-27T00:39:42
| 2021-03-27T00:49:43
| 351,498,283
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,470
|
py
|
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import salt.states.boto_iam_role as boto_iam_role
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock
from tests.support.mock import patch
from tests.support.unit import TestCase
class BotoIAMRoleTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.states.boto_iam_role
"""
def setup_loader_modules(self):
return {boto_iam_role: {}}
# 'present' function tests: 1
def test_present(self):
"""
Test to ensure the IAM role exists.
"""
name = "myrole"
ret = {"name": name, "result": False, "changes": {}, "comment": ""}
_desc_role = {
"create_date": "2015-02-11T19:47:14Z",
"role_id": "HIUHBIUBIBNKJNBKJ",
"assume_role_policy_document": {
"Version": "2008-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {"Service": "ec2.amazonaws.com"},
"Effect": "Allow",
}
],
},
"role_name": "myfakerole",
"path": "/",
"arn": "arn:aws:iam::12345:role/myfakerole",
}
_desc_role2 = {
"create_date": "2015-02-11T19:47:14Z",
"role_id": "HIUHBIUBIBNKJNBKJ",
"assume_role_policy_document": {
"Version": "2008-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": [
"ec2.amazonaws.com",
"datapipeline.amazonaws.com",
]
},
"Effect": "Allow",
}
],
},
"role_name": "myfakerole",
"path": "/",
"arn": "arn:aws:iam::12345:role/myfakerole",
}
mock_desc = MagicMock(side_effect=[False, _desc_role, _desc_role, _desc_role2, _desc_role])
_build_policy = {
"Version": "2008-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {"Service": "ec2.amazonaws.com"},
}
],
}
mock_policy = MagicMock(return_value=_build_policy)
mock_ipe = MagicMock(side_effect=[False, True, True, True])
mock_pa = MagicMock(side_effect=[False, True, True, True])
mock_bool = MagicMock(return_value=False)
mock_lst = MagicMock(return_value=[])
with patch.dict(
boto_iam_role.__salt__,
{
"boto_iam.describe_role": mock_desc,
"boto_iam.create_role": mock_bool,
"boto_iam.build_policy": mock_policy,
"boto_iam.update_assume_role_policy": mock_bool,
"boto_iam.instance_profile_exists": mock_ipe,
"boto_iam.list_attached_role_policies": mock_lst,
"boto_iam.create_instance_profile": mock_bool,
"boto_iam.profile_associated": mock_pa,
"boto_iam.associate_profile_to_role": mock_bool,
"boto_iam.list_role_policies": mock_lst,
},
):
with patch.dict(boto_iam_role.__opts__, {"test": False}):
comt = " Failed to create {} IAM role.".format(name)
ret.update({"comment": comt})
assert boto_iam_role.present(name) == ret
comt = " myrole role present. " "Failed to create myrole instance profile."
ret.update({"comment": comt})
assert boto_iam_role.present(name) == ret
comt = (
" myrole role present. Failed to associate myrole"
" instance profile with myrole role."
)
ret.update({"comment": comt})
assert boto_iam_role.present(name) == ret
comt = " myrole role present. Failed to update assume role" " policy."
ret.update({"comment": comt})
assert boto_iam_role.present(name) == ret
comt = " myrole role present. "
ret.update({"comment": comt, "result": True})
assert boto_iam_role.present(name) == ret
# 'absent' function tests: 1
def test_absent(self):
"""
Test to ensure the IAM role is deleted.
"""
name = "myrole"
ret = {"name": name, "result": False, "changes": {}, "comment": ""}
mock = MagicMock(
side_effect=[
["mypolicy"],
["mypolicy"],
False,
True,
False,
False,
True,
False,
False,
False,
True,
]
)
mock_bool = MagicMock(return_value=False)
mock_lst = MagicMock(return_value=[])
with patch.dict(
boto_iam_role.__salt__,
{
"boto_iam.list_role_policies": mock,
"boto_iam.delete_role_policy": mock_bool,
"boto_iam.profile_associated": mock,
"boto_iam.disassociate_profile_from_role": mock_bool,
"boto_iam.instance_profile_exists": mock,
"boto_iam.list_attached_role_policies": mock_lst,
"boto_iam.delete_instance_profile": mock_bool,
"boto_iam.role_exists": mock,
"boto_iam.delete_role": mock_bool,
},
):
with patch.dict(boto_iam_role.__opts__, {"test": False}):
comt = " Failed to add policy mypolicy to role myrole"
ret.update(
{
"comment": comt,
"changes": {
"new": {"policies": ["mypolicy"]},
"old": {"policies": ["mypolicy"]},
},
}
)
assert boto_iam_role.absent(name) == ret
comt = (
" No policies in role myrole."
" No attached policies in role myrole. Failed to disassociate "
"myrole instance profile from myrole role."
)
ret.update({"comment": comt, "changes": {}})
assert boto_iam_role.absent(name) == ret
comt = (
" No policies in role myrole."
" No attached policies in role myrole. "
" Failed to delete myrole instance profile."
)
ret.update({"comment": comt, "changes": {}})
assert boto_iam_role.absent(name) == ret
comt = (
" No policies in role myrole."
" No attached policies in role myrole. myrole instance profile "
"does not exist. Failed to delete myrole iam role."
)
ret.update({"comment": comt, "changes": {}})
assert boto_iam_role.absent(name) == ret
|
[
"major@homeonderanged.org"
] |
major@homeonderanged.org
|
1b7dda606e204c9adb7afa7747442c587010c773
|
b7f1b4df5d350e0edf55521172091c81f02f639e
|
/ui/display/manager/chromeos/DEPS
|
11d4683fe1f5978952f42d107f090539f3546ffe
|
[
"BSD-3-Clause"
] |
permissive
|
blusno1/chromium-1
|
f13b84547474da4d2702341228167328d8cd3083
|
9dd22fe142b48f14765a36f69344ed4dbc289eb3
|
refs/heads/master
| 2023-05-17T23:50:16.605396
| 2018-01-12T19:39:49
| 2018-01-12T19:39:49
| 117,339,342
| 4
| 2
|
NOASSERTION
| 2020-07-17T07:35:37
| 2018-01-13T11:48:57
| null |
UTF-8
|
Python
| false
| false
| 570
|
include_rules = [
"+chromeos",
# DeviceDataManager is not created in all environments (such as ash when
# running in mus/mash).
"-ui/events/devices/device_data_manager.h",
]
specific_include_rules = {
"default_touch_transform_setter.cc": [
# DefaultTouchTransformSetter only runs in environments where
# DeviceDataManager exists.
"+ui/events/devices/device_data_manager.h",
],
"touch_transform_controller.cc": [
"+third_party/skia",
],
"touch_transform_controller_unittest.cc": [
"+ui/events/devices/device_data_manager.h",
],
}
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
b892094c906e669ca13ea77da3f7255061dbb084
|
15fb62305a2fa0146cc84b289642cc01a8407aab
|
/Python/260-SingleNumberIII.py
|
5aa0250e74cf6f16323e8337d59d32df42e14435
|
[] |
no_license
|
geniousisme/leetCode
|
ec9bc91864cbe7520b085bdab0db67539d3627bd
|
6e12d67e4ab2d197d588b65c1ddb1f9c52a7e047
|
refs/heads/master
| 2016-09-09T23:34:03.522079
| 2015-09-23T16:15:05
| 2015-09-23T16:15:05
| 32,052,408
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
class Solution:
    # @param {integer[]} nums
    # @return {integer[]}
    def singleNumber(self, nums):
        # Keep numbers seen an odd number of times; paired numbers cancel out.
        res_dict = {}
        for num in nums:
            if res_dict.get(num):
                del res_dict[num]
            else:
                res_dict[num] = 1
        return list(res_dict.keys())
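# A constant-space alternative (a sketch, not the author's approach): XOR every
# number; the result equals a ^ b for the two singletons. Its lowest set bit
# splits nums into two groups, each containing exactly one singleton.
def single_number_xor(nums):
    xor_all = 0
    for n in nums:
        xor_all ^= n
    low_bit = xor_all & -xor_all  # a bit where the two singletons differ
    a = 0
    for n in nums:
        if n & low_bit:
            a ^= n
    return [a, xor_all ^ a]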
if __name__ == '__main__':
    s = Solution()
    nums = [1, 2, 1, 3, 2, 5]
    print(s.singleNumber(nums))
|
[
"chia-hao.hsu@aiesec.net"
] |
chia-hao.hsu@aiesec.net
|
099016f383108c0d3506befd143281fa30515a8e
|
89ce4730897195dc99072fb251c94016d27e8538
|
/p28.py
|
afd0e4d7d9db988a0597f51e15416eb43832306a
|
[] |
no_license
|
paulhunter/ProjectEuler_Python
|
aba2be51398dc5663ef361cfdd1387aeaf814232
|
b70aaac5cd37ff6f112faae874a0e8c91e7f930d
|
refs/heads/master
| 2020-05-30T14:48:49.685036
| 2013-02-25T04:43:36
| 2013-02-25T04:43:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
#Project Euler Problem 28
'''
Starting with the number 1 and moving to the right in a
clockwise direction a 5 by 5 spiral is formed as follows:
21 22 23 24 25
20 7 8 9 10
19 6 1 2 11
18 5 4 3 12
17 16 15 14 13
It can be verified that the sum of the numbers on the
diagonals is 101.
What is the sum of the numbers on the diagonals in a
1001 by 1001 spiral formed in the same way?
'''
'''
NOTES:
There is a very easy pattern to identify when creating this spiral,
after we create the 1 in the center, we add 2 to it to get 3, which
is the next diagonal, then add 2 and get 5, 2 -> 7....
You basically get a pattern of adding 2,2,2,2,4,4,4,4,6,6,6,6..... to get
the diagonals. At the end of each sequence of the same number, you have a grid that is
n+1 x n+1 if n is the number you added. so we just need to do this until we hit the fourth
time we add 1000 and we are done. EZPZ
'''
# We are going to add val+i, val+2i, val+3i and val+4i before we
# increase the value of i, so we can skip an inner loop and use the formula
# total += 4*val + 10*i, and then set val += 4*i. EZPZ
import time
t = time.perf_counter()  # time.clock() was removed in Python 3.8
total = 1  # running diagonal sum; renamed from `sum` to avoid shadowing the built-in
i = 2      # interval between consecutive diagonal values on the current ring
val = 1    # current diagonal value and our place holder for where we are
while i <= 1000:
    total += 4 * val + 10 * i
    val += 4 * i
    i += 2
print("Answer: ", total)
print("Time: ", time.perf_counter() - t)
|
[
"p.hunter.eng@gmail.com"
] |
p.hunter.eng@gmail.com
|
42deec200686e5cebbb1212044985190796f824f
|
a4deea660ea0616f3b5ee0b8bded03373c5bbfa2
|
/executale_binaries/register-variants/vpsrld_xmm_xmm_xmm.gen.vex.py
|
ab8f856bb1af006584b77351f5ffadc8b2fefaf1
|
[] |
no_license
|
Vsevolod-Livinskij/x86-64-instruction-summary
|
4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd
|
c276edab1b19e3929efb3ebe7514489f66087764
|
refs/heads/master
| 2022-02-02T18:11:07.818345
| 2019-01-25T17:19:21
| 2019-01-25T17:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
import angr
proj = angr.Project('vpsrld_xmm_xmm_xmm.exe')
print(proj.arch)
print(proj.entry)
print(proj.filename)
irsb = proj.factory.block(proj.entry).vex
irsb.pp()
|
[
"sdasgup3@illinois.edu"
] |
sdasgup3@illinois.edu
|
2d70f8ba02da9ab61aabd14e2102db3ec4010d1d
|
25ad0b1a056c6fb988fa8c8718bb7cd55388ee5f
|
/interactions/migrations/0001_initial.py
|
a00fef0274e5c3249ea75cb5c5fe438d9a63f978
|
[
"MIT"
] |
permissive
|
CSI-BennettUniversity/Sample-Project-1
|
eeab14a36eeae8d65cdd85efaf54c85732985dcf
|
23197352372b7ad00a026683477b5a95a4178e35
|
refs/heads/master
| 2023-05-13T17:25:26.652317
| 2021-06-05T14:53:15
| 2021-06-05T14:53:15
| 374,136,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,041
|
py
|
# Generated by Django 3.1 on 2020-08-04 08:38
from django.db import migrations, models
import django.db.models.deletion
import interactions.validators
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='GlobalAverages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openness', models.JSONField(editable=False)),
('conscientiousness', models.JSONField(editable=False)),
('extraversion', models.JSONField(editable=False)),
('agreeableness', models.JSONField(editable=False)),
('neuroticism', models.JSONField(editable=False)),
('calculated_on', models.DateTimeField(auto_now=True)),
],
options={
'get_latest_by': 'calculated_on',
},
),
migrations.CreateModel(
name='SelfAnswerGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer_date_and_time', models.DateTimeField(auto_now_add=True)),
('answers', models.JSONField(editable=False)),
('accuracy', models.FloatField(blank=True, editable=False, null=True, validators=[interactions.validators.percentage_validator])),
('scores', models.JSONField(editable=False)),
('self_user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='selfanswergroup_self', to='users.userprofile')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='RelationAnswerGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer_date_and_time', models.DateTimeField(auto_now_add=True)),
('answers', models.JSONField(editable=False)),
('accuracy', models.FloatField(blank=True, editable=False, null=True, validators=[interactions.validators.percentage_validator])),
('scores', models.JSONField(editable=False)),
('attempted_against', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relationanswergroup_attempted', to='interactions.selfanswergroup')),
('relation_user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relationanswergroup_relation', to='users.userprofile')),
('self_user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relationanswergroup_self', to='users.userprofile')),
],
options={
'abstract': False,
},
),
]
|
[
"diptesh.choudhuri@gmail.com"
] |
diptesh.choudhuri@gmail.com
|
5d8904e9ba7cf7983dfde2e7977062b9ad52108c
|
30cc48472cd666467a41565a456e84b4fa7da375
|
/mnist/MNIST_data/train-images-idx3-ubyte/train-images-idx3-ubyte/train-images-idx3-ubyte/train-images-idx3-ubyte/test.py
|
44d127b0123cb5cceccd6fcd21709ad0992c3016
|
[] |
no_license
|
stuian/tensorflow
|
0bed07c4ecbd7495081ceed0b6c042803457286d
|
4da9762ac09f4772f91ca6e2e356cb505b486b83
|
refs/heads/master
| 2022-01-22T00:19:38.168252
| 2019-07-26T03:01:21
| 2019-07-26T03:01:21
| 114,263,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
import numpy as np
import struct
import matplotlib.pyplot as plt
filename = 'train-images.idx3-ubyte'
binfile = open(filename, 'rb')
buf = binfile.read()
index = 0
# The IDX3 header is four big-endian unsigned 32-bit ints: magic, image count, rows, columns.
magic, numImages, numRows, numColumns = struct.unpack_from('>IIII', buf, index)
index += struct.calcsize('>IIII')
# Read the first 28x28 image (784 unsigned bytes) and display it.
im = struct.unpack_from('>784B', buf, index)
index += struct.calcsize('>784B')
im = np.array(im)
im = im.reshape(28, 28)
fig = plt.figure()
plotwindow = fig.add_subplot(111)
plt.imshow(im, cmap='gray')
plt.show()
|
[
"892545949@qq.com"
] |
892545949@qq.com
|
feb157dbafc6815b94d9a8421695893060d6345a
|
fe09b5107ec3869beed7a6c955beff7b23372f28
|
/flowitem.py
|
423cbe4486b75dc3932c16e4f25241cfb9f31d71
|
[] |
no_license
|
ArnoKasper/ProcessSim
|
d24bea86a095a774b8510fc8bbb701d37cd2762e
|
100c067ddb38f7ae094b8f61a678df81c4372d45
|
refs/heads/main
| 2023-03-14T13:50:58.371304
| 2021-03-09T14:18:48
| 2021-03-09T14:18:48
| 328,982,762
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,314
|
py
|
"""
Project: ProcessSim
Made By: Arno Kasper
Version: 1.0.0
"""
class Order(object):
# __ Set all params related to an instance of an process (order)
def __init__(self, simulation):
"""
        object having all attributes of a flow item
:param simulation: simulation object stored in simulation_model.py
:return: void
"""
# Set up individual parameters for each order ------------------------------------------------------------------
self.sim = simulation
self.customized_control = self.sim.model_panel.CUSTOM_CONTROL
# CEM params
self.entry_time = 0
# pool params
self.release = False
self.first_entry = True
self.release_time = 0
self.pool_time = 0
        # routing sequence params
if self.sim.model_panel.WC_AND_FLOW_CONFIGURATION == "GFS" or \
self.sim.model_panel.WC_AND_FLOW_CONFIGURATION == "RJS":
self.routing_sequence = self.sim.random_generator.sample(
self.sim.model_panel.MANUFACTURING_FLOOR_LAYOUT,
self.sim.random_generator.randint(1, len(self.sim.model_panel.MANUFACTURING_FLOOR_LAYOUT)))
# Sort the routing if necessary
if self.sim.model_panel.WC_AND_FLOW_CONFIGURATION == "GFS":
self.routing_sequence.sort() # GFS or PFS require sorted list of stations
elif self.sim.model_panel.WC_AND_FLOW_CONFIGURATION == "PFS":
self.routing_sequence = self.sim.model_panel.MANUFACTURING_FLOOR_LAYOUT.copy()
elif self.sim.model_panel.WC_AND_FLOW_CONFIGURATION == "PJS":
self.routing_sequence = \
self.sim.random_generator.shuffle(self.sim.model_panel.MANUFACTURING_FLOOR_LAYOUT)
else:
raise Exception("Please indicate an allowed the work centre and flow configuration")
# Make a variable independent from routing sequence to allow for queue switching
self.routing_sequence_data = self.routing_sequence[:]
        # Make library variables according to routing_sequence ----------------------------------------------------------
# process time
self.process_time = {}
self.process_time_cumulative = 0
# priority
self.dispatching_priority = {}
# data collection variables
self.queue_entry_time = {}
self.proc_finished_time = {}
self.queue_time = {}
self.order_start_time = {}
self.machine_route = {} # tracks which machine was used
for WC in self.routing_sequence:
# Type of process time distribution
if self.sim.model_panel.PROCESS_TIME_DISTRIBUTION == "2_erlang":
self.process_time[WC] = self.sim.general_functions.two_erlang_truncated()
elif self.sim.model_panel.PROCESS_TIME_DISTRIBUTION == "lognormal":
self.process_time[WC] = self.sim.general_functions.log_normal_truncated()
elif self.sim.model_panel.PROCESS_TIME_DISTRIBUTION == "constant":
self.process_time[WC] = self.sim.model_panel.MEAN_PROCESS_TIME
else:
raise Exception("Please indicate a allowed process time distribution")
# calculate cum
self.process_time_cumulative += self.process_time[WC]
self.dispatching_priority[WC] = 0
# data collection variables
self.queue_entry_time[WC] = 0
self.proc_finished_time[WC] = 0
self.queue_time[WC] = 0
self.order_start_time[WC] = 0
self.machine_route[WC] = "NOT_PASSED"
# Due Date -----------------------------------------------------------------------------------------------------
self.due_date = None
if self.customized_control:
            self.due_date = self.sim.customized_settings.due_date(order=self)
if self.due_date is None:
if self.sim.policy_panel.due_date_method == "random":
self.due_date = self.sim.general_functions.random_value_DD()
elif self.sim.policy_panel.due_date_method == "factor_k":
self.due_date = self.sim.general_functions.factor_K_DD(order=self)
elif self.sim.policy_panel.due_date_method == "constant":
self.due_date = self.sim.general_functions.add_contant_DD(order=self)
elif self.sim.policy_panel.due_date_method == "total_work_content":
self.due_date = self.sim.general_functions.total_work_content(order=self)
else:
raise Exception("Please indicate a allowed due date procedure")
self.PRD = self.due_date - (len(self.routing_sequence) * self.sim.policy_panel.PRD_k)
self.ODDs = {}
if self.sim.policy_panel.dispatching_rule == "ODD_k":
for WC in self.routing_sequence:
self.ODDs[WC] = self.due_date - (
(len(self.routing_sequence) - (self.routing_sequence.index(WC) + 1)) * self.sim.policy_panel.ODD_k)
# Other order parameters ---------------------------------------------------------------------------------------
# data collection
self.finishing_time = 0
# Other
self.continuous_trigger = False
return
|
[
"61021358+ZanyAK@users.noreply.github.com"
] |
61021358+ZanyAK@users.noreply.github.com
|
59a4e0f3a4ab3f6b739ff52fa142fce661828d63
|
2dce494ad49876efcbc2ac08fb4e74d56a524e05
|
/models/networks.py
|
c2b1717aba42ef737b1f7aad5fb409f1dacd759e
|
[] |
no_license
|
Chilie/HDC2021_code
|
c06f2c4d024ccd08d93271893c4e87a1f9086a54
|
72482ed6037feacc45683f5e83267ce11c2697d8
|
refs/heads/main
| 2023-09-01T03:57:37.726998
| 2021-09-29T15:11:59
| 2021-09-29T15:11:59
| 410,872,698
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,761
|
py
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
import numpy as np
from models.fpn_mobilenet import FPNMobileNet
from models.fpn_inception import FPNInception
from models.fpn_inception_cconditionalsr import FPNInceptionCCSR
from models.fpn_inception_simple import FPNInceptionSimple
from models.fpn_densenet import FPNDense
###############################################################################
# Functions
###############################################################################
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.bn = nn.BatchNorm2d(num_features, affine=False)
if self.bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
            self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
out = self.bn(x)
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=1)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * out
return out
class ConditionalInstanceNorm2d(nn.Module):
def __init__(self, num_features, num_classes=20, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
if bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
            self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
h = self.instance_norm(x)
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=-1)
out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=True, track_running_stats=True) # False
elif norm_type == 'conditional-batch':
norm_layer = ConditionalBatchNorm2d
elif norm_type == 'conditional-instance':
norm_layer = ConditionalInstanceNorm2d
elif norm_type == 'conditional-instance10':
norm_layer = functools.partial(ConditionalInstanceNorm2d, num_classes=10)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
##############################################################################
# Classes
##############################################################################
def concatenation(input1, input2):
inputs_shapes2 = [input1.shape[2], input2.shape[2]]
inputs_shapes3 = [input1.shape[3], input2.shape[3]]
if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all(np.array(inputs_shapes3) == min(inputs_shapes3)):
inputs_ = [input1, input2]
else:
target_shape2 = min(inputs_shapes2)
target_shape3 = min(inputs_shapes3) # Get the minimal size of the input.
inputs_ = []
for inp in [input1, input2]:
diff2 = (inp.size(2) - target_shape2) // 2
diff3 = (inp.size(3) - target_shape3) // 2
inputs_.append(inp[:, :, diff2: diff2 + target_shape2, diff3:diff3 + target_shape3]) # Cut the redundant dimensions
return torch.cat(inputs_, dim=1)
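# Quick behavioural sketch (hypothetical shapes): the larger map is centre-cropped
# to the smaller one before channel-wise concatenation.
# a = torch.randn(1, 8, 17, 17)
# b = torch.randn(1, 8, 16, 16)
# concatenation(a, b).shape  # -> torch.Size([1, 16, 16, 16])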
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc=1, output_nc=1, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, use_parallel=True, learn_residual=True, padding_type='reflect'):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.use_parallel = use_parallel
self.learn_residual = learn_residual
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
# model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
output = self.model(input)
if self.learn_residual:
output = input + output
            output = torch.clamp(output, min=-1, max=1)
return output
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class DiscriminatorTail(nn.Module):  # renamed from the misspelled `DicsriminatorTail`
    def __init__(self, nf_mult, n_layers, ndf=64, norm_layer=nn.BatchNorm2d, use_parallel=True):
        super(DiscriminatorTail, self).__init__()
self.use_parallel = use_parallel
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = int(np.ceil((kw-1)/2))
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence = [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
class MultiScaleDiscriminator(nn.Module):
def __init__(self, input_nc=3, ndf=64, norm_layer=nn.BatchNorm2d, use_parallel=True):
super(MultiScaleDiscriminator, self).__init__()
self.use_parallel = use_parallel
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = int(np.ceil((kw-1)/2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
for n in range(1, 3):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
self.scale_one = nn.Sequential(*sequence)
        self.first_tail = DiscriminatorTail(nf_mult=nf_mult, n_layers=3)
nf_mult_prev = 4
nf_mult = 8
self.scale_two = nn.Sequential(
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True))
nf_mult_prev = nf_mult
        self.second_tail = DiscriminatorTail(nf_mult=nf_mult, n_layers=4)
self.scale_three = nn.Sequential(
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True))
        self.third_tail = DiscriminatorTail(nf_mult=nf_mult, n_layers=5)
def forward(self, input):
x = self.scale_one(input)
x_1 = self.first_tail(x)
x = self.scale_two(x)
x_2 = self.second_tail(x)
x = self.scale_three(x)
x = self.third_tail(x)
return [x_1, x_2, x]
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_parallel=True):
super(NLayerDiscriminator, self).__init__()
self.use_parallel = use_parallel
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = int(np.ceil((kw-1)/2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
def get_fullD(model_config):
model_d = NLayerDiscriminator(n_layers=5,
# norm_layer=get_norm_layer(norm_type=model_config['norm_layer']),
use_sigmoid=False)
return model_d
def get_generator(model_config):
generator_name = model_config['g_name']
if generator_name == 'resnet':
model_g = ResnetGenerator(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']),
use_dropout=model_config['dropout'],
n_blocks=model_config['blocks'],
learn_residual=model_config['learn_residual'])
elif generator_name == 'fpn_mobilenet':
model_g = FPNMobileNet(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']))
elif generator_name == 'fpn_inception':
model_g = FPNInception(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']),output_ch=3)
elif generator_name == 'fpn_inceptioncc+sr':
model_g = FPNInceptionCCSR(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']),output_ch=3)
elif generator_name == 'fpn_inception_simple':
model_g = FPNInceptionSimple(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']))
elif generator_name == 'fpn_dense':
model_g = FPNDense()
else:
raise ValueError("Generator Network [%s] not recognized." % generator_name)
return nn.DataParallel(model_g)
def get_discriminator(model_config):
discriminator_name = model_config['d_name']
if discriminator_name == 'no_gan':
model_d = None
elif discriminator_name == 'patch_gan':
model_d = NLayerDiscriminator(n_layers=model_config['d_layers'],
norm_layer=get_norm_layer(norm_type=model_config['norm_layer']),
use_sigmoid=False)
model_d = nn.DataParallel(model_d)
elif discriminator_name == 'double_gan':
patch_gan = NLayerDiscriminator(n_layers=model_config['d_layers'],
# norm_layer=get_norm_layer(norm_type=model_config['norm_layer']),
use_sigmoid=False)
patch_gan = nn.DataParallel(patch_gan)
full_gan = get_fullD(model_config)
full_gan = nn.DataParallel(full_gan)
model_d = {'patch': patch_gan,
'full': full_gan}
elif discriminator_name == 'multi_scale':
model_d = MultiScaleDiscriminator(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']))
model_d = nn.DataParallel(model_d)
else:
raise ValueError("Discriminator Network [%s] not recognized." % discriminator_name)
return model_d
def get_nets(model_config):
return get_generator(model_config), get_discriminator(model_config)
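# --- Added usage sketch (not part of the original file) ---
# A minimal smoke test for the networks defined above, assuming this module's
# usual imports (torch, nn, functools, np) are present at the top of the file;
# the config keys mirror those read by get_generator / get_discriminator, but
# the values below are illustrative, not the project's canonical settings.
if __name__ == '__main__':
    import torch
    netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    out = netD(torch.randn(1, 3, 256, 256))
    print(out.shape)  # 1-channel patch map of realism scores, e.g. [1, 1, 35, 35] when padw == 2
    config = {'g_name': 'fpn_inception', 'norm_layer': 'instance',
              'd_name': 'double_gan', 'd_layers': 3}
    netG, netD = get_nets(config)  # for 'double_gan', netD is {'patch': ..., 'full': ...}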
|
[
"matliji@nus.edu.sg"
] |
matliji@nus.edu.sg
|
fa802519ff310cbc0067f8afe4833b3721023352
|
c18780e8d58422ad2d63515484101f227d30a8e8
|
/tools/c7n_policystream/setup.py
|
fa8d0fc26246a52c76e428ff14f5ec60583525f0
|
[
"Apache-2.0"
] |
permissive
|
stefangordon/cloud-custodian
|
18dc02d5f605b4896d745e51d7005055e595e45a
|
099d9c03f787b43c97e2acb9bbf97bc5a092b644
|
refs/heads/master
| 2021-07-14T14:21:38.529152
| 2021-02-16T15:22:38
| 2021-02-16T15:22:38
| 125,872,127
| 0
| 0
|
Apache-2.0
| 2020-05-25T13:49:14
| 2018-03-19T14:37:36
|
Python
|
UTF-8
|
Python
| false
| false
| 5,399
|
py
|
# Automatically generated from poetry/pyproject.toml
# flake8: noqa
# -*- coding: utf-8 -*-
from setuptools import setup
modules = \
['policystream']
install_requires = \
['argcomplete (>=1.12.2,<2.0.0)',
'attrs (>=20.3.0,<21.0.0)',
'boto3 (>=1.17.5,<2.0.0)',
'boto3>=1.12.0,<2.0.0',
'botocore (>=1.20.5,<2.0.0)',
'c7n (>=0.9.10,<0.10.0)',
'click>=7.0,<8.0',
'importlib-metadata (>=3.4.0,<4.0.0)',
'jmespath (>=0.10.0,<0.11.0)',
'jsonpickle (>=1.3,<2.0)',
'jsonschema (>=3.2.0,<4.0.0)',
'pygit2>=1.0,<1.1',
'pyrsistent (>=0.17.3,<0.18.0)',
'python-dateutil (>=2.8.1,<3.0.0)',
'pyyaml (>=5.4.1,<6.0.0)',
'pyyaml>=5.3,<6.0',
'requests>=2.22.0,<3.0.0',
's3transfer (>=0.3.4,<0.4.0)',
'six (>=1.15.0,<2.0.0)',
'tabulate (>=0.8.7,<0.9.0)',
'typing-extensions (>=3.7.4.3,<4.0.0.0)',
'urllib3 (>=1.26.3,<2.0.0)',
'zipp (>=3.4.0,<4.0.0)']
entry_points = \
{'console_scripts': ['c7n-policystream = policystream:cli']}
setup_kwargs = {
'name': 'c7n-policystream',
'version': '0.4.9',
'description': 'Cloud Custodian - Git Commits as Logical Policy Changes',
    'long_description': '# c7n-policystream: Policy Changes from Git\n\n[//]: # ( !!! IMPORTANT !!! )\n[//]: # (This file is moved during document generation.)\n[//]: # (Only edit the original document at ./tools/c7n_policystream/README.md)\n\nUsing custodian in accordance with infrastructure as code principles,\nwe store policy assets in a versioned control repository. This\nprovides for an audit log and facilitates code reviews. However this\ncapability is primarily of use to humans making semantic interpretations\nof changes.\n\nThis script also provides logical custodian policy changes over a git\nrepo and allows streaming those changes for machine readable/application\nconsumption. Its typically used as a basis for CI integrations or indexes\nover policies.\n\nTwo example use cases:\n\n - Doing dryrun only on changed policies within a pull request\n - Constructing a database of policy changes.\n\nPolicystream works on individual github repositories, or per Github integration\nacross an organization\'s set of repositories.\n\n## Install\n\npolicystream can be installed via pypi, provided the require pre-requisites\nlibraries are available (libgit2 > 0.26)\n\n```\npip install c7n-policystream\n```\n\nDocker images available soon, see build for constructing your own.\n\n## Build\n\nAlternatively a docker image can be built as follows\n\n```shell\n# Note must be top level directory of checkout\ncd cloud-custodian\n\ndocker build -t policystream:latest -f tools/c7n_policystream/Dockerfile .\n\ndocker run --mount src="$(pwd)",target=/repos,type=bind policystream:latest\n```\n\n## Usage\n\nStreaming use case (default stream is to stdout, also supports kinesis, rdbms and sqs)\n\n```\n $ c7n-policystream stream -r foo\n 2018-08-12 12:37:00,567: c7n.policystream:INFO Cloning repository: foo\n <policy-add policy:foi provider:aws resource:ec2 date:2018-08-02T15:13:28-07:00 author:Kapil commit:09cb85>\n <policy-moved policy:foi provider:aws resource:ec2 date:2018-08-02T15:14:24-07:00 author:Kapil commit:76fce7>\n <policy-remove policy:foi provider:aws resource:ec2 date:2018-08-02T15:14:46-07:00 author:Kapil commit:570ca4>\n <policy-add policy:ec2-guard-duty provider:aws resource:ec2 date:2018-08-02T15:14:46-07:00 author:Kapil commit:570ca4>\n <policy-add policy:ec2-run provider:aws resource:ec2 date:2018-08-02T15:16:00-07:00 author:Kapil commit:d3d8d4>\n <policy-remove policy:ec2-run provider:aws resource:ec2 date:2018-08-02T15:18:31-07:00 author:Kapil commit:922c1a>\n <policy-modified policy:ec2-guard-duty provider:aws resource:ec2 date:2018-08-12T09:39:43-04:00 author:Kapil commit:189ea1>\n 2018-08-12 12:37:01,275: c7n.policystream:INFO Streamed 7 policy changes\n```\n\nPolicy diff between two source and target revision specs. If source\nand target are not specified default revision selection is dependent\non current working tree branch. The intent is for two use cases, if on\na non-master branch then show the diff to master. If on master show\nthe diff to previous commit on master. For repositories not using the\n`master` convention, please specify explicit source and target.\n\n\n```\n $ c7n-policystream diff -r foo -v\n```\n\nPull request use, output policies changes between current branch and master.\n\n```\n $ c7n-policystream diff -r foo\n policies:\n - filters:\n - {type: cross-account}\n name: lambda-access-check\n resource: aws.lambda\n```\n\n## Options\n\n```\n$ c7n-policystream --help\nUsage: c7n-policystream [OPTIONS] COMMAND [ARGS]...\n\n Policy changes from git history\n\nOptions:\n --help Show this message and exit.\n\nCommands:\n diff Policy diff between two arbitrary revisions.\n org-checkout Checkout repositories from a GitHub organization.\n org-stream Stream changes for repos in a GitHub organization.\n stream Stream git history policy changes to destination.\n```\n',
'long_description_content_type': 'text/markdown',
'author': 'Cloud Custodian Project',
'author_email': None,
'maintainer': None,
'maintainer_email': None,
'url': 'https://cloudcustodian.io',
'py_modules': modules,
'install_requires': install_requires,
'entry_points': entry_points,
'python_requires': '>=3.6,<4.0',
}
setup(**setup_kwargs)
|
[
"noreply@github.com"
] |
stefangordon.noreply@github.com
|
c59a9e19a3ac0d9385b405c97d8d7785de977646
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03284/s851420398.py
|
cab7da04f619aeda933ba8430cc5633f70494311
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
n, k = map(int, input().split())
h = [0]*k
while True:
for j in range(k):
h[j] += 1
n -= 1
if n <= 0:
print(max(h) - min(h))
exit()
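# Added commentary: hands out n items one at a time over k slots in round-robin
# order, then prints the difference between the fullest and emptiest slot.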
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d682999bd96d9638a182f78f631d75cb4acf4bbd
|
b4e704f67214028cd0ba8b7a641a2267faf606e4
|
/projectmf/functional_tests/fulldayofeating/test_calculate_full_day_of_eating_without_ingredient.py
|
5eb64f17f3655f768d4b3d1a671fb416382b07d2
|
[] |
no_license
|
matthias4366/mf-2
|
d21b32aae9ae617d78b4b1541edc196f6f822e4e
|
73e2110c031c8c17596e27af1143b557b1062b3c
|
refs/heads/master
| 2022-10-23T05:54:32.889983
| 2020-06-11T15:40:04
| 2020-06-11T15:40:04
| 190,999,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,847
|
py
|
from selenium.webdriver.support.ui import Select
from functional_tests.utils.click_navbar_item import \
click_navbar_item
from ..base import FunctionalTestWithUserLoggedIn
from selenium.webdriver.common.keys import Keys
import time
from measuredfood.models import (
FullDayOfEating,
NutrientProfile,
)
import json
# import the ingredient dictionaries
import sys
sys.path.insert(0, '/projectmf/data/')
sys.path.append('..')
sys.path.append('...')
sys.path.append('....')
sys.path.append('.....')
sys.path.append('......')
sys.path.append('.......')
with open(
'data/nutrient_profile/nutrient_profile_sandor_clegane.json',
'r') as fp:
nutrient_profile_dict_list = json.load(fp)
class FullDayOfEatingTest(FunctionalTestWithUserLoggedIn):
def test_calculate_full_day_of_eating_without_ingredient(self):
"""
The test case is the creation of a full day of eating without a
SpecificIngredient. It is successful if the correct error message is
produced.
"""
# The user objects can be accessed with self.user.
# Create a NutrientProfile.
# Simulate clicking on the navbar item for nutrient profiles.
click_navbar_item(
'id_menu_item_nutrient_profiles',
self.browser,
Keys,
time,
)
time.sleep(0.1)
# Add the first nutrient profile from the list of nutrient profiles
# saved in the fixtures.
new_nutrient_profile_button = self.browser.find_element_by_id(
'id_button_new_nutrient_profile'
)
new_nutrient_profile_button.click()
time.sleep(0.1)
# Iterator for the nutrient profile.
k_np = 0
for key, value in nutrient_profile_dict_list[k_np].items():
id_from_key = 'id_' + key
if value is not None:
self.browser.find_element_by_id(id_from_key).send_keys(
str(value)
)
# Simulate clicking the save button
save_button = self.browser.find_element_by_id(
'id_button_save_new_nutrientprofile'
)
save_button.click()
time.sleep(1)
# Test whether the saved nutrient profile is in the database.
nutrient_profile_query = NutrientProfile.objects.filter(
name=nutrient_profile_dict_list[k_np]['name']
)
nutrient_profile_was_saved = nutrient_profile_query.exists()
self.assertTrue(nutrient_profile_was_saved)
# Create FullDayOfEating object.
# Simulate clicking the navbar item Full days of eating.
click_navbar_item(
'id_menu_item_fulldayofeating',
self.browser,
Keys,
time,
)
time.sleep(0.1)
# Simulate click on 'New full day of eating' button.
new_fulldayofeating_button = self.browser.find_element_by_id(
'id_button_new_fulldayofeating'
)
new_fulldayofeating_button.click()
name_dummy_full_day_of_eating = 'Dummy full day of eating'
# Type in the name of the new full day of eating.
self.browser.find_element_by_id('id_name').send_keys(
name_dummy_full_day_of_eating
)
# From the nutrient profile dropdown menu, select the nutrient
# profile that was created at the beginning of this test.
select_nutrient_profile = Select(self.browser.find_element_by_id(
'id_nutrient_profile'
))
select_nutrient_profile.select_by_visible_text(
nutrient_profile_dict_list[k_np]['name']
)
save_full_day_of_eating_button = self.browser.find_element_by_id(
'id_button_save_new_fulldayofeating'
)
save_full_day_of_eating_button.click()
# Check that full day of eating object exists in the database.
full_day_of_eating_query = FullDayOfEating.objects.filter(
name=name_dummy_full_day_of_eating
)
full_day_of_eating_was_saved = full_day_of_eating_query.exists()
self.assertTrue(full_day_of_eating_was_saved)
time.sleep(0.5)
# Click the button 'Calculate full day of eating'.
calculate_button = self.browser.find_element_by_id(
'id_button_calculate_full_day_of_eating'
)
calculate_button.click()
# time.sleep(7)
# Check if the correct error message is displayed.
# id = NoSpecificIngredientInFullDayOfEatingError
# Test whether the appropriate error page is shown.
error_paragraph = self.browser.find_elements_by_id(
'NoSpecificIngredientInFullDayOfEatingError'
)
error_page_is_shown = \
len(error_paragraph) > 0
self.assertTrue(error_page_is_shown)
|
[
"spam.matthias.h.schulz@gmail.com"
] |
spam.matthias.h.schulz@gmail.com
|
f2d230e0ae384792d15e162bc278f5e4f24b7a82
|
faf2d6fd277ef996359e0841dc77c440152c21eb
|
/main1.py
|
c0f3cc23ae68382bd71eb9753d8713ca94867b8b
|
[] |
no_license
|
Nithinpadmaprabhu/Air-Quality
|
cf41340d9797e413c1c900199e99c5df43b282df
|
674afcc5ea3a01b69b6b77d1d5e7e1eb0c621764
|
refs/heads/master
| 2021-03-26T15:59:42.724411
| 2020-03-16T14:22:13
| 2020-03-16T14:22:13
| 247,720,263
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,552
|
py
|
#!/usr/bin/python3
'''
Created on 01 October 2017
@author: Yahya Almardeny
This is the main entry point to the program
The Flow of this program goes here
'''
import time as t , socket, os, datetime
from mq import MQ
from mcp3008 import MCP3008
from point import Point
from groveO2 import Grove_O2
from soundDetector import Sound_Detector
from tmp36 import TMP36
from humidity import Humidity
from gp2y import *
import promptWindow as window
import db as db
if __name__ == '__main__':
pass
########################### Format JSON ############################
# Input: device name, sensors readings in JSON format
# Output: the readings wrapped in a JSON-formatted string keyed by the device name
###################################################################
def format(deviceName, *args):
data = "{\"" +deviceName+"\":["
for i in range(len(args)):
if(i==len(args)-1):
data += str(args[i])
else:
data += str(args[i]) +","
data += "]}"
return data
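# Example (added commentary): format("pi-01", '{"temp": 21.5}', '{"o2": 20.9}')
# returns '{"pi-01":[{"temp": 21.5},{"o2": 20.9}]}'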
while True:
    # global-scope variables that set the interval for sending data to the database (a value of 3600 would mean once per hour)
    global counter, limit
    counter, limit = 0, 30.0  # every 30 seconds, i.e. 120 readings per hour of deployment (gives a nice, uncrowded graph)
# Ask user for Device and Server Information at startup in the first run
window.get_information("/home/pi/Desktop/EAPMS/device_info.csv")
# Data for MQ Sensors from Datasheet
mq2_data = {"Smoke":[Point(200,3.45) ,Point(500,2.5), Point(800,2), Point(1000,1.9),
Point(1600,1.65), Point(2000,1.5),Point(3000,1.3),Point(5000,0.92), Point(10000,0.6)],
"LPG":[Point(200,1.75) ,Point(500,1.2), Point(800,0.88), Point(1000,0.68),
Point(1600,0.56), Point(2000,0.48),Point(3000,0.38),Point(5000,0.28), Point(10000,0.24)],
"Propane":[Point(200,1.78) ,Point(500,1.2), Point(800,0.88), Point(1000,0.7),
Point(1600,0.6), Point(2000,0.5),Point(3000,0.40),Point(5000,0.30), Point(10000,0.25)],
"CH4":[Point(200,3) ,Point(500,2.4), Point(800,1.9), Point(1000,1.8),
Point(1600,1.6), Point(2000,1.4),Point(3000,1.3),Point(5000,0.92), Point(10000,0.7)]};
mq135_data = {"CO2":[Point(10,2.5) ,Point(40,1.5), Point(100,1.1), Point(200,0.8)],
"CO":[Point(10,2.9) ,Point(40,1.95), Point(100,1.7), Point(200,1.5)],
"NH4":[Point(10,2.7) ,Point(40,1.53), Point(100,1), Point(200,0.785)],
"Benzene":[Point(10,1.6) ,Point(40,1.1), Point(100,0.8), Point(200,0.65)]};
mq131_data = {"Ozone":[Point(5,6),Point(10,4),Point(20, 1.5),Point(100,0.5)],
"NO2": [Point(5,9),Point(10,8), Point(20,7), Point(100,4.5)],
"CL2": [Point(5,8),Point(10,6.8), Point(20,4.8), Point(100,0.8)]};
so2_data = {"SO2":[Point(35,1),Point(100,1.5),Point(200, 1.95),
Point(300,2.2),Point(400,2.35),Point(500,2.45),Point(600,2.5)]};
# Initalize the required sensors devices
mcp = MCP3008()
mq2 = MQ("MQ2", mcp,0,5,9.8, mq2_data)
mq135 = MQ("MQ135", mcp,1,20,3.75, mq135_data)
mq131 = MQ("MQ131", mcp, 2, 20,20,mq131_data)
o2 = Grove_O2(mcp, 3, 3.3, 7.43)
noise = Sound_Detector(mcp, 4)
sh12 = MQ("2SH12", mcp, 5, 50, 6, so2_data)
tmp36 = TMP36(mcp, 6, 3.3)
humidity = Humidity(mcp, 7, 3.3)
pm = GP2Y(13, 3.3, 1)
# Initialize Connection with UDP Server & MySQL Server
device_infoFile = open("/home/pi/Desktop/EAPMS/device_info.csv", 'r')
info = device_infoFile.read().split(',')
deviceName, serverIp, port = info[0],str(info[1]), int(info[2])
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cursor = db.connect_to_mysqlServer(serverIp, 'pi', 'pi', 'eapmsDB', 'data');
while True:
mq2R, mq135R, mq131R, sh12R, o2R, noiseR, tempR, humidityR, pmR = mq2.read(),mq135.read(),mq131.readPPB(), sh12.read(), o2.read(), noise.read(), tmp36.read(), humidity.read(tmp36.readTemp()), pm.read();
data = format(deviceName,mq2R, mq135R, mq131R, sh12R, o2R, noiseR, tempR, humidityR, pmR)
print(data)
serverSocket.sendto(bytes(data, 'UTF-8'), (serverIp, port)) # send data in JSON format to the Server
counter = db.send_to_database(cursor, deviceName, counter, limit, mq2R, mq135R, mq131R, sh12R, o2R, noiseR, tempR, humidityR, pmR) # send data to database on the Server
t.sleep(1)
|
[
"npadmaprabhu@tssg.org"
] |
npadmaprabhu@tssg.org
|
54efbe8f8beeb4862f01be570c1ab80e8425d577
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerrank/Python/Validating Credit Card Numbers/solution.py
|
84526230cfaed162d112747247d775c3adf85975
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 233
|
py
|
import re
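# Pattern notes (added commentary):
#   ^[456]             card number must start with 4, 5 or 6
#   \d{15}             ...then 15 more digits (16 digits, no separators), or
#   \d{3}(-\d{4}){3}   ...3 more digits, then three '-'-separated groups of 4
#   (\d)\1\1\1         second check rejects 4+ consecutive repeated digits once '-' is stripped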
for _ in range(int(input())):
s = input().strip()
if re.match(r'^[456](\d{15}|\d{3}(-\d{4}){3})$', s) and not re.search(r'(\d)\1\1\1', s.replace('-', '')):
print('Valid')
else:
print('Invalid')
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
c87b761f3286e13710b7d0867cbfa6847bae3448
|
6b0b119709ea55a06aa982bb022fc96d85c0cd68
|
/ex_4_2.py
|
c4c2f537ef7535828e929f21eb7e7d3c81cf3884
|
[] |
no_license
|
VincentLu91/Python
|
878ebaf1ed845463c6acc9997be5a36d25edfb2d
|
27dcf28c0e5b0c5d16132b9a38acfea3828fdf4c
|
refs/heads/master
| 2020-06-01T07:00:27.772672
| 2014-11-17T19:54:20
| 2014-11-17T19:54:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
'''
Ask the user to type something (use raw_input). To find out whether the input
was a number, compare whether the input is after "0" and before ":" in
alphabetical order. If it is a number convert it into an integer. Then print
the input and its type. (Note: this won't work if the user enters a real
number.)
'''
n = raw_input("Type something: ")
if (n > "0") and (n < ":"):
n = int(n) # convert input number to integer
print "The input is ", n
print "The type of this input is ", type(n)
|
[
"vincentlu@hotmail.ca"
] |
vincentlu@hotmail.ca
|
b9c170fac8be79c709b437a07518b7f7c61d83c9
|
cdd6c79dc5d42af57dc305c45f6d773383a9b433
|
/weather/migrations/0001_initial.py
|
219e63f83774f072cdd214131d944e84b0a897f8
|
[] |
no_license
|
ins099/weatherapp
|
021c14a71daf9e4499be17eabf7a385158744a65
|
ca11e8ab7ade6017ce2b91657d40eeda3141bc62
|
refs/heads/master
| 2022-12-21T11:22:36.720534
| 2020-09-22T18:24:47
| 2020-09-22T18:24:47
| 297,736,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
# Generated by Django 3.1.1 on 2020-09-22 15:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25)),
],
),
]
|
[
"alaminsaram92@gmail.com"
] |
alaminsaram92@gmail.com
|
ce2c657ba2500a9211c7b986a84657c4bcb1bc17
|
f70fd4b12df2d4456a0353c8b43d48f52441d204
|
/apps/users/adminx.py
|
d19cc196af3ec8d246fdd1ddc52d1eef2e347daf
|
[] |
no_license
|
gauravjha111/Online-Learning-Platform-based-on-Django-Admin
|
698cac2cc8f0852f046060e800985a3125abf160
|
b22dccf9536ecd21283e1f6b6d0ee6360b6c09fc
|
refs/heads/master
| 2021-08-07T04:59:42.938019
| 2017-11-07T15:36:08
| 2017-11-07T15:36:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,251
|
py
|
# _*_ coding: utf-8 _*_
__author__ = 'Yujia Lian'
__date__ = '6/6/17 1:52 AM'
import xadmin
from .models import EmailVerifyRecord, Banner, UserProfile
from xadmin import views
from xadmin.layout import Fieldset, Main, Side, Row
from xadmin.plugins.auth import UserAdmin
class BaseSetting(object):
enable_themes = True
use_bootswatch = True
class UserProfileAdmin(UserAdmin):
pass
class GlobalSettings(object):
site_title = "Geek education backend manage system"
site_footer = "Geek education all rights reserved. Contact:yujia.lian001@gmail.com"
menu_style = "accordion"
class EmailVerifyRecordAdmin(object):
list_display = ['code', 'email', 'send_type', 'send_time']
search_fields = ['code', 'email', 'send_type']
list_filter = ['code', 'email', 'send_type', 'send_time']
class BannerAdmin(object):
list_display = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSettings)
|
[
"yujia.lian001@gmail.com"
] |
yujia.lian001@gmail.com
|
1c85c1d3be0dee8a1f0ef5c6fa751e8de61604ad
|
17a6a8e37dfb3ab1121009c8cf77c0d4d2454d4b
|
/news_crawler/modules/clean_text.py
|
716d5e4f62d53af6b5aa08c027386362f58050fe
|
[] |
no_license
|
andymithamclarke/fashionjoblosses
|
2c0031e3ba865b4e13196c43ca55f0865b908924
|
dbad52fd6820a777ff4485a833f011969c53a4bf
|
refs/heads/master
| 2023-04-14T14:11:36.889824
| 2020-10-29T15:19:34
| 2020-10-29T15:19:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
# =========================
# Clean the scraped text of irregularities
# =========================
# Irregularities found:
# - '\n' can appear within the body of the text
# - <p> elements can be too short and thus useless
# Solutions implemented:
# - Returning the <p> element only if its length is greater than 10 chars
# __Note__:
# Expecting that there will be more solutions required as the volume of the corpus increases
# ================
# IMPORTS
# ================
import re
# Function to remove tags
def remove_tags(text):
TAG_RE = re.compile(r'<[^>]+>')
return TAG_RE.sub('', text)
# Core function that is called by article_scraper
def clean_text(full_text_list):
return [remove_tags(item) for item in full_text_list]
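# Added usage example (not part of the original module):
if __name__ == '__main__':
    scraped = ['<p>Fashion retailer cuts 1,200 jobs</p>', '<span>More losses expected</span>']
    print(clean_text(scraped))
    # -> ['Fashion retailer cuts 1,200 jobs', 'More losses expected']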
|
[
"andrewjhclarke@gmail.com"
] |
andrewjhclarke@gmail.com
|
300c776ef0c3046017fa26b509976d23f5146208
|
226e283c7a367e61a55cd9cc6d2549409522756d
|
/Manuel_upper/Board.py
|
0aae053e21ce884612b2cbfe7350660e753a4434
|
[] |
no_license
|
diontran/GameSearchROPASCI360
|
fe2454d1b55736494739b307b3410a8f3b87cacd
|
c6cf333dd72c3e746eff7b376d577eab53f29a9a
|
refs/heads/main
| 2023-05-13T16:59:25.785871
| 2021-06-02T17:33:45
| 2021-06-02T17:33:45
| 373,251,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,672
|
py
|
import itertools
class Board:
"""A class representing the board to the player, providing information for the player to make a decision"""
def __init__(self, player_type):
self.player_type = player_type
self.game_state = {'R':[], 'P':[], 'S':[], 'r':[], 'p':[], 's':[]}
    # A list of lists of hexes, where each inner list represents one row on the board
board_hex = [[(4, -4),(4, -3),(4, -2),(4, -1),(4, -0)],
[(3, -4),(3, -3),(3, -2),(3, -1),(3, 0),(3, 1)],
[(2, -4),(2, -3),(2, -2),(2, -1),(2, 0),(2, 1),(2, 2)],
[(1, -4),(1, -3),(1, -2),(1, -1),(1, 0),(1, 1),(1, 2),(1, 3)],
[(0, -4),(0, -3),(0, -2),(0, -1),(0, 0),(0, 1),(0, 2),(0, 3),(0, 4)],
[(-1, -3),(-1, -2),(-1, -1),(-1, 0),(-1, 1),(-1, 2),(-1, 3),(-1, 4)],
[(-2, -2),(-2, -1),(-2, 0),(-2, 1),(-2, 2),(-2, 3),(-2, 4)],
[(-3, -1),(-3, 0),(-3, 1),(-3, 2),(-3, 3),(-3, 4)],
[(-4, 0),(-4, 1),(-4, 2),(-4, 3),(-4, 4)]]
def get_row(self,row_num):
"""Function returns the list of hex corresponding to row number from top to bottom, row 0 being the first row"""
return self.board_hex[4-row_num]
def update_move(self,action,player_side):
"""Update the board's game state according to the action player passed into"""
#Add the token to the board
if action[0] == 'THROW':
self.game_state[action[1]].insert(0,action[2])
if action[0] == 'SLIDE' or action[0] == 'SWING':
original_pos = action[1]
new_pos = action[2]
#update list of key in game state accordingly
if self.player_type == 'upper' and player_side == 'player':
search_keys = ['R', 'P', 'S']
elif self.player_type == 'upper' and player_side == 'opponent':
search_keys = ['r', 'p', 's']
elif self.player_type == 'lower' and player_side == 'player':
search_keys = ['r', 'p', 's']
elif self.player_type == 'lower' and player_side == 'opponent':
search_keys = ['R', 'P', 'S']
for key in search_keys:
if original_pos in self.game_state.get(key):
updated_list = self.game_state.get(key)
updated_list.remove(original_pos)
updated_list.insert(0, new_pos)
self.game_state[key] = updated_list
#Battle if there are other tokens in the spot
tokens_in_pos = []
for key in self.game_state.keys():
for pos in self.game_state[key]:
if pos == action[2]:
tokens_in_pos.insert(0, key)
kill_types = self.kill_tokens(tokens_in_pos)
#Remove tokens in kill_types
all_kill_types = []
if kill_types != []:
for type in kill_types:
all_kill_types.insert(0, type)
all_kill_types.insert(0, type.lower())
if all_kill_types != []:
for token in all_kill_types:
self.game_state[token].remove(action[2])
def get_player_token(self):
"""Function returns a dictionary of player's tokens"""
if self.player_type == 'upper':
included_keys = ['R', 'P', 'S']
else:
included_keys = ['r', 'p', 's']
return {k:v for k,v in self.game_state.items() if k in included_keys}
def generate_possible_move(self,pos):
"""Function returns a list of possible new positions that the current token in pos can move to"""
possible_positions = []
#Positions for swinging
directions = [(0,-1),(1,-1),(1,0),(0,1),(-1,1),(-1,0)]
        for direction in directions:
            new_pos = ((pos[0] + direction[0]), (pos[1] + direction[1]))
if new_pos in list(itertools.chain(*self.board_hex)):
possible_positions.insert(0,new_pos)
return possible_positions
    @staticmethod
    def token_win(type1, type2):
        """Return True if a token of type1 beats a token of type2"""
t1 = type1.upper()
t2 = type2.upper()
if t1 == 'R':
if t2 == 'S':
return True
else:
return False
if t1 == 'P':
if t2 == 'R':
return True
else:
return False
if t1 == 'S':
if t2 == 'P':
return True
else:
return False
def kill_tokens(self,tokens_in_pos):
"""Function receives a list of types of tokens in the same position, and remove the one which loses in battle"""
capital = []
for type in tokens_in_pos:
if type.upper() not in capital:
                capital.insert(0, type.upper())
#Case where all three types in same position
if 'R' in capital and 'P' in capital and 'S' in capital:
return capital #All tokens removed
#Cases where two token types battle
elif 'R' in capital and 'P' in capital:
return ['R']
elif 'P' in capital and 'S' in capital:
return ['P']
elif 'S' in capital and 'R' in capital:
return ['S']
else:
return []
def player_no_token(self):
if self.player_type == 'upper':
keys = ['R', 'P', 'S']
else:
keys = ['r', 'p', 's']
number_of_tokens = 0
for key in keys:
number_of_tokens += len(self.game_state[key])
if number_of_tokens == 0:
return True
else:
return False
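# Added usage sketch (not part of the original file): throw an upper token,
# slide it one hex, and inspect the player's remaining tokens.
if __name__ == '__main__':
    board = Board('upper')
    board.update_move(('THROW', 'R', (4, -4)), 'player')
    board.update_move(('SLIDE', (4, -4), (3, -3)), 'player')
    print(board.get_player_token())  # {'R': [(3, -3)], 'P': [], 'S': []}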
|
[
"dion.doanh.tran@gmail.com"
] |
dion.doanh.tran@gmail.com
|
e5d651724d5f41ceac086f9d0da8f159b38821bf
|
a572103139a007d45f27ba9f86a174537f5725ae
|
/blog/urls.py
|
d69766315002496350281a4f7d3b6533ce9e53eb
|
[] |
no_license
|
VitoCorleonexin/Blog
|
f406d31a9f4fdbadd2a8819615996e3f3d6d7b6e
|
5336711abe63a1a4982e9e33f237e9d59588eae9
|
refs/heads/master
| 2022-04-20T07:47:55.319747
| 2020-04-22T16:05:36
| 2020-04-22T16:05:36
| 256,343,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('',views.PostListView.as_view(), name='blog-home'),
path('about/',views.about, name='blog-about'),
path('post/<int:pk>/',views.PostDetailView.as_view(), name='post-detail'),
path('post/new/',views.PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/update/',views.PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/',views.PostDeleteView.as_view(), name='post-delete'),
path('user/<str:username>/',views.UserPostListView.as_view(), name='user-posts'),
]
|
[
"wangwenxin03@sina.com"
] |
wangwenxin03@sina.com
|
ad3a2801016e2887d1a8afbfb79b40f4b67fd555
|
9f1e45b3deb43eb971ce5872eb832fef56672d20
|
/__init__.py
|
c7a258bfb60d7be0a842f236558ce0add704368d
|
[
"BSD-3-Clause"
] |
permissive
|
fhLUG/limnoria-mittag.at
|
b8825301c3503a68cf8dbabc97f01efe2878b9d6
|
2dd20e58d56e762e3eae47c9745ea7f9838d86e7
|
refs/heads/master
| 2020-06-25T13:14:04.681377
| 2016-07-30T13:28:21
| 2016-09-22T15:54:26
| 67,422,238
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,577
|
py
|
###
# Copyright (c) 2016, fhLUG
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Mittag: Mittag.at IRC plugin
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ".01"
# Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.Author('fhLUG')
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = 'http://fhLUG.at'
from . import config
from . import plugin
from imp import reload
# In case we're being reloaded.
reload(config)
reload(plugin)
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
[
"knittl89+git@googlemail.com"
] |
knittl89+git@googlemail.com
|
61b0d22c7fff82918f2b874d0859a21b34fbdb70
|
426e7709cf8ac82fc928a489f52cad1a17019a5d
|
/OOP/math_dojo.py
|
49737d89234c919a70f6dc444b57ab30cb235722
|
[
"MIT"
] |
permissive
|
gfhuertac/coding_dojo_python
|
3eb3fa98175e7d55f5d2510355f932342649fc25
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
refs/heads/master
| 2022-05-09T21:51:37.534322
| 2020-06-06T21:28:52
| 2020-06-06T21:28:52
| 229,930,788
| 0
| 0
|
MIT
| 2022-04-22T23:15:54
| 2019-12-24T11:37:27
|
Python
|
UTF-8
|
Python
| false
| false
| 788
|
py
|
import unittest
from functools import reduce
class MathDojo:
def __init__(self):
self.result = 0
def add(self, num, *nums):
self.result += num + reduce(lambda x,y: x+y, nums, 0)
return self
def subtract(self, num, *nums):
self.result -= num + reduce(lambda x,y: x+y, nums, 0)
return self
class MathDojoTestCase(unittest.TestCase):
def setUp(self):
self.md = MathDojo()
def testBase(self):
x = self.md.add(2).add(2,5,1).subtract(3,2).result
self.assertEqual(x, 5)
def testNegative(self):
x = self.md.add(-2).add(2).result
self.assertEqual(x, 0)
# create an instance:
md = MathDojo()
# to test:
x = md.add(2).add(2,5,1).subtract(3,2).result
print(x)  # 5
if __name__ == '__main__':
    unittest.main()
|
[
"gonzalo@huerta.cl"
] |
gonzalo@huerta.cl
|
cc4a079b90a29d40b63d8616277c45c31f14704d
|
cc03e0d7ceb6ca5e37c239a5c354cad36c78ade6
|
/urban/urban/settings.py
|
a88876ae562b71fce74454b6ca279c39313c8e82
|
[] |
no_license
|
randypantinople/Analysis-of-Starbucks-Global-Presence
|
87235315e048baea7f0ead7f1ddfa48ecd915c38
|
5516ae5095bf07d2936c57da0704ae3af0a0bfc8
|
refs/heads/master
| 2022-11-26T09:31:03.309927
| 2020-08-01T16:23:05
| 2020-08-01T16:23:05
| 281,262,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,135
|
py
|
# Scrapy settings for urban project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'urban'
SPIDER_MODULES = ['urban.spiders']
NEWSPIDER_MODULE = 'urban.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'urban (Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'urban.middlewares.UrbanSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'urban.middlewares.UrbanDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'urban.pipelines.WriteItemPipeline': 200
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"randypantinople@yahoo.com"
] |
randypantinople@yahoo.com
|
5ce94942c2232ed793c891c895905868df103abb
|
580d6acdbb6dc3d6d27cb164097731fa601de292
|
/ECS655U_VENV_P3/bin/jupyter-run
|
134c4b6eed9c33a3c1cbb504758780012f85651d
|
[] |
no_license
|
johnsn27/security_engineering
|
182418530bdf3c1c3927dfcad686e7bbe9bbfc9f
|
996341864a5e2a8e7bd7ea24b05fdeca7e3f7938
|
refs/heads/main
| 2023-03-02T18:59:15.608520
| 2021-02-13T19:52:34
| 2021-02-13T19:52:34
| 338,605,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
#!/Users/johnsn27/Documents/Uni/security_engineering/ECS655U_VENV_P3/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.runapp import RunApp
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(RunApp.launch_instance())
|
[
"nathan.johnson@bbc.co.uk"
] |
nathan.johnson@bbc.co.uk
|
|
9086642ccad8678a2bfbd13c03b645bdc7cd6bcc
|
10a4151ad6143d75a000b2c71957e2b698f704fa
|
/2920.py
|
e014cbe94c61fd34a28b2ec8db16a45cf0dfb052
|
[] |
no_license
|
Yabby1997/Baekjoon-Online-Judge
|
2f811507be6d725bfcf33c1e1dd2ccbf052fba47
|
3aa74d89f8accc98fbcdba1bf705d5291a80178c
|
refs/heads/master
| 2021-12-30T02:53:56.897972
| 2021-12-26T07:18:46
| 2021-12-26T07:18:46
| 231,369,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
melodyList = list(map(int, input().split()))
melodyCheck = [0]*7
output = 0
for i in range(len(melodyList)-1): #LEN FUNCTION RETURNS LENGTH OF LIST
melodyCheck[i] = melodyList[i+1]-melodyList[i]
for i in range(len(melodyCheck)-1):
if melodyCheck[i] == melodyCheck[i+1]:
output = melodyCheck[i]
else:
output = 0
break #YOU CAN USE BREAK IN PYTHON
if output == 1:
print("ascending")
elif output == -1:
print("descending")
else:
print("mixed")
|
[
"yabby1997@gmail.com"
] |
yabby1997@gmail.com
|
87d3f98dcf7aebf0fbec3905291a13e56a4136f2
|
661aad54c4ea654b197d0fdff3e18e3aa0056661
|
/constants.py
|
80453ccde24a468b2fb3a17da51d161d9a5a79a8
|
[] |
no_license
|
mohit89mohit/instabot
|
0a884d523ca96c35f011eebdc0b082133b95dc58
|
9221185e14cc5b6ba76a2acc40916802d108fef1
|
refs/heads/master
| 2020-06-24T07:02:39.766014
| 2017-07-11T18:49:53
| 2017-07-11T18:49:53
| 96,926,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
# global variables file
# instagram API access token
APP_ACCESS_TOKEN = '2288274672.6d26f8c.5209938e17cd40619b178ddc37793f48'
# instagram API base url
BASE_URL = 'https://api.instagram.com/v1/'
|
[
"mohitchambial89mohit@gmail.com"
] |
mohitchambial89mohit@gmail.com
|
594a55f3e4d5cd1c2c14230e6bd967f1d7b7f5d8
|
61febabc6aa34b7c47208aa7be5dfca88287ddaf
|
/Ch. 7 User Input and while Loops/Deli.py
|
ef00a69b390ec09c873760b47c3a2dd13dd6aa42
|
[] |
no_license
|
chrisstophere/Python-Crash-Course
|
4be7262acc2ff8ad26d99aceb028c25e4c7f9b0b
|
702c44734e93df68ec55831626fb7a7a22ce2b8d
|
refs/heads/master
| 2021-05-24T10:31:54.679224
| 2020-05-07T21:10:09
| 2020-05-07T21:10:09
| 253,520,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
sandwich_orders = ['ham', 'roast beef', 'turkey']
finished_sandwiches = []
while sandwich_orders:
sandwich = sandwich_orders.pop()
print(f"I made your {sandwich.title()}.")
finished_sandwiches.append(sandwich)
print("\n")
for sandwich in finished_sandwiches:
print(f"I have made your {sandwich} sandwich.")
|
[
"chris@ewentech.com"
] |
chris@ewentech.com
|
95cdcd2931a7898d7fc1fcb55a5ae3669b6636f8
|
ddbac0c502110680e0d48974bb55c2032962f4a7
|
/password_test.py
|
2d1eb5418c4b6809938dd5e838869992fb3d527a
|
[
"MIT"
] |
permissive
|
NinahMo/password-locker
|
97a1b34488e56b8d8fab3a38965c6cac0f041ef4
|
4c8a9ad740e95da8cccfe82bbbaefdcce753728f
|
refs/heads/master
| 2022-11-11T12:40:43.552476
| 2020-06-29T12:20:45
| 2020-06-29T12:20:45
| 275,170,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
import unittest
from password import Password
class TestUser(unittest.TestCase):
def setUp(self):
self.new_password = Password("Ninahmozzy","0712345678","aBcDeF","aBcDeF")
def tearDown(self):
Password.password_list = []
def test_init(self):
self.assertEqual(self.new_password.username,"Ninahmozzy")
self.assertEqual(self.new_password.phone_number,"0712345678")
self.assertEqual(self.new_password.password,"aBcDeF")
self.assertEqual(self.new_password.pas2,"aBcDeF")
def test_save_password(self):
self.new_password.save_password()
self.assertEqual(len(Password.password_list),1)
    def test_save_multiple_password(self):
        '''
        Test that more than one password object can be saved to password_list.
        '''
self.new_password.save_password()
test_password=Password("Ninahmozzy","0712345678","aBcDeF","aBcDeF")
test_password.save_password()
self.assertEqual(len(Password.password_list),2)
def delete_password(self):
Password.password_list.remove(self)
def test_find_password_by_username(self):
self.new_password.save_password()
test_password = Password("Ninahmozzy","0712345678","aBcDeF","aBcDeF")
test_password.save_password()
found_password = Password.find_by_username("Ninahmozzy")
self.assertEqual(found_password.password,test_password.password)
def test_display_all_usernames(self):
self.assertEqual(Password.display_passwords(),Password.password_list)
if __name__ == '__main__':
unittest.main()
|
[
"ninahb746@gmail.com"
] |
ninahb746@gmail.com
|
db32be82e4cbb8041e41a5706bfdb1e999fedfae
|
6b065c0e4e2d103f37b5d7ac062c8f778cc7b960
|
/set-01/length.py
|
47eaf20923af1513b3f035ea1bf38fe31157f74d
|
[] |
no_license
|
abhi1615/GUVI
|
3033dd953175976431a10f86384c489ee683c0d1
|
6563279d7cfd293962e863f42dded676e0a7cbf5
|
refs/heads/master
| 2020-03-31T09:48:18.645870
| 2019-01-19T14:20:44
| 2019-01-19T14:20:44
| 152,111,496
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
def length():  # counts the digits of a non-negative integer read from stdin
    n = int(input())
    c = 0
    while n > 0:
        c += 1
        n //= 10
    print(max(c, 1))  # a lone 0 still has one digit
length()
|
[
"noreply@github.com"
] |
abhi1615.noreply@github.com
|
a57e9b2e13acb81a055216ed04bc58da919abdcd
|
e568e1fc751d16b739c16d6d396f18ca5c520be2
|
/microblog/app/__init__.py
|
1ef6a54dfa8255ec1ac774ab2bd04a5d89711899
|
[] |
no_license
|
KennF/Timeline
|
3a76dd38a7dc9e174265932d33202e7e981d5a65
|
c4faa4d7ab8d2d6cda81226678a7bd669e4efcaf
|
refs/heads/master
| 2021-01-10T16:49:09.748686
| 2013-09-04T10:34:31
| 2013-09-04T10:34:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from config import basedir, ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager()
lm.setup_app(app)
lm.login_view = 'login'
oid = OpenID(app, os.path.join(basedir, 'tmp'))
if not app.debug:
import logging
from logging.handlers import SMTPHandler
credentials = None
if MAIL_USERNAME or MAIL_PASSWORD:
credentials = (MAIL_USERNAME, MAIL_PASSWORD)
mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT), 'no-reply@' + MAIL_SERVER, ADMINS, 'microblog failure', credentials)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if not app.debug:
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler('tmp/microblog', 'a', 1*1024*1024, 10)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
from app import views, models
|
[
"cactus.fxy@gmail.com"
] |
cactus.fxy@gmail.com
|
bba7c9430acdb73d6eda2a0982f190538c19e509
|
6c671228ff9e08cbdbd58eba7938bc363cc7d55a
|
/infrastructure/serializers/__init__.py
|
7f944e47bf94bc82b63ee4b7ac9c87fddb13671b
|
[
"MIT"
] |
permissive
|
joaquinquintas/shipwell_backend_ricardo
|
a9ee69ec48ecdb09a4748d30d319d1a2ecfe948d
|
55de24c8c7e3a685a1dca786e5c026410d353473
|
refs/heads/master
| 2020-07-27T04:47:22.679327
| 2019-09-16T18:53:46
| 2019-09-16T18:53:46
| 208,873,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
'''
serializer package
'''
from .dict_serializer import DictSerializer
from .average_temperature_serializer import AverageTemperatureSerializer
from .temperature_serializer import TemperatureSerializer
from .validation_error_serializer import ValidationErrorSerializer
__all__ = [
'AverageTemperatureSerializer',
'TemperatureSerializer',
'ValidationErrorSerializer',
]
|
[
"ricardosiri68@gmail.com"
] |
ricardosiri68@gmail.com
|
006672a2ef6b5dce7819eac585bedfefdbb99216
|
7bf756218bacb058a10e81964a5b9722dd8a82c2
|
/rentspace/apps/userprofile/views/admin_restapi.py
|
30421afa3234920a6ba9eac75862911de2cb73db
|
[] |
no_license
|
saikumar-divvela/rentspace
|
a30f3450d545718893a93cfd4f333c7dce441c29
|
81065e27d4b1b3c6d1ffdadb7e1948592f21932e
|
refs/heads/master
| 2021-06-17T13:59:41.376526
| 2017-05-24T07:55:37
| 2017-05-24T07:55:37
| 57,314,062
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
from django.http import HttpResponse
from django.http import Http404
from rest_framework import status, permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.decorators import permission_classes
from userprofile.models import User
from userprofile.serializers import UserSerializer
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
@permission_classes((permissions.AllowAny,))
class UserList(APIView):
def get(self,request,format=None):
users = User.objects.all();
serializer = UserSerializer(users,many=True)
print (request.session)
print (request.session.keys())
print (request.session.items())
#print (serializer.data)
return Response(serializer.data)
def post(self,request,format=None):
print (request.data)
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@permission_classes((permissions.AllowAny,))
class UserDetail(APIView):
def get_object(self,pk):
try:
return User.objects.get(pk=pk)
except User.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
user = self.get_object(pk)
serializer = UserSerializer(user)
return Response(serializer.data)
def put(self, request, pk, format=None):
user = self.get_object(pk)
serializer = UserSerializer(user,data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self,request,pk,format=None):
user = self.get_object(pk)
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def get_status(self,request,pk,format=None):
user = self.get_object(pk)
output = {}
output["is_email_verified"] = user.is_email_verified
output["is_phone_verified"] = user.is_phone_verified
output["is_id_verified"] = user.is_id_verified
return Response(output)
|
[
"saikumar.divvela@gmail.com"
] |
saikumar.divvela@gmail.com
|
a790c33ec9c44720c105c842259b2b07a70bb80d
|
95d73f1daebb98fe6707b999c9763f3b84d418a4
|
/cms/plugin_pool.py
|
2ba37a444175b8cab1731ac1a6ac72031895f547
|
[] |
no_license
|
leotop/django_ukrhim
|
8e01e284076878c7691986d5e8d056795d2bb900
|
e5a60a79f441ae732350e518f9b71e2724dc010a
|
refs/heads/master
| 2021-01-22T15:51:27.617651
| 2015-01-23T11:00:37
| 2015-01-23T11:00:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,063
|
py
|
# -*- coding: utf-8 -*-
from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered
from cms.plugin_base import CMSPluginBase
from cms.utils.django_load import load
from cms.utils.helpers import reversion_register
from cms.utils.placeholder import get_placeholder_conf
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import warnings
class PluginPool(object):
def __init__(self):
self.plugins = {}
self.discovered = False
def discover_plugins(self):
if self.discovered:
return
self.discovered = True
load('cms_plugins')
def register_plugin(self, plugin):
"""
Registers the given plugin(s).
If a plugin is already registered, this will raise PluginAlreadyRegistered.
"""
if not issubclass(plugin, CMSPluginBase):
raise ImproperlyConfigured(
"CMS Plugins must be subclasses of CMSPluginBase, %r is not."
% plugin
)
plugin_name = plugin.__name__
if plugin_name in self.plugins:
raise PluginAlreadyRegistered(
"Cannot register %r, a plugin with this name (%r) is already "
"registered." % (plugin, plugin_name)
)
plugin.value = plugin_name
self.plugins[plugin_name] = plugin
if 'reversion' in settings.INSTALLED_APPS:
try:
from reversion.registration import RegistrationError
except ImportError:
from reversion.revisions import RegistrationError
try:
reversion_register(plugin.model)
except RegistrationError:
pass
def unregister_plugin(self, plugin):
"""
Unregisters the given plugin(s).
If a plugin isn't already registered, this will raise PluginNotRegistered.
"""
plugin_name = plugin.__name__
if plugin_name not in self.plugins:
raise PluginNotRegistered(
'The plugin %r is not registered' % plugin
)
del self.plugins[plugin_name]
def get_all_plugins(self, placeholder=None, page=None, setting_key="plugins", include_page_only=True):
self.discover_plugins()
plugins = self.plugins.values()[:]
plugins.sort(key=lambda obj: unicode(obj.name))
final_plugins = []
if page:
template = page.get_template()
else:
template = None
allowed_plugins = get_placeholder_conf(
setting_key,
placeholder,
template,
)
for plugin in plugins:
include_plugin = False
if placeholder:
if allowed_plugins:
if plugin.__name__ in allowed_plugins:
include_plugin = True
elif setting_key == "plugins":
include_plugin = True
if plugin.page_only and not include_page_only:
include_plugin = False
if include_plugin:
final_plugins.append(plugin)
if final_plugins:
plugins = final_plugins
# plugins sorted by modules
plugins = sorted(plugins, key=lambda obj: unicode(obj.module))
return plugins
def get_text_enabled_plugins(self, placeholder, page):
plugins = self.get_all_plugins(placeholder, page)
plugins +=self.get_all_plugins(placeholder, page, 'text_only_plugins')
final = []
for plugin in plugins:
if plugin.text_enabled:
if plugin not in final:
final.append(plugin)
return final
def get_plugin(self, name):
"""
Retrieve a plugin from the cache.
"""
self.discover_plugins()
return self.plugins[name]
plugin_pool = PluginPool()
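# Added usage sketch (not part of the original module), assuming a
# CMSPluginBase subclass defined in an app's cms_plugins.py:
#
#     class MyPlugin(CMSPluginBase):
#         name = 'My Plugin'
#
#     plugin_pool.register_plugin(MyPlugin)
#     plugin_pool.get_plugin('MyPlugin')   # -> MyPlugin
#     plugin_pool.unregister_plugin(MyPlugin)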
|
[
"root@ip-172-31-19-251.us-west-2.compute.internal"
] |
root@ip-172-31-19-251.us-west-2.compute.internal
|
0dfc17422b802593624f2382527227749645fa83
|
5a411f275eaf9d177bdae83c2c06990dcdff3e1e
|
/mm.py
|
e5a2c8b518855246b960d10cc8fc8f2e33e20b71
|
[] |
no_license
|
jimmy1231/mental-math
|
9f91e6b08931dcdb7752535695939ad907d551dc
|
9c92af42890f466cfdaf97390e7e832d99f22822
|
refs/heads/master
| 2021-03-02T10:47:13.502317
| 2020-03-08T18:02:15
| 2020-03-08T18:02:15
| 245,862,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
import pyttsx3
import random
import math
if __name__ == "__main__":
    ops = ['+', '-', '*', '/']
    engine = pyttsx3.init()
    engine.setProperty('volume', 1.0)
    engine.setProperty('rate', 130)
    total = random.randint(0, 20)  # running arithmetic total
    print('Start with {}'.format(total))
    engine.say('Start with {}'.format(total))
    engine.runAndWait()
    for i in range(0, 10):
        op = ops[random.randint(0, len(ops) - 1)]
        phrase = ''  # spoken description of this operation
        if op == '+':
            num = random.randint(1, 20)
            total = total + num
            phrase += 'plus {}'.format(num)
        elif op == '-':
            num = random.randint(1, 20)
            total = total - num
            phrase += 'minus {}'.format(num)
        elif op == '*':
            num = random.randint(1, 3)
            total = total * num
            phrase += 'times {}'.format(num)
        elif op == '/':
            num = random.randint(1, 3)
            if math.fmod(total / num, 1) != 0:
                continue  # skip divisions that would leave a remainder
            total = total / num
            phrase += 'divide by {}'.format(num)
        print(phrase)
        engine.say(phrase)
        engine.runAndWait()
    print('Did you get {}?'.format(int(total)))
    engine.say('Did you get {}?'.format(int(total)))
    engine.runAndWait()
|
[
"dfjimmy.li@gmail.com"
] |
dfjimmy.li@gmail.com
|
2a8416de77ac7dd2c7768c9e83a912883f727d7f
|
05b1db9381b60f58f7372b0cb7977dc7bf700358
|
/sec6_lec_66_filehandling_loops_func_cond.py
|
318048abad7d83243b59c70ceaf140a3d24bf6ee
|
[] |
no_license
|
tgolf4fun/the_python_mega_course
|
0b1d8370dacadfc92963dd2bf16b6b54ec4fc745
|
23c8519fcbd9a973bd69ac45acfb98ea56ab4388
|
refs/heads/master
| 2021-08-17T10:17:51.995596
| 2017-11-21T04:08:13
| 2017-11-21T04:08:13
| 108,569,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
temperatures = [10, -20, -289, 100]
def writer(temperatures):
with open("files/sec6lec66.txt", "w") as file:
for t in temperatures:
if t > -273.15:
t = t*9/5 + 32
file.write(str(t) + "\n")
writer(temperatures)
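# Added commentary: for the list above the file ends up with 50.0, -4.0 and 212.0
# (one per line); -289 is dropped because it is below absolute zero.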
|
[
"tgolf4fun@gmail.com"
] |
tgolf4fun@gmail.com
|
5acee9d5a274bab67816f86b40e55547c4a86bd9
|
479bb9dbed79a8e3756a52d05ab74d3001a13110
|
/ex6.py
|
468ad17f638defa555b5019b9203ab9ea19d5f02
|
[] |
no_license
|
DavidLohrentz/LearnPy3HardWay
|
49c1c8f130df916ca369449851db7a0bd5dcae5a
|
39ef557b9ba6f9b82defd474659eecf27441b0e1
|
refs/heads/master
| 2021-09-05T06:28:49.373804
| 2018-01-24T20:17:56
| 2018-01-24T20:17:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
print()
print()
# define types_of_people
types_of_people = "1,000,000"
# define x with formatted string containing variable above
x = f"There are {types_of_people} types of people."
print("<<<<<<< x after assigning value", x)
# define binary
binary = "binary"
# define do_not
do_not = "refuse to self-flaggelate"
print("<<<<<< after assigning do_not", do_not)
#define y formatted string with two variables
y = f"Those who know {binary} and those who {do_not}."
#print variable x
print(x)
# print variable y
print(y)
# print a string containing variable x
print(f"I said: {x}")
# print a string containing variable y
print(f"I also said '{y}'")
# Define variable hilarious as boolean with value of False
hilarious = False
# define T, N & G
T = True
N = "No"
G = "Eat shit and die, Trump"
# define joke_evaluation as a string with an empty format placeholder
joke_evaluation = "Isn't that joke so funny?! {}"
# print joke_evaluation, filling the placeholder with G
print(joke_evaluation.format(G))
#define w
w = "This is the left side of..."
# define e
e = "a banana."
print(w + e)
|
[
"noreply@github.com"
] |
DavidLohrentz.noreply@github.com
|
d6fd438475d5a3584caedb9bb0912b0d23438de0
|
7935b04458783507cc83bbed73140aae4c826f58
|
/docxtemplater/xml_helper.py
|
3270f56d85cd0df4ef8ba8d8c00ce9f4036f74ae
|
[] |
no_license
|
nikhildamle/docx-templater
|
c164f64e6ce04b58202897eba119866584b89acf
|
bd7b2f654fd3976551f7bfe37875afc862893db9
|
refs/heads/master
| 2021-01-10T20:06:27.986756
| 2015-09-13T07:50:23
| 2015-09-13T07:50:23
| 42,385,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
from xml.dom.minidom import Node, Element
def get_text(node: Element):
text = None
for child in node.childNodes:
if child.nodeType == Node.TEXT_NODE:
if text is None: text = ''
text += child.nodeValue
return text
def set_text(node: Element, text):
dom = node.ownerDocument
for child in node.childNodes:
if child.nodeType == Node.TEXT_NODE:
node.removeChild(child)
    text_node = dom.createTextNode(text)  # createTextNode already sets nodeValue
node.appendChild(text_node)
def remove_whitespace_only_nodes(node: Node):
"""Removes all of the whitespace-only text descendants of a DOM node.
:param node: Node to cleanup
If the specified node is a whitespace-only text node then it is left
unmodified.
"""
remove_list = []
for child in node.childNodes:
# Below if returns true for TEXT_NODE which contains no characters
        # other than spaces and newlines
if child.nodeType == Node.TEXT_NODE and not child.data.strip():
remove_list.append(child)
elif child.hasChildNodes():
remove_whitespace_only_nodes(child)
for node in remove_list:
node.parentNode.removeChild(node)
node.unlink() # Garbage collect unneeded Nodes
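# Added usage example (not part of the original module):
if __name__ == '__main__':
    from xml.dom.minidom import parseString
    doc = parseString('<t>  old  </t>')
    node = doc.documentElement
    set_text(node, 'new value')
    print(get_text(node))  # -> new value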
|
[
"nikhilbcd@gmail.com"
] |
nikhilbcd@gmail.com
|
90c39d6e77cf38bb10df1f74b31b59ebf3379d89
|
97bc215350175b6322afe94e22a468d7b522352c
|
/GrackleHeatingCloud_0.01/Plots/scripts/temperature_timeseries.py
|
fb0a1a3916c47c25c31fddcb2102c60ffc12be23
|
[] |
no_license
|
rickyfernandez/enzo_runs
|
5e57baea83682237dfb13e081c6eb277cc22f397
|
62f0f914513426610aadf7b372e1a7615212ec87
|
refs/heads/master
| 2021-01-19T01:06:04.316227
| 2016-09-01T18:49:41
| 2016-09-01T18:49:41
| 41,223,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
import yt
import os
import numpy as np
import matplotlib.pyplot as plt
pc_to_cm = 3.0857E18 # 1pc in cm
plot_range = list(range(0, 12, 2))  # list() so that append() below works in Python 3
plot_range.append(11)
num_plots = len(plot_range)
colormap = plt.cm.gist_ncar
plt.gca().set_prop_cycle(color=[colormap(i) for i in np.linspace(0, 0.9, num_plots)])
file_names = []
for i in plot_range:
    # str(i) replaces the Python-2-only backtick repr syntax
    file_names.append("../../DD" + str(i).zfill(4) + "/" + "cloud_collision_" + str(i).zfill(4))
labels = []
for file in file_names:
ds = yt.load(file)
sphere = ds.sphere("max", (75, "pc"))
plot = yt.ProfilePlot(sphere, "radius", ["temperature"],
weight_field="cell_mass")
profile = plot.profiles[0]
plt.loglog(profile.x/pc_to_cm, profile["temperature"])
labels.append(r"%0.2f Myr" % ds.current_time.value)
plt.xlabel(r"Radius $(\mathrm{pc})$")
plt.ylabel(r"Temperature $(\mathrm{K})$")
plt.xlim(1,75)
plt.legend(labels, loc="upper left", frameon=False, prop={'size':10})
plt.savefig("temperature_series")
|
[
"rafernandezjr@gmail.com"
] |
rafernandezjr@gmail.com
|
a4c2fdcc6b5ae2ee1d96826b298814f675fbe913
|
796e17c8b983281f8ff24abc37a7ecb173ad6acd
|
/tests/examples/agnostic-examples/h_cost_functions/SConstruct
|
239c437fed168901a8c39630ed8b70e72c6009c2
|
[
"MIT"
] |
permissive
|
anubhav-cs/LAPKT-public
|
d556783d1fc7f9b9a7f9d0b88a4a95ffdebcd2e6
|
87407cf6995baa50b54f63eb0c3de4e724dd23cb
|
refs/heads/master
| 2023-07-19T09:39:22.125096
| 2022-10-12T10:48:20
| 2022-10-12T10:48:20
| 456,788,199
| 1
| 1
| null | 2022-02-08T04:56:00
| 2022-02-08T04:55:59
| null |
UTF-8
|
Python
| false
| false
| 1,229
|
import os
debug = ARGUMENTS.get('debug', 0)
common_env = Environment()
include_paths = ['../../../include', '../../../interfaces/agnostic', '../..' ]
lib_paths = [ ]
libs = [ ]
common_env.Append( CPPPATH = [ os.path.abspath(p) for p in include_paths ] )
if int(debug) == 1 :
common_env.Append( CCFLAGS = ['-g','-Wall', '-std=c++0x', '-DDEBUG' ] )
elif int(debug) == 2 :
common_env.Append( CCFLAGS = ['-g','-Wall', '-std=c++0x', '-DNDEBUG' ] )
else:
common_env.Append( CCFLAGS = ['-O3','-Wall', '-std=c++0x', '-DNDEBUG'] )
cxx_sources = Glob('*.cxx')
c_sources = Glob('*.c')
src_objs = [ common_env.Object(s) for s in cxx_sources ] + [ common_env.Object(s) for s in c_sources ]
Export('common_env')
src_objs += SConscript( '../../common/SConscript', 'common_env' )
generic_objs = SConscript('../../../src/SConscript.aptk')
agnostic_objs = SConscript('../../../interfaces/agnostic/SConscript.agnostic')
ff_wrapper_objs = SConscript('../../../interfaces/ff-wrapped/SConscript.ff')
common_env.Append( LIBS=libs)
common_env.Append( LIBPATH=[ os.path.abspath(p) for p in lib_paths ] )
common_env.Program( 'h-cost-funcs', src_objs + generic_objs + agnostic_objs + ff_wrapper_objs )
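# Invocation note (assumed typical SCons usage): plain `scons` builds the
# optimized -O3/-DNDEBUG binary, while `scons debug=1` or `scons debug=2`
# select the debug flag sets handled above.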
|
[
"anubhav.singh.eng@gmail.com"
] |
anubhav.singh.eng@gmail.com
|
|
28b4bb1bb6be3fc429a488b7d4d0e9130a698355
|
8dd519098e0066a985d654576c5350280e5a9042
|
/master/Chapter08/07_dqn_distrib_plots.py
|
d215acd77ca1b93f054074481f32631c79cfbd6b
|
[] |
no_license
|
mecha2k/rl_handson
|
491f0dfb161d0d968eb2941ade9de1c33addb282
|
b6f025e1cca07cfddc6eb5c4b78dc2c7a3b6071a
|
refs/heads/master
| 2023-03-22T05:49:19.932512
| 2021-03-15T22:49:33
| 2021-03-15T22:49:33
| 330,268,772
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,374
|
py
|
#!/usr/bin/env python3
import gym
import ptan
import ptan.ignite as ptan_ignite
from datetime import datetime, timedelta
import argparse
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from ignite.engine import Engine
from ignite.metrics import RunningAverage
from ignite.contrib.handlers import tensorboard_logger as tb_logger
import libc.dqn_extra
from libc import dqn_model, common
NAME = "07_distrib"
SAVE_STATES_IMG = True
SAVE_TRANSITIONS_IMG = True
if SAVE_STATES_IMG or SAVE_TRANSITIONS_IMG:
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pylab as plt
Vmax = 10
Vmin = -10
N_ATOMS = 51
DELTA_Z = (Vmax - Vmin) / (N_ATOMS - 1)
STATES_TO_EVALUATE = 1000
EVAL_EVERY_FRAME = 100
class DistributionalDQN(nn.Module):
def __init__(self, input_shape, n_actions):
super(DistributionalDQN, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
)
conv_out_size = self._get_conv_out(input_shape)
self.fc = nn.Sequential(
nn.Linear(conv_out_size, 512), nn.ReLU(), nn.Linear(512, n_actions * N_ATOMS)
)
self.register_buffer("supports", torch.arange(Vmin, Vmax + DELTA_Z, DELTA_Z))
self.softmax = nn.Softmax(dim=1)
def _get_conv_out(self, shape):
o = self.conv(torch.zeros(1, *shape))
return int(np.prod(o.size()))
def forward(self, x):
batch_size = x.size()[0]
fx = x.float() / 256
conv_out = self.conv(fx).view(batch_size, -1)
fc_out = self.fc(conv_out)
return fc_out.view(batch_size, -1, N_ATOMS)
def both(self, x):
cat_out = self(x)
probs = self.apply_softmax(cat_out)
weights = probs * self.supports
res = weights.sum(dim=2)
return cat_out, res
def qvals(self, x):
return self.both(x)[1]
def apply_softmax(self, t):
return self.softmax(t.view(-1, N_ATOMS)).view(t.size())
def calc_values_of_states(states, net, device="cpu"):
mean_vals = []
for batch in np.array_split(states, 64):
states_v = torch.tensor(batch).to(device)
action_values_v = net.qvals(states_v)
best_action_values_v = action_values_v.max(1)[0]
mean_vals.append(best_action_values_v.mean().item())
return np.mean(mean_vals)
def save_state_images(frame_idx, states, net, device="cpu", max_states=200):
ofs = 0
p = np.arange(Vmin, Vmax + DELTA_Z, DELTA_Z)
for batch in np.array_split(states, 64):
states_v = torch.tensor(batch).to(device)
action_prob = net.apply_softmax(net(states_v)).data.cpu().numpy()
batch_size, num_actions, _ = action_prob.shape
for batch_idx in range(batch_size):
plt.clf()
for action_idx in range(num_actions):
plt.subplot(num_actions, 1, action_idx + 1)
plt.bar(p, action_prob[batch_idx, action_idx], width=0.5)
plt.savefig("states/%05d_%08d.png" % (ofs + batch_idx, frame_idx))
ofs += batch_size
if ofs >= max_states:
break
def save_transition_images(
batch_size, predicted, projected, next_distr, dones, rewards, save_prefix
):
for batch_idx in range(batch_size):
is_done = dones[batch_idx]
reward = rewards[batch_idx]
plt.clf()
p = np.arange(Vmin, Vmax + DELTA_Z, DELTA_Z)
plt.subplot(3, 1, 1)
plt.bar(p, predicted[batch_idx], width=0.5)
plt.title("Predicted")
plt.subplot(3, 1, 2)
plt.bar(p, projected[batch_idx], width=0.5)
plt.title("Projected")
plt.subplot(3, 1, 3)
plt.bar(p, next_distr[batch_idx], width=0.5)
plt.title("Next state")
suffix = ""
if reward != 0.0:
suffix = suffix + "_%.0f" % reward
if is_done:
suffix = suffix + "_done"
plt.savefig("%s_%02d%s.png" % (save_prefix, batch_idx, suffix))
def calc_loss(batch, net, tgt_net, gamma, device="cpu", save_prefix=None):
states, actions, rewards, dones, next_states = common.unpack_batch(batch)
batch_size = len(batch)
states_v = torch.tensor(states).to(device)
actions_v = torch.tensor(actions).to(device)
next_states_v = torch.tensor(next_states).to(device)
# next state distribution
next_distr_v, next_qvals_v = tgt_net.both(next_states_v)
next_actions = next_qvals_v.max(1)[1].data.cpu().numpy()
next_distr = tgt_net.apply_softmax(next_distr_v).data.cpu().numpy()
next_best_distr = next_distr[range(batch_size), next_actions]
dones = dones.astype(bool)  # np.bool was removed from newer NumPy; the builtin bool behaves the same here
# project our distribution using Bellman update
proj_distr = libc.dqn_extra.distr_projection(
next_best_distr, rewards, dones, Vmin, Vmax, N_ATOMS, gamma
)
# calculate net output
distr_v = net(states_v)
state_action_values = distr_v[range(batch_size), actions_v.data]
state_log_sm_v = F.log_softmax(state_action_values, dim=1)
proj_distr_v = torch.tensor(proj_distr).to(device)
if save_prefix is not None:
pred = F.softmax(state_action_values, dim=1).data.cpu().numpy()
save_transition_images(
batch_size, pred, proj_distr, next_best_distr, dones, rewards, save_prefix
)
loss_v = -state_log_sm_v * proj_distr_v
return loss_v.sum(dim=1).mean()
if __name__ == "__main__":
random.seed(common.SEED)
torch.manual_seed(common.SEED)
params = common.HYPERPARAMS["pong"]
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = gym.make(params.env_name)
env = ptan.common.wrappers.wrap_dqn(env)
env.seed(common.SEED)
net = DistributionalDQN(env.observation_space.shape, env.action_space.n).to(device)
tgt_net = ptan.agent.TargetNet(net)
selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params.epsilon_start)
epsilon_tracker = common.EpsilonTracker(selector, params)
agent = ptan.agent.DQNAgent(lambda x: net.qvals(x), selector, device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params.gamma)
buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=params.replay_size)
optimizer = optim.Adam(net.parameters(), lr=params.learning_rate)
def process_batch(engine, batch):
save_prefix = None
prev_save = getattr(engine.state, "prev_save", 0)
if SAVE_TRANSITIONS_IMG:
interesting = any(map(lambda s: s.last_state is None or s.reward != 0.0, batch))
if interesting and engine.state.iteration // 30000 > prev_save:
save_prefix = "images/img_%08d" % engine.state.iteration
engine.state.prev_save = engine.state.iteration // 30000
optimizer.zero_grad()
loss_v = calc_loss(
batch,
net,
tgt_net.target_model,
gamma=params.gamma,
device=device,
save_prefix=save_prefix,
)
loss_v.backward()
optimizer.step()
epsilon_tracker.frame(engine.state.iteration)
if engine.state.iteration % params.target_net_sync == 0:
tgt_net.sync()
eval_states = getattr(engine.state, "eval_states", None)
if eval_states is None:
eval_states = buffer.sample(STATES_TO_EVALUATE)
eval_states = [np.array(transition.state, copy=False) for transition in eval_states]
engine.state.eval_states = np.array(eval_states, copy=False)
if engine.state.iteration % EVAL_EVERY_FRAME == 0:
engine.state.metrics["values"] = calc_values_of_states(eval_states, net, device=device)
if SAVE_STATES_IMG and engine.state.iteration % 10000 == 0:
save_state_images(engine.state.iteration, eval_states, net, device=device)
return {
"loss": loss_v.item(),
"epsilon": selector.epsilon,
}
engine = Engine(process_batch)
ptan_ignite.EndOfEpisodeHandler(exp_source, bound_avg_reward=params.stop_reward).attach(engine)
ptan_ignite.EpisodeFPSHandler().attach(engine)
@engine.on(ptan_ignite.EpisodeEvents.EPISODE_COMPLETED)
def episode_completed(trainer: Engine):
print(
"Episode %d: reward=%s, steps=%s, speed=%.3f frames/s, elapsed=%s"
% (
trainer.state.episode,
trainer.state.episode_reward,
trainer.state.episode_steps,
trainer.state.metrics.get("avg_fps", 0),
timedelta(seconds=trainer.state.metrics.get("time_passed", 0)),
)
)
@engine.on(ptan_ignite.EpisodeEvents.BOUND_REWARD_REACHED)
def game_solved(trainer: Engine):
print(
"Game solved in %s, after %d episodes and %d iterations!"
% (
timedelta(seconds=trainer.state.metrics["time_passed"]),
trainer.state.episode,
trainer.state.iteration,
)
)
trainer.should_terminate = True
logdir = f"runs/{datetime.now().isoformat(timespec='minutes')}-{params.run_name}-{NAME}"
tb = tb_logger.TensorboardLogger(log_dir=logdir)
RunningAverage(output_transform=lambda v: v["loss"]).attach(engine, "avg_loss")
episode_handler = tb_logger.OutputHandler(
tag="episodes", metric_names=["reward", "steps", "avg_reward"]
)
tb.attach(
engine, log_handler=episode_handler, event_name=ptan_ignite.EpisodeEvents.EPISODE_COMPLETED
)
# write to tensorboard every 100 iterations
ptan_ignite.PeriodicEvents().attach(engine)
handler = tb_logger.OutputHandler(
tag="train", metric_names=["avg_loss", "avg_fps", "values"], output_transform=lambda a: a
)
tb.attach(engine, log_handler=handler, event_name=ptan_ignite.PeriodEvents.ITERS_100_COMPLETED)
engine.run(common.batch_generator(buffer, params.replay_initial, params.batch_size))
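# --- Illustration only; not part of the training loop above ---
# A minimal NumPy sketch of the categorical (C51) projection that
# libc.dqn_extra.distr_projection is assumed to perform, shown here for a
# single transition: each support atom is Bellman-updated, clipped to
# [Vmin, Vmax], and its probability mass is split between the two nearest
# atoms of the fixed support.
def _distr_projection_sketch(next_probs, reward, done, vmin, vmax, n_atoms, gamma):
    delta_z = (vmax - vmin) / (n_atoms - 1)
    support = np.linspace(vmin, vmax, n_atoms)
    proj = np.zeros(n_atoms)
    for p, z in zip(next_probs, support):
        tz = np.clip(reward if done else reward + gamma * z, vmin, vmax)
        b = (tz - vmin) / delta_z           # fractional index into the support
        l, u = int(np.floor(b)), int(np.ceil(b))
        if l == u:                          # tz landed exactly on an atom
            proj[l] += p
        else:                               # split the mass between neighbours
            proj[l] += p * (u - b)
            proj[u] += p * (b - l)
    return proj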
|
[
"mecha2k@naver.com"
] |
mecha2k@naver.com
|
72d60ae0420addea5f29714026881b3dbb90d936
|
51965a7eaa6891151274f0659402f0106f5045cb
|
/01-headers.py
|
5c4ae6e1d7d955b21fe08f96a435392c66627853
|
[] |
no_license
|
Susielove/MySpider
|
246b699533678004f9ce62ba6e1b30bfd994ed13
|
091f14fed755446634fd4603ef25d65a5edf4dc7
|
refs/heads/master
| 2020-04-10T05:03:15.224090
| 2019-01-05T14:18:33
| 2019-01-05T14:18:33
| 160,816,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
import requests
headers ={"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1"}
response = requests.get("http://www.baidu.com",headers=headers)
b = response.content.decode()
print(b)
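# Note (illustration only): response.text would decode using the encoding
# requests detects from the response headers, whereas .content.decode()
# above defaults to UTF-8.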
|
[
"44665170+Susielove@users.noreply.github.com"
] |
44665170+Susielove@users.noreply.github.com
|
c2e7fe9e5e348b121084739d363788f95d4b4160
|
4f2cdd9a34fce873ff5995436edf403b38fb2ea5
|
/Data-Structures/List/Part2/P015.py
|
9bbc2f8f0beb4506bc15ef903c2418a9aa774178
|
[] |
no_license
|
sanjeevseera/Python-Practice
|
001068e9cd144c52f403a026e26e9942b56848b0
|
5ad502c0117582d5e3abd434a169d23c22ef8419
|
refs/heads/master
| 2021-12-11T17:24:21.136652
| 2021-08-17T10:25:01
| 2021-08-17T10:25:01
| 153,397,297
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
"""
Write a Python program to get the frequency of the elements in a list
"""
import collections
my_list = [10,10,10,10,20,20,20,20,40,40,50,50,30]
print("Original List : ",my_list)
ctr = collections.Counter(my_list)
print("Frequency of the elements in the List : ",ctr)
|
[
"seerasanjeev@gmail.com"
] |
seerasanjeev@gmail.com
|
ee5ad823b9c373e0a19790607fc0fe2e979a1d71
|
03a91e45aaa056d45d6b613c826aca4a4da12edf
|
/Lists.py
|
f7ad3467afb52c193d279bd8ccfbc02826a533b2
|
[] |
no_license
|
Anoosha-Shetty/Learn-Python
|
deb785a3557e364b505a6f6f584f7c0340556d16
|
9c733004c560225bb6d80aae60cf565e408161c0
|
refs/heads/master
| 2021-01-17T05:21:40.341096
| 2017-03-22T23:01:12
| 2017-03-22T23:01:12
| 61,144,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
fruits = ["Apple", "Banana", "Orange"]
print("fruits[0]: " , fruits[0])
print("fruits[1]: " , fruits[1])
print("fruits[2]: " , fruits[2])
print("Length of the list: " , len(fruits))
for i in range(len(fruits)):
pos = i + 1
pos = str(pos)
fruits[i] = pos + "." + " " + fruits[i] + "**"
print(fruits)
fruits.append("r74385742")
print(fruits)
fruits.insert(2, "Strawberry")
print(fruits)
fruits.extend(["4t2u9tghwign", "nruwg59tu", (4,5,7), "nruwg59tu"])
print(fruits)
del fruits[5]
print(fruits)
fruits.remove("nruwg59tu")
print(fruits)
del fruits[5]
print(fruits)
fruits.sort()
print("sorted list: " , fruits)
#note: the tuple had to be removed first; sort() raises a TypeError when strings and tuples are mixed
squares = [0,1,4,9,16,25,36,49,64,81,100]
print(squares)
print("squares(0:3) - " , squares[0:3])
print("squares(:5) - " , squares[:5])
print("squares(:) - " , squares[:])
print("squares(-1) - " , squares[-1])
print("squares(2:7) - " , squares[2:7])
#below command will delete 2 elements from the end
del squares[-2:]
print(squares)
for i in range(len(squares)):
print("Element : " , squares[i])
for i,j in enumerate(squares):
print("Element", i, " --> ", j)
|
[
"noreply@github.com"
] |
Anoosha-Shetty.noreply@github.com
|
44d5e9f7235af8fbb5cbafc29da89178edc03e76
|
9c2957e9e5c456c36cc1f4e15cbc8e97c1035830
|
/Chapters 1-11/Chapter 5/checking_usernames.py
|
15a32fd66f73f804987fbe1fdd6575acd3975906
|
[] |
no_license
|
beardedsamwise/Python-Crash-Course
|
fa88e846026d234011f44e2488b94c67116560ef
|
6e64e9b1cd5675ef62048446188cfde5d0799f28
|
refs/heads/master
| 2020-04-30T19:11:47.791817
| 2019-06-23T23:45:02
| 2019-06-23T23:45:02
| 177,031,562
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
current_users = ['peter','alex','john','steve','dave']
new_users = ['Ralph','Alex','Michael','Stewart','Reginald']
# build a lowercase copy of current_users for a case-insensitive comparison
current_users_lower = []
for users in current_users:
current_users_lower.append(users.lower())
for new_user in new_users:
if new_user.lower() in current_users_lower:
print("This username " + new_user + " is not available")
else:
print("The username " + new_user + " is available")
|
[
"samjamesbentley@gmail.com"
] |
samjamesbentley@gmail.com
|
46af2291173cadcb72ea56fa2c5ab0fd636c8a92
|
233eeb7ea61ff92c1a2230792a761626256805ba
|
/serializers/JsonSerializer.py
|
068997afff5052250bf4d967c20c9cdc640b2292
|
[] |
no_license
|
PowerOfDark/micro_tcp
|
6651984879352510a24d85d169901ec689c84801
|
f28ccb5378b03b2d77ff7443b0de7c2089cb20b0
|
refs/heads/master
| 2020-09-21T05:47:43.223863
| 2019-11-28T18:55:53
| 2019-11-28T18:55:53
| 224,699,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
import json
from serializers.Serializer import Serializer
class JsonSerializer(Serializer):
def __init__(self):
super().__init__()
def serialize(self, payload):
return json.dumps(payload)
def deserialize(self, payload):
return json.loads(payload)
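# Minimal usage sketch (illustration only):
# serializer = JsonSerializer()
# wire = serializer.serialize({"op": "ping", "seq": 1})
# assert serializer.deserialize(wire) == {"op": "ping", "seq": 1}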
|
[
"przem2003@o2.pl"
] |
przem2003@o2.pl
|
bcefaa1d1848f6a61eba54b36b4a2f7e18e44c95
|
e0bd2e00cbdfb758872a2603b0ccf68d4c882309
|
/vid_addons/l10n_in_dealers_discount/l10n_in_dealers_discount.py
|
828f1a1aaf7bd16b63ef73d61926d3f0fa379a2b
|
[] |
no_license
|
vineeth993/NiceBackUp
|
58fdc5cc11df50ec5baf20bce2806e1a5b617475
|
303e26a9b7e4e731ba13033cd15e754578dfab08
|
refs/heads/master
| 2021-07-16T23:12:22.909970
| 2021-06-19T09:20:28
| 2021-06-19T09:20:28
| 111,262,234
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,639
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
_columns = {
'price_dealer': fields.float('Dealer Price', readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'dealer_discount': fields.float('Dealer Discount', readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'dealer_discount_per': fields.float('Dealer Discount (%)', readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
}
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line=line, account_id=account_id, context=context)
res = dict(res, price_dealer=line.price_dealer * line.product_uom_qty, dealer_discount=line.dealer_discount * line.product_uom_qty, dealer_discount_per=line.dealer_discount_per/100)
return res
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
'''
        The purpose of this function is to get the values of price unit, list price and packing amount on product change.
        :return: the list price, price unit and packing amount values.
:rtype: dictionary
'''
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if context is None:
context = {}
dealer_id = context.get('dealer_id')
dealer_pricelist_id = context.get('dealer_pricelist_id')
if dealer_id and dealer_pricelist_id:
dealer_res = super(sale_order_line, self).product_id_change(cr, uid, ids, dealer_pricelist_id, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=dealer_id,
lang=lang, update_tax=False, date_order=date_order, packaging=False, fiscal_position=fiscal_position, flag=flag, context=context)
price_unit = res['value']['price_unit']
price_dealer = dealer_res['value']['price_unit']
dealer_discount = price_unit - price_dealer
res['value']['price_dealer'] = price_dealer
res['value']['dealer_discount'] = dealer_discount
res['value']['dealer_discount_per'] = (dealer_discount * 100) / price_unit
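# Worked example (illustration): price_unit=200.0 and a dealer price of 150.0
# give dealer_discount=50.0 and dealer_discount_per=(50.0*100)/200.0=25.0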
return res
class sale_order(osv.Model):
_inherit = 'sale.order'
_columns = {
'dealer_id': fields.many2one('res.partner', 'Dealer'),
'dealer_pricelist_id': fields.many2one('product.pricelist', 'Dealer Pricelist', domain=[('type','=','sale')])
}
def onchange_dealer_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value': {'dealer_pricelist_id': False}}
val = {}
part = self.pool.get('res.partner').browse(cr, uid, part, context=context)
pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
if pricelist:
val['dealer_pricelist_id'] = pricelist
return {'value': val}
def _get_default_values(self, cr, uid, preline, context=None):
res = super(sale_order, self)._get_default_values(cr, uid, preline=preline, context=context)
res = dict(res,
price_dealer = -preline.price_dealer,
dealer_discount = -preline.dealer_discount,
dealer_discount_per = -preline.dealer_discount_per
)
return res
def _make_invoice(self, cr, uid, order, lines, context=None):
inv_obj = self.pool.get('account.invoice')
obj_invoice_line = self.pool.get('account.invoice.line')
if context is None:
context = {}
invoiced_sale_line_ids = self.pool.get('sale.order.line').search(cr, uid, [('order_id', '=', order.id), ('invoiced', '=', True)], context=context)
from_line_invoice_ids = []
for invoiced_sale_line_id in self.pool.get('sale.order.line').browse(cr, uid, invoiced_sale_line_ids, context=context):
for invoice_line_id in invoiced_sale_line_id.invoice_lines:
if invoice_line_id.invoice_id.id not in from_line_invoice_ids:
from_line_invoice_ids.append(invoice_line_id.invoice_id.id)
for preinv in order.invoice_ids:
if preinv.state not in ('cancel',) and preinv.id not in from_line_invoice_ids:
for preline in preinv.invoice_line:
res = self._get_default_values(cr, uid, preline, context=context)
inv_line_id = obj_invoice_line.copy(cr, uid, preline.id, res, context=context)
lines.append(inv_line_id)
inv = self._prepare_invoice(cr, uid, order, lines, context=context)
inv.update({
'dealer_id':order.dealer_id.id
})
inv_id = inv_obj.create(cr, uid, inv, context=context)
data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv['payment_term'], time.strftime(DEFAULT_SERVER_DATE_FORMAT))
if data.get('value', False):
inv_obj.write(cr, uid, [inv_id], data['value'], context=context)
inv_obj.button_compute(cr, uid, [inv_id])
return inv_id
def _prepare_order_line_move(self, cr, uid, order, line, picking_id, date_planned, context=None):
res = super(sale_order, self)._prepare_order_line_move(cr, uid, order=order, line=line, picking_id=picking_id, date_planned=date_planned, context=context)
res = dict(res, price_dealer = line.price_dealer, dealer_discount=line.dealer_discount, dealer_discount_per=line.dealer_discount_per)
return res
def _prepare_invoice(self, cr, uid, order, lines, context=None):
invoice_vals = super(sale_order, self)._prepare_invoice(cr, uid, order, lines, context=context)
invoice_vals.update({
'dealer_id': order.dealer_id.id,
'dealer_pricelist_id': order.dealer_pricelist_id.id  # pass the id, not the browse record
})
return invoice_vals
class sale_advance_payment_inv(osv.osv_memory):
_inherit = 'sale.advance.payment.inv'
def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):
if context is None:
context = {}
result = super(sale_advance_payment_inv, self)._prepare_advance_invoice_vals(cr, uid, ids, context)
sale_obj = self.pool.get('sale.order')
wizard = self.browse(cr, uid, ids[0], context)
sale_ids = context.get('active_ids', [])
update_val = {}
for sale in sale_obj.browse(cr, uid, sale_ids, context=context):
total_price_dealer = total_dealer_discount = 0.0
price_dealer = dealer_discount = 0.0
for line in sale.order_line:
total_price_dealer += line.price_dealer * line.product_uom_qty
total_dealer_discount += line.dealer_discount * line.product_uom_qty
res = {}
total_amount = 0.0
if wizard.advance_payment_method == 'percentage':
price_dealer = total_price_dealer * (wizard.amount / 100)
dealer_discount = total_dealer_discount * (wizard.amount / 100)
total_amount = (sale.amount_total * wizard.amount) / 100
else:
inv_amount = wizard.amount
percent = inv_amount / sale.amount_total
total_amount = inv_amount
price_dealer = total_price_dealer * percent
dealer_discount = total_dealer_discount * percent
res['price_dealer'] = price_dealer
res['dealer_discount'] = dealer_discount
res['dealer_discount_per'] = dealer_discount / total_amount
update_val[sale.id] = res
#TODO: needs to be re-implemented in a cleaner way
for line in result:
line[1].get('invoice_line')[0][2].update(update_val.get(line[0]))
return result
sale_advance_payment_inv()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"vineeth@vidts.in"
] |
vineeth@vidts.in
|
532a4bf5539dd341150d7a4edec95945232e13fe
|
4b447c626792d9d38de26bcfcc1ad29de77c4b4f
|
/run_cross_domain_disen_sGRL_NoAE_cyc.py
|
b18d35507a0927e335e49acbf50f23bf5206af3f
|
[
"MIT"
] |
permissive
|
jordi-bird/cross-domain-disen
|
d8408dfb0cd6db0c7e5afadd669a72af48a734c6
|
f49422d2defde5e1c88ea5bb7d2989b87fd49ce5
|
refs/heads/master
| 2020-04-23T23:50:32.100687
| 2019-05-08T13:28:24
| 2019-05-08T13:28:24
| 171,547,670
| 0
| 0
| null | 2019-02-19T20:50:56
| 2019-02-19T20:50:56
| null |
UTF-8
|
Python
| false
| false
| 25,766
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import scipy.io as sio
import argparse
import os
import json
import glob
import random
import collections
import math
import time
from ops import *
from model_sGRL_NoAE_cyc import create_model
import os
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", help="path to folder containing images")
# Added features mode to extract features for retrieval
parser.add_argument("--mode", required=True, choices=["train", "test", "features"])
parser.add_argument("--output_dir", required=True, help="where to put output files")
parser.add_argument("--seed", type=int)
parser.add_argument("--checkpoint", default=None,
help="directory with checkpoint to resume training from or use for testing")
parser.add_argument("--max_steps", type=int, help="number of training steps (0 to disable)")
parser.add_argument("--max_epochs", type=int, help="number of training epochs")
parser.add_argument("--summary_freq", type=int, default=30, help="update summaries every summary_freq steps")
parser.add_argument("--progress_freq", type=int, default=50, help="display progress every progress_freq steps")
parser.add_argument("--trace_freq", type=int, default=0, help="trace execution every trace_freq steps")
parser.add_argument("--display_freq", type=int, default=0,
help="write current training images every display_freq steps")
parser.add_argument("--save_freq", type=int, default=5000, help="save model every save_freq steps, 0 to disable")
parser.add_argument("--separable_conv", action="store_true", help="use separable convolutions in the generator")
parser.add_argument("--aspect_ratio", type=float, default=1.0, help="aspect ratio of output images (width/height)")
parser.add_argument("--lab_colorization", action="store_true",
help="split input image into brightness (A) and color (B)")
parser.add_argument("--batch_size", type=int, default=8, help="number of images in batch")
parser.add_argument("--which_direction", type=str, default="AtoB", choices=["AtoB", "BtoA"])
parser.add_argument("--ngf", type=int, default=64, help="number of generator filters in first conv layer")
parser.add_argument("--ndf", type=int, default=64, help="number of discriminator filters in first conv layer")
parser.add_argument("--scale_size", type=int, default=256, help="scale images to this size before cropping to 256x256")
parser.add_argument("--flip", dest="flip", action="store_true", help="flip images horizontally")
parser.add_argument("--no_flip", dest="flip", action="store_false", help="don't flip images horizontally")
parser.set_defaults(flip=False)
parser.add_argument("--lr", type=float, default=0.0002, help="initial learning rate for adam")
parser.add_argument("--beta1", type=float, default=0.5, help="momentum term of adam")
parser.add_argument("--l1_weight", type=float, default=100.0, help="weight on L1 term for generator gradient")
parser.add_argument("--gan_weight", type=float, default=1.0, help="weight on GAN term for generator gradient")
# Cross-domain-disen new arguments
parser.add_argument("--gan_exclusive_weight", type=float, default=0.1,
help="weight on GAN term for exclusive generator gradient")
parser.add_argument("--classifier_shared_weight", type=float, default=0.2,
help="weight on domain classifier term")
parser.add_argument("--cyc_weight", type=float, default=100,
help="weight on cycle consistency term")
parser.add_argument("--noise", type=float, default=0.1, help="Stddev for noise input into representation")
# export options
parser.add_argument("--output_filetype", default="png", choices=["png", "jpeg"])
a = parser.parse_args()
CROP_SIZE = 256
Examples = collections.namedtuple("Examples", "paths, inputsX, inputsY, count, steps_per_epoch")
def load_examples():
if a.input_dir is None or not os.path.exists(a.input_dir):
raise Exception("input_dir does not exist")
input_paths = glob.glob(os.path.join(a.input_dir, "*.jpg"))
decode = tf.image.decode_jpeg
if len(input_paths) == 0:
input_paths = glob.glob(os.path.join(a.input_dir, "*.png"))
decode = tf.image.decode_png
if len(input_paths) == 0:
raise Exception("input_dir contains no image files")
def get_name(path):
name, _ = os.path.splitext(os.path.basename(path))
return name
# if the image names are numbers, sort by the value rather than asciibetically
# having sorted inputs means that the outputs are sorted in test mode
if all(get_name(path).isdigit() for path in input_paths):
input_paths = sorted(input_paths, key=lambda path: int(get_name(path)))
else:
input_paths = sorted(input_paths)
with tf.name_scope("load_images"):
path_queue = tf.train.string_input_producer(input_paths, shuffle=a.mode == "train")
reader = tf.WholeFileReader()
paths, contents = reader.read(path_queue)
raw_input = decode(contents)
raw_input = tf.image.convert_image_dtype(raw_input, dtype=tf.float32)
assertion = tf.assert_equal(tf.shape(raw_input)[2], 3, message="image does not have 3 channels")
with tf.control_dependencies([assertion]):
raw_input = tf.identity(raw_input)
raw_input.set_shape([None, None, 3])
if a.lab_colorization:
# load color and brightness from image, no B image exists here
lab = rgb_to_lab(raw_input)
L_chan, a_chan, b_chan = preprocess_lab(lab)
a_images = tf.expand_dims(L_chan, axis=2)
b_images = tf.stack([a_chan, b_chan], axis=2)
else:
# break apart image pair and move to range [-1, 1]
width = tf.shape(raw_input)[1] # [height, width, channels]
a_images = preprocess(raw_input[:, :width // 2, :])
b_images = preprocess(raw_input[:, width // 2:, :])
# No longer in terms of input/target, but bidirectionally on domains X and Y
inputsX, inputsY = [a_images, b_images]
# synchronize seed for image operations so that we do the same operations to both
# input and output images
seed = random.randint(0, 2 ** 31 - 1)
def transform(image):
r = image
if a.flip:
r = tf.image.random_flip_left_right(r, seed=seed)
# area produces a nice downscaling, but does nearest neighbor for upscaling
# assume we're going to be doing downscaling here
r = tf.image.resize_images(r, [a.scale_size, a.scale_size], method=tf.image.ResizeMethod.AREA)
offset = tf.cast(tf.floor(tf.random_uniform([2], 0, a.scale_size - CROP_SIZE + 1, seed=seed)), dtype=tf.int32)
if a.scale_size > CROP_SIZE:
r = tf.image.crop_to_bounding_box(r, offset[0], offset[1], CROP_SIZE, CROP_SIZE)
elif a.scale_size < CROP_SIZE:
raise Exception("scale size cannot be less than crop size")
return r
with tf.name_scope("inputX_images"):
inputX_images = transform(inputsX)
with tf.name_scope("inputY_images"):
inputY_images = transform(inputsY)
paths_batch, inputsX_batch, inputsY_batch = tf.train.batch([paths, inputX_images, inputY_images],
batch_size=a.batch_size)
# shuffle Y images to obtain unpaired data
inputsY_batch = tf.random.shuffle(inputsY_batch)
steps_per_epoch = int(math.ceil(len(input_paths) / a.batch_size))
return Examples(
paths=paths_batch,
inputsX=inputsX_batch,
inputsY=inputsY_batch,
count=len(input_paths),
steps_per_epoch=steps_per_epoch,
)
def save_images(fetches, step=None):
image_dir = os.path.join(a.output_dir, "images")
if not os.path.exists(image_dir):
os.makedirs(image_dir)
filesets = []
for i, in_path in enumerate(fetches["paths"]):
name, _ = os.path.splitext(os.path.basename(in_path.decode("utf8")))
fileset = {"name": name, "step": step}
all_kinds = ["inputsX", "outputsX2Y", "outputsX2Yp",
"im_swapped_X", "sel_auto_X", "inputsY",
"outputsY2X", "outputsY2Xp", "im_swapped_Y", "sel_auto_Y",
"cycX_output", "cycY_output"]
for kind in all_kinds:
filename = name + "-" + kind + ".png"
if step is not None:
filename = "%08d-%s" % (step, filename)
fileset[kind] = filename
out_path = os.path.join(image_dir, filename)
contents = fetches[kind][i]
with open(out_path, "wb") as f:
f.write(contents)
filesets.append(fileset)
return filesets
def save_features(fetches, step=None):
image_dir = os.path.join(a.output_dir, "features")
if not os.path.exists(image_dir):
os.makedirs(image_dir)
filesets = []
for i, in_path in enumerate(fetches["paths"]):
name, _ = os.path.splitext(os.path.basename(in_path.decode("utf8")))
fileset = {"name": name, "step": step}
filename = name + ".mat"
out_path = os.path.join(image_dir, filename)
sio.savemat(out_path, {'inX': fetches["inputsX"][i],
'inY': fetches["inputsY"][i],
'sR_X2Y': fetches["sR_X2Y"][i],
'sR_Y2X': fetches["sR_Y2X"][i],
'eR_X2Y': fetches["eR_X2Y"][i],
'eR_Y2X': fetches["eR_Y2X"][i]})
return filesets
def append_index(filesets, step=False):
index_path = os.path.join(a.output_dir, "index.html")
if os.path.exists(index_path):
index = open(index_path, "a")
else:
index = open(index_path, "w")
index.write("<html><body><table><tr>")
if step:
index.write("<th>step</th>")
index.write(
"<th>name</th><th>inX</th><th>out(1)</th><th>out(2)</th><th>swap</th><th>randomimage</th><th>cyc</th><th>inY</th><th>out(1)</th><th>out(2)</th><th>swap</th><th>rnd</th><th>cyc</th></tr>")
for fileset in filesets:
index.write("<tr>")
if step:
index.write("<td>%d</td>" % fileset["step"])
index.write("<td>%s</td>" % fileset["name"])
all_kinds = ["inputsX", "outputsX2Y", "outputsX2Yp",
"im_swapped_X", "sel_auto_X","cycX_output", "inputsY",
"outputsY2X", "outputsY2Xp", "im_swapped_Y", "sel_auto_Y", "cycY_output"]
for kind in all_kinds:
index.write("<td><img src='images/%s'></td>" % fileset[kind])
index.write("</tr>")
return index_path
def main():
if a.seed is None:
a.seed = random.randint(0, 2 ** 31 - 1)
tf.set_random_seed(a.seed)
np.random.seed(a.seed)
random.seed(a.seed)
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
if a.mode == "test" or a.mode == "features":
if a.checkpoint is None:
raise Exception("checkpoint required for test mode")
# load some options from the checkpoint
options = {"which_direction", "ngf", "ndf", "lab_colorization"}
with open(os.path.join(a.checkpoint, "options.json")) as f:
for key, val in json.loads(f.read()).items():
if key in options:
print("loaded", key, "=", val)
setattr(a, key, val)
# disable these features in test mode
a.scale_size = CROP_SIZE
a.flip = False
for k, v in a._get_kwargs():
print(k, "=", v)
with open(os.path.join(a.output_dir, "options.json"), "w") as f:
f.write(json.dumps(vars(a), sort_keys=True, indent=4))
examples = load_examples()
print("examples count = %d" % examples.count)
# inputs and targets are [batch_size, height, width, channels]
model = create_model(examples.inputsX, examples.inputsY, a)
# undo colorization splitting on images that we use for display/output
inputsX = deprocess(examples.inputsX)
inputsY = deprocess(examples.inputsY)
outputsX2Y = deprocess(model.outputsX2Y)
outputsY2X = deprocess(model.outputsY2X)
outputsX2Yp = deprocess(model.outputsX2Yp)
outputsY2Xp = deprocess(model.outputsY2Xp)
outputs_exclusiveX2Y = deprocess(model.outputs_exclusiveX2Y)
outputs_exclusiveY2X = deprocess(model.outputs_exclusiveY2X)
im_swapped_X = deprocess(model.im_swapped_X)
im_swapped_Y = deprocess(model.im_swapped_Y)
sel_auto_X = deprocess(model.sel_auto_X)
sel_auto_Y = deprocess(model.sel_auto_Y)
sR_X2Y = model.sR_X2Y
sR_Y2X = model.sR_Y2X
eR_X2Y = model.eR_X2Y
eR_Y2X = model.eR_Y2X
cycX_output = deprocess(model.cycX_output)
cycY_output = deprocess(model.cycY_output)
def convert(image):
if a.aspect_ratio != 1.0:
# upscale to correct aspect ratio
size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
image = tf.image.resize_images(image, size=size, method=tf.image.ResizeMethod.BICUBIC)
return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)
# reverse any processing on images so they can be written to disk or displayed to user
with tf.name_scope("convert_inputsX"):
converted_inputsX = convert(inputsX)
with tf.name_scope("convert_inputsY"):
converted_inputsY = convert(inputsY)
with tf.name_scope("convert_outputsX2Y"):
converted_outputsX2Y = convert(outputsX2Y)
with tf.name_scope("convert_outputsY2X"):
converted_outputsY2X = convert(outputsY2X)
with tf.name_scope("convert_outputsX2Yp"):
converted_outputsX2Yp = convert(outputsX2Yp)
with tf.name_scope("convert_outputsY2Xp"):
converted_outputsY2Xp = convert(outputsY2Xp)
with tf.name_scope("convert_outputs_exclusiveX2Y"):
converted_outputs_exclusiveX2Y = convert(outputs_exclusiveX2Y)
with tf.name_scope("convert_outputs_exclusiveY2X"):
converted_outputs_exclusiveY2X = convert(outputs_exclusiveY2X)
with tf.name_scope("convert_im_swapped_Y"):
converted_im_swapped_Y = convert(im_swapped_Y)
with tf.name_scope("convert_sel_auto_Y"):
converted_sel_auto_Y = convert(sel_auto_Y)
with tf.name_scope("convert_im_swapped_X"):
converted_im_swapped_X = convert(im_swapped_X)
with tf.name_scope("convert_sel_auto_X"):
converted_sel_auto_X = convert(sel_auto_X)
with tf.name_scope("convert_cycX_output"):
converted_cycX_output = convert(cycX_output)
with tf.name_scope("convert_cycY_output"):
converted_cycY_output = convert(cycY_output)
with tf.name_scope("encode_images"):
display_fetches = {
"paths": examples.paths,
"inputsX": tf.map_fn(tf.image.encode_png, converted_inputsX, dtype=tf.string, name="inputX_pngs"),
"inputsY": tf.map_fn(tf.image.encode_png, converted_inputsY, dtype=tf.string, name="inputY_pngs"),
"outputsX2Y": tf.map_fn(tf.image.encode_png, converted_outputsX2Y, dtype=tf.string, name="outputX2Y_pngs"),
"outputsY2X": tf.map_fn(tf.image.encode_png, converted_outputsY2X, dtype=tf.string, name="outputY2X_pngs"),
"outputsX2Yp": tf.map_fn(tf.image.encode_png, converted_outputsX2Yp, dtype=tf.string,
name="outputX2Yp_pngs"),
"outputsY2Xp": tf.map_fn(tf.image.encode_png, converted_outputsY2Xp, dtype=tf.string,
name="outputY2Xp_pngs"),
"outputs_exclusiveX2Y": tf.map_fn(tf.image.encode_png, converted_outputs_exclusiveX2Y, dtype=tf.string,
name="output_exclusiveX2Y_pngs"),
"outputs_exclusiveY2X": tf.map_fn(tf.image.encode_png, converted_outputs_exclusiveY2X, dtype=tf.string,
name="output_exclusiveY2X_pngs"),
"im_swapped_Y": tf.map_fn(tf.image.encode_png, converted_im_swapped_Y, dtype=tf.string,
name="im_swapped_Y_pngs"),
"sel_auto_Y": tf.map_fn(tf.image.encode_png, converted_sel_auto_Y, dtype=tf.string, name="sel_auto_Y_pngs"),
"im_swapped_X": tf.map_fn(tf.image.encode_png, converted_im_swapped_X, dtype=tf.string,
name="im_swapped_X_pngs"),
"sel_auto_X": tf.map_fn(tf.image.encode_png, converted_sel_auto_X, dtype=tf.string, name="sel_auto_X_pngs"),
"cycX_output": tf.map_fn(tf.image.encode_png, converted_cycX_output, dtype=tf.string, name="cycX_output_pngs"),
"cycY_output": tf.map_fn(tf.image.encode_png, converted_cycY_output, dtype=tf.string, name="cycY_output_pngs")
}
with tf.name_scope("extract_features"):
features_fetches = {
"paths": examples.paths,
"inputsX": converted_inputsX,
"sR_X2Y": sR_X2Y,
"eR_X2Y": eR_X2Y,
"inputsY": converted_inputsY,
"sR_Y2X": sR_Y2X,
"eR_Y2X": eR_Y2X,
}
# summaries
with tf.name_scope("X1_input_summary"):
tf.summary.image("inputsX", converted_inputsX, max_outputs=3)
with tf.name_scope("Y1_input_summary"):
tf.summary.image("inputsY", converted_inputsY, max_outputs=3)
with tf.name_scope("X2Y_output_summary"):
tf.summary.image("outputsX2Y", converted_outputsX2Y, max_outputs=3)
with tf.name_scope("Y2X_output2_summary"):
tf.summary.image("outputsY2X", converted_outputsY2X, max_outputs=3)
with tf.name_scope("swapped_1Y_summary"):
tf.summary.image("im_swapped_Y", converted_im_swapped_Y, max_outputs=3)
tf.summary.image("sel_auto_Y", converted_sel_auto_Y, max_outputs=3)
with tf.name_scope("swapped_2X_summary"):
tf.summary.image("im_swapped_X", converted_im_swapped_X, max_outputs=3)
tf.summary.image("sel_auto_X", converted_sel_auto_X, max_outputs=3)
with tf.name_scope("otherNoise_output_summary"):
tf.summary.image("outputsX2Yp", converted_outputsX2Yp, max_outputs=3)
tf.summary.image("outputsY2Xp", converted_outputsY2Xp, max_outputs=3)
with tf.name_scope("zzexclusive_X2Y_summary"):
tf.summary.image("outputsX2Y", converted_outputs_exclusiveX2Y, max_outputs=3)
with tf.name_scope("zzexclusive_Y2X_summary"):
tf.summary.image("outputsY2X", converted_outputs_exclusiveY2X, max_outputs=3)
with tf.name_scope("cycX_output_summary"):
tf.summary.image("cycX_output", converted_cycX_output, max_outputs=3)
with tf.name_scope("cycY_output_summary"):
tf.summary.image("cycY_output", converted_cycY_output, max_outputs=3)
tf.summary.scalar("discriminatorX2Y_loss", model.discrimX2Y_loss)
tf.summary.scalar("discriminatorY2X_loss", model.discrimY2X_loss)
tf.summary.scalar("generatorX2Y_loss", model.genX2Y_loss)
tf.summary.scalar("generatorY2X_loss", model.genY2X_loss)
tf.summary.scalar("generator_exclusiveX2Y_loss", model.gen_exclusiveX2Y_loss)
tf.summary.scalar("discriminator_exclusiveX2Y_loss", model.discrim_exclusiveX2Y_loss)
tf.summary.scalar("generator_exclusiveY2X_loss", model.gen_exclusiveY2X_loss)
tf.summary.scalar("discriminator_exclusiveY2X_loss", model.discrim_exclusiveY2X_loss)
tf.summary.scalar("discriminator_sharedX2Y_loss", model.discrim_sharedX2Y_loss)
tf.summary.scalar("discriminator_sharedY2X_loss", model.discrim_sharedY2X_loss)
tf.summary.scalar("code_sR_X2Y_recon_loss", model.code_sR_X2Y_recon_loss)
tf.summary.scalar("code_sR_Y2X_recon_loss", model.code_sR_Y2X_recon_loss)
tf.summary.scalar("code_eR_X2Y_recon_loss", model.code_eR_X2Y_recon_loss)
tf.summary.scalar("code_eR_Y2X_recon_loss", model.code_eR_Y2X_recon_loss)
tf.summary.scalar("code_recon_loss", model.code_recon_loss)
tf.summary.scalar("cycX_loss", model.cycX_loss)
tf.summary.scalar("cycX_loss", model.cycY_loss)
# for var in tf.trainable_variables():
# tf.summary.histogram(var.op.name + "/values", var)
# for grad, var in model.discrimX2Y_grads_and_vars + model.genX2Y_grads_and_vars:
# tf.summary.histogram(var.op.name + "/gradientsX2Y", grad)
# for grad, var in model.discrimY2X_grads_and_vars + model.genY2X_grads_and_vars:
# tf.summary.histogram(var.op.name + "/gradientsY2X", grad)
with tf.name_scope("parameter_count"):
parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])
saver = tf.train.Saver(max_to_keep=1)
logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
with sv.managed_session(config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True)) as sess:
print("parameter_count =", sess.run(parameter_count))
if a.checkpoint is not None:
print("loading model from checkpoint")
checkpoint = tf.train.latest_checkpoint(a.checkpoint)
saver.restore(sess, checkpoint)
max_steps = 2 ** 31 - 1
if a.max_epochs is not None:
max_steps = examples.steps_per_epoch * a.max_epochs
if a.max_steps is not None:
max_steps = a.max_steps
if a.mode == "test":
# testing
# at most, process the test data once
start = time.time()
max_steps = min(examples.steps_per_epoch, max_steps)
for step in range(max_steps):
results = sess.run(display_fetches)
filesets = save_images(results)
for i, f in enumerate(filesets):
print("evaluated image", f["name"])
index_path = append_index(filesets)
print("wrote index at", index_path)
print("rate", (time.time() - start) / max_steps)
elif a.mode == "features":
max_steps = min(examples.steps_per_epoch, max_steps)
for step in range(max_steps):
results = sess.run(features_fetches)
save_features(results)
else:
# training
start = time.time()
for step in range(max_steps):
def should(freq):
return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)
options = None
run_metadata = None
if should(a.trace_freq):
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
fetches = {
"train": model.train,
"global_step": sv.global_step,
}
if should(a.progress_freq):
fetches["discrimX2Y_loss"] = model.discrimX2Y_loss
fetches["discrimY2X_loss"] = model.discrimY2X_loss
fetches["genX2Y_loss"] = model.genX2Y_loss
fetches["genY2X_loss"] = model.genY2X_loss
fetches["code_recon_loss"] = model.code_recon_loss
fetches["cycX_loss"] = model.cycX_loss
fetches["cycY_loss"] = model.cycY_loss
if should(a.summary_freq):
fetches["summary"] = sv.summary_op
if should(a.display_freq):
fetches["display"] = display_fetches
results = sess.run(fetches, options=options, run_metadata=run_metadata)
if should(a.summary_freq):
print("recording summary")
sv.summary_writer.add_summary(results["summary"], results["global_step"])
if should(a.display_freq):
print("saving display images")
filesets = save_images(results["display"], step=results["global_step"])
append_index(filesets, step=True)
if should(a.trace_freq):
print("recording trace")
sv.summary_writer.add_run_metadata(run_metadata, "step_%d" % results["global_step"])
if should(a.progress_freq):
# global_step will have the correct step count if we resume from a checkpoint
train_epoch = math.ceil(results["global_step"] / examples.steps_per_epoch)
train_step = (results["global_step"] - 1) % examples.steps_per_epoch + 1
rate = (step + 1) * a.batch_size / (time.time() - start)
remaining = (max_steps - step) * a.batch_size / rate
print("progress epoch %d step %d image/sec %0.1f remaining %dm" % (
train_epoch, train_step, rate, remaining / 60))
print("discrimX2Y_loss", results["discrimX2Y_loss"])
print("discrimY2X_loss", results["discrimY2X_loss"])
print("genX2Y_loss", results["genX2Y_loss"])
print("genY2X_loss", results["genY2X_loss"])
print("code_recon_loss", results["code_recon_loss"])
print("cycX_loss", results["cycX_loss"])
print("cycY_loss", results["cycY_loss"])
if should(a.save_freq):
print("saving model")
saver.save(sess, os.path.join(a.output_dir, "model"), global_step=sv.global_step)
if sv.should_stop():
break
main()
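# Example invocations (paths are assumed; flags come from the argparse
# definitions above):
#   python run_cross_domain_disen_sGRL_NoAE_cyc.py --mode train \
#       --input_dir data/pairs --output_dir runs/exp1 --max_epochs 100
#   python run_cross_domain_disen_sGRL_NoAE_cyc.py --mode features \
#       --input_dir data/pairs --output_dir runs/exp1_feats --checkpoint runs/exp1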
|
[
"jordi.puyoles.calafell@gmail.com"
] |
jordi.puyoles.calafell@gmail.com
|
164097886c1763fa882e36260fc4dd6d31deb96b
|
f291410c0162cb976e9b42c3ff5108bff9e4498a
|
/operators/position_suzanne.py
|
001dde4c5d10ecfaddad19dfa28b4fa09a7ce878
|
[] |
no_license
|
PawitraStudio/PSTools
|
03f20897ed3498f978621c25ad78ddd30b21db92
|
a34d15f6ab93697e4a5f3087970f544fcfa270a2
|
refs/heads/master
| 2021-06-04T06:17:21.835779
| 2018-11-07T05:47:44
| 2018-11-07T05:47:44
| 57,173,184
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
import bpy
class KMSPositionedSuzanne(bpy.types.Operator):
'Adjust monkey to sit on the ground'
bl_idname = 'ps.position_suz'
bl_label = 'Suzanne Sit'
@classmethod
def poll(cls, context):
return (context.mode == 'OBJECT')
def execute(self, context):
cloc = bpy.context.scene.cursor_location
objs = bpy.context.selected_objects
for o in objs:
if o.name == "Suzanne":
o.location.x = cloc.x
o.location.y = cloc.y
o.location.z = cloc.z+0.4955
bpy.ops.object.shade_smooth()
bpy.ops.object.subdivision_set(level=3)
bpy.context.object.modifiers["Subsurf"].render_levels = 3
bpy.context.object.rotation_euler.x = -0.6254132986068726
return {'FINISHED'}
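# Registration sketch (assumed; registration normally lives in the add-on's
# register()/unregister() hooks):
# bpy.utils.register_class(KMSPositionedSuzanne)
# After registration the operator is callable as bpy.ops.ps.position_suz()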
|
[
"aditia.ap@gmail.com"
] |
aditia.ap@gmail.com
|
cf13e49e8f4465317d1b5cc42220f8382bb2ab6b
|
04809a3617564ec5ce3251e1603fb99252ff1d88
|
/Assignment 1.py
|
59315cb28ac36f7ecd08bc24dcc8f0bc64acffa3
|
[] |
no_license
|
keshavshah2196/Consultadd
|
e1ce4e0b16dcbc3744b2a24eb80034f3b06a20f5
|
51f59acba33c72b8e1891cba353215951f76e502
|
refs/heads/master
| 2022-07-14T16:44:22.020502
| 2020-05-15T00:34:49
| 2020-05-15T00:34:49
| 263,134,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
#Q1
print("Q1")
a,b,c=5,7,"Ktsh"
print("a=",a)
print("b=",b)
print("c=",c)
print("_-----")
#Q2
print("Q2")
p=8+9j
q=67
print("Elements before swapping",p,q)
p,q=q,p
print("Elemens after swappind",p,q)
print("------")
#Q3
print("Q3")
d=46
f=7/5
print("Original values")
print("d=",d,"f=",f)
temp=d
d=f
f=temp
print("Values afetr swapping")
print("d=",d,"f=",f)
print("------")
#Q4
print("Q4")
print("------")
#Q5
print("Q5")
b=eval(input("Enter any number"))
j=eval(input("Enter any value"))
print("Additin of 2 values=",b+j)
print("Result number 2:",b+j+30)
print("------")
#Q6
print("Q6")
r=eval(input("Enter anu value"))
k=eval(input("Enetr any number or string"))
print("Data type for v1st variable is:",type(r))
print("Data type fpr 2nd variable is:",type(k))
print("------")
#Q7
print("Q7")
getValue=5/7
GetValue=9+8.6j
GETVALUE="Welcome to training"
print(getValue,GetValue,GETVALUE)
print("------")
#Q8
print("Q8")
print("Yes, the value assigned to the ‘a’ variable in the second time will be the final value because Python is “Imperative” language such that it changes the state of the variable every time the variable is defined.")
print("------")
|
[
"noreply@github.com"
] |
keshavshah2196.noreply@github.com
|
7052ee3967aad693dfc6f1cd72bf9eb072dfe7e9
|
850915135e59bf50efdaa02c2d13990529557daf
|
/models/__init__.py
|
ef9be443229d80b11727e20146d8f93b9715f1b7
|
[
"MIT"
] |
permissive
|
VamshiTeja/SMDL
|
9ab387a439f9ae5521273640b3835a1eb897258a
|
0dda36df9d0f2f921f365cc644f61dd81798693c
|
refs/heads/master
| 2022-11-29T09:23:39.755149
| 2020-11-27T08:54:57
| 2020-11-27T08:54:57
| 154,336,728
| 20
| 8
|
MIT
| 2022-11-22T03:14:42
| 2018-10-23T13:52:05
|
Python
|
UTF-8
|
Python
| false
| false
| 101
|
py
|
from .resnet import *
from .SimpleNet import *
from .imagenet_resnet import resnet34, resnet18_ILSVRC
|
[
"josephkj20@gmail.com"
] |
josephkj20@gmail.com
|
ea1ab922b20fdef95fef88c09c6746131b654f70
|
c174107bfe612602658ef7bce64983b04602c92f
|
/ConfidenceDM/decision_model.py
|
d087eee1dfe144ef629f3a2b07a52e3e0aef72bc
|
[] |
no_license
|
lucianopaz/unknownVarianceInferenceModel
|
eda0c50aa9c090c060032546cd0a36af3fdaf0db
|
887ef82556dddd330e30752355c3464fde8638c4
|
refs/heads/master
| 2018-09-10T12:08:32.464708
| 2018-06-05T10:02:17
| 2018-06-05T10:02:17
| 76,459,305
| 0
| 1
| null | 2017-02-15T13:59:38
| 2016-12-14T12:51:21
|
Python
|
UTF-8
|
Python
| false
| false
| 39,390
|
py
|
#!/usr/bin/python
#-*- coding: UTF-8 -*-
"""
Decision Model package
Defines the DecisionModel class, which computes the decision bounds,
belief values, maps belief and accumulated evidence, computes first
passage time probability distributions, convolutions with non-decision
time and belief to confidence mappings. This class implements the entire
decision model.
Author: Luciano Paz
Year: 2016
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
from scipy.signal import fftconvolve
from scipy import io
from scipy import optimize
import math, copy, sys, warnings
from .utils import normcdf, normcdfinv, normpdf, average_downsample
try:
from . import dmmodule
except ImportError:
raise ImportError('C++ extension module could not be imported, please '
'rebuild the package or submit an issue in github.')
class DecisionModel():
"""
Class that implements the dynamic programming method that optimizes
reward rate and computes the optimal decision bounds
"""
def __init__(self,model_var=None,internal_var=0.,external_var=None,
prior_mu_mean=0.,prior_mu_var=1.,n=500,dt=1e-2,T=10.,
reward=1.,penalty=0.,iti=1.,tp=0.,cost=0.05,discrete_prior=None,
prior_var_prob=None):
"""
Constructor input:
model_var = True variance rate of the process that generates samples.
If the variance is unknown by the DecisionModel, the model_var
can be a numpy.ndarray specifying possible values. Notice that
in this case, the prior_var_prob must be a numpy.ndarray of the
same shape as model_var. If model_var is None, its value is
computed from the internal_var and external_var inputs.
internal_var = A float that represents the internal variance rate.
If the input model_var is None, then its value is computed based
on internal_var and external_var.
external_var = A float or numpy.ndarray encoding the true external
variance underlying the sample generation. If external_var is
a numpy.ndarray, it is assumed to represent the posible
variances that can underly the sample generation and the
true variance is a priori unknown. If model_var is None,
the value of internal_var and external_var are used to set
model_var = internal_var + external_var
prior_mu_mean = Mean of the prior distribution on mu
prior_mu_var = Var of the prior distribution on mu
n = Discretization of belief space. Number of elements in g
dt = Time steps
T = Max time where the value is supposed to have converged already
reward = Numerical value of reward upon success
penalty = Numerical value of penalty upon failure
iti = Inter trial interval
tp = Penalty time added to iti after failure
cost = Cost of accumulating new evidence. Can be a float or a
numpy ndarray. See set_cost for details of the accepted costs.
discrete_prior = can be None or a tuple like (mu_prior,weight_prior)
mu_prior and weight_prior must be numpy 1-D arrays of the same shape
that hold the discrete set of possible positive mu values (mu_prior) and their
weights (weight_prior). The initialization then renormalizes the weights to
sum to 0.5, because the prior is assumed symmetric around zero.
"""
if internal_var is None and external_var is None:
self.model_var = model_var
self.internal_var = None
self.external_var = None
elif internal_var is None and not model_var is None:
self.model_var = model_var
self.internal_var = None
self.external_var = external_var
elif not internal_var is None:
self.model_var = internal_var+external_var
self.internal_var = internal_var
self.external_var = external_var
if self.model_var is None:
raise ValueError('The resulting model_var cannot be None. There are two alternatives to specify the model_var. 1) specify its value as the input "model_var". 2) specify "internal_var" and "external_var", in this case model_var=internal_var+external_var.')
elif not(isinstance(self.model_var,float) or isinstance(self.model_var,np.ndarray)):
raise ValueError('The resulting model_var must be a float or numpy.ndarray. Instead type(model_var)={0}'.format(type(self.model_var)))
elif isinstance(self.model_var,np.ndarray):
if self.model_var.ndim>1:
raise ValueError('The resulting model_var must be a 1 dimensional numpy.ndarray')
if discrete_prior:
temp = copy.deepcopy(discrete_prior)
self.mu_prior,self.weight_prior = temp
self.prior_mu_mean = 0.
self.weight_prior/=(2*np.sum(self.weight_prior))
self.prior_mu_var = np.sum(2*self.weight_prior*self.mu_prior**2)
self.prior_type = 2
warnings.warn("Discrete mu prior is still an experimental feauture with buggy behavior",FutureWarning)
else:
self.prior_mu_mean = prior_mu_mean
self.prior_mu_var = prior_mu_var
self.prior_type = 1
self.set_n(n)
self.dt = float(dt)
self.T = float(T)
self.nT = int(T/dt)+1
self.t = np.arange(0.,self.nT,dtype=np.float64)*self.dt
self.set_cost(cost)
self.reward = reward
self.penalty = penalty
self.iti = iti
self.tp = tp
self.rho = 0.
if not self.known_variance():
if prior_var_prob is None or self.model_var.shape!=prior_var_prob.shape:
raise ValueError('When model_var is an array (unknown variance), prior_var_prob must be an array with the same shape')
self.prior_var_prob = prior_var_prob/np.sum(prior_var_prob)
inds = self.model_var.argsort()
self.model_var = self.model_var[inds]
if isinstance(self.external_var,np.ndarray) and self.external_var.shape==self.model_var.shape:
self.external_var = self.external_var[inds]
if isinstance(self.internal_var,np.ndarray) and self.internal_var.shape==self.model_var.shape:
self.internal_var = self.internal_var[inds]
self.prior_var_prob = prior_var_prob[inds]/np.sum(prior_var_prob)
else:
self.prior_var_prob = None
def known_variance(self):
return isinstance(self.model_var,float) or self.model_var.size==1
def conjugate_mu_prior(self):
return self.prior_type==1
def __str__(self):
if hasattr(self,'_cost_details'):
_cost_details = self._cost_details
if _cost_details['type']<2:
cost = self._cost_details['details']
else:
cost = self.cost
else:
_cost_details = {'type':None,'details':None}
cost = self.cost
if hasattr(self,'bounds'):
bounds = self.bounds
else:
bounds = None
string = """
<{class_module}.{class_name} object at {address}>
model_var = {model_var}, internal_var = {internal_var}, external_var = {external_var},
prior_mu_mean = {prior_mu_mean}, prior_mu_var = {prior_mu_var}, prior_type = {prior_type},
dg = {dg}, n = {n}, dt = {dt}, nT = {nT}, T = {T},
reward = {reward}, penalty = {penalty}, iti = {iti}, tp = {tp}, rho = {rho},
cost_type = {cost_type}, cost = {cost},
bounds = {bounds}
""".format(class_module=self.__class__.__module__,
class_name=self.__class__.__name__,
address=hex(id(self)),
model_var=self.model_var,
internal_var=self.internal_var,
external_var=self.external_var,
prior_mu_mean=self.prior_mu_mean,
prior_mu_var=self.prior_mu_var,
prior_type=self.prior_type,
dg=self.dg,
n=self.n,
dt=self.dt,
nT=self.nT,
T=self.T,
reward=self.reward,
penalty=self.penalty,
iti=self.iti,
tp=self.tp,
rho=self.rho,
cost_type=_cost_details['type'],
cost=cost,
bounds=bounds)
return string
def set_n(self,n):
self.n = int(n)
if self.n%2==0:
self.n+=1
self.dg = 1./float(self.n)
self.g = np.linspace(self.dg/2.,1.-self.dg/2.,self.n)
def set_dt(self,dt):
oldt = self.t
self.dt = float(dt)
self.nT = int(self.T/self.dt)+1
self.t = np.arange(0.,self.nT,dtype=np.float64)*self.dt
if self._cost_details['type']==0:
self.set_constant_cost(self._cost_details['details'])
elif self._cost_details['type']==1:
self.set_polynomial_cost(self._cost_details['details'])
else:
self.cost = np.interp(self.t[:-1], oldt[:-1], self.cost)
def set_T(self,T):
self.T = float(T)
old_nT = self.nT
self.nT = int(self.T/self.dt)+1
self.t = np.arange(0.,self.nT,dtype=np.float64)*self.dt
if self._cost_details['type']==0:
self.set_constant_cost(self._cost_details['details'])
elif self._cost_details['type']==1:
self.set_polynomial_cost(self._cost_details['details'])
else:
old_cost = self.cost
self.cost = np.zeros_like(self.t)
self.cost[:old_nT-1] = old_cost
self.cost[old_nT-1:] = old_cost[-1]
def copy(self):
# Rebuild the discrete_prior tuple if one was used: __init__ stores it as
# the separate mu_prior/weight_prior arrays, never as self.discrete_prior.
if self.prior_type==2:
discrete_prior = (copy.deepcopy(self.mu_prior),copy.deepcopy(self.weight_prior))
else:
discrete_prior = None
out = DecisionModel(model_var=copy.deepcopy(self.model_var),
internal_var=copy.deepcopy(self.internal_var),
external_var=copy.deepcopy(self.external_var),
prior_mu_mean=self.prior_mu_mean,prior_mu_var=self.prior_mu_var,
n=self.n,dt=self.dt,T=self.T,reward=self.reward,
penalty=self.penalty,iti=self.iti,tp=self.tp,
cost=0,discrete_prior=discrete_prior,
prior_var_prob=self.prior_var_prob)
out.cost = copy.deepcopy(self.cost)
out._cost_details = copy.deepcopy(self._cost_details)
try:
out.value = copy.deepcopy(self.value)
except AttributeError:
pass
try:
out.bounds = copy.deepcopy(self.bounds)
except AttributeError:
pass
return out
return out
def set_internal_var(self,internal_var):
self.internal_var = internal_var
self.model_var = self.external_var + self.internal_var
def set_external_var(self,external_var):
self.external_var = external_var
self.model_var = self.external_var + self.internal_var
def set_cost(self,cost):
"""
This function constructs a DecisionModel's cost array of shape
(nT-1,).
Syntax:
self.set_cost(cost)
Input:
cost: a float or a numpy ndarray.
If cost is a float, self.cost is set as a numpy array with all
of its values equal to the supplied float.
If cost is a numpy ndarray, the cost array is constructed
in one of two ways. If the supplied cost's shape is equal to
(nT-1,) then the array is copied as is to the DecisionModel's
cost array. If the shape is not equal, then the supplied array
is assumed to hold the coefficients of a polynomial and
the cost array is constructed as numpy.polyval(cost,self.t[:-1]).
Related functions:
set_constant_cost, set_polynomial_cost, set_array_cost
"""
if isinstance(cost,np.ndarray) and not np.isscalar(cost):
s = cost.shape
if len(s)>1:
raise ValueError("Cost must be a scalar or a one dimensional numpy ndarray")
if s[0]==self.nT-1:
self.set_array_cost(cost)
else:
self.set_polynomial_cost(cost)
else:
self.set_constant_cost(cost)
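# A minimal usage sketch (not from the original source) of the three
# accepted cost inputs; `dm` is assumed to be an already built DecisionModel:
#   dm.set_cost(0.05)                  # constant cost per time step
#   dm.set_cost(np.array([0.1, 0.]))   # polynomial coefficients passed to polyval
#   dm.set_cost(np.ones(dm.nT - 1))    # explicit cost array of shape (nT-1,)
# Note that a 1-D array is treated as polynomial coefficients unless it
# happens to have exactly shape (nT-1,).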
def set_constant_cost(self,cost):
"""
self.set_constant_cost(cost)
Primitive function that sets the instance's cost array as
self.cost = float(cost)*numpy.ones(self.nT-1)
"""
self.cost = float(cost)*np.ones(self.nT-1)
self._cost_details = {'type':0,'details':cost}
#~ self.shift_cost()
def set_polynomial_cost(self,coefs):
"""
self.set_polynomial_cost(coefs)
Primitive function that sets the instance's cost array as
self.cost = numpy.polyval(coefs,self.t[:-1])
"""
self.cost = np.polyval(coefs,self.t[:-1])
self._cost_details = {'type':1,'details':coefs[:]}
#~ self.shift_cost()
def set_array_cost(self,cost,shift_cost=False):
"""
self.set_array_cost(cost)
Primitive function that sets the instance's cost array as
self.cost = cost[:]
"""
self.cost = cost[:]
self._cost_details = {'type':2,'details':None}
#~ if shift_cost:
#~ self.shift_cost()
def shift_cost(self):
"""
self.shift_cost()
Shift cost array rigidly until after the fixed_stim_duration
"""
index = (self.t>=self.fixed_stim_duration).nonzero()[0][0]
self.cost[index:] = self.cost[:(self.nT-index)]
self.cost[:index] = 0.
def post_mu_var(self,t,var_ind=None):
"""
Bayes update of the posterior variance at time t
post_mu_var(self,t,var_ind=None)
Input:
t: A float or numpy.ndarray that is the time at which the
posterior mu variance is desired.
"""
if var_ind is None:
return 1./(t/self.model_var+1./self.prior_mu_var)
else:
return 1./(t/self.model_var[var_ind]+1./self.prior_mu_var)
def post_mu_mean(self,t,x,var_ind=None):
"""
Bayes update of the posterior mean at time t with accumulated sample x
"""
if var_ind is None:
return (x/self.model_var+self.prior_mu_mean/self.prior_mu_var)*self.post_mu_var(t)
else:
return (x/self.model_var[var_ind]+self.prior_mu_mean/self.prior_mu_var)*self.post_mu_var(t,var_ind=var_ind)
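# A hedged numerical sketch of the conjugate update above (values made up):
# with prior_mu_mean=0, prior_mu_var=1, model_var=2 and t=4,
#   post_mu_var = 1/(4/2 + 1/1) = 1/3,
# and an accumulated sample x=6 then gives
#   post_mu_mean = (6/2 + 0/1) * (1/3) = 1.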
def x2g(self,t,x):
"""
Mapping from accumulated sample x at time t to belief
self.x2g(t,x)
Input:
t and x: Can be floats or numpy arrays that can be broadcasted
together.
Output: A numpy array or float depending on the inputs with the
values of the x to g mapping at times t and points x.
"""
if self.conjugate_mu_prior():
if self.known_variance(): # Simplest case known variance conjugate mu prior
return normcdf(self.post_mu_mean(t,x)/np.sqrt(self.post_mu_var(t)))
else: # Unknown variance but conjugate mu prior
num = 0.
den = 0.
for var_ind,mvar in enumerate(self.model_var):
st = np.sqrt(self.post_mu_var(t,var_ind))
pst = self.prior_var_prob[var_ind]*st
num+=(pst*normcdf(self.post_mu_mean(t,x,var_ind)/st))
den+=pst
return num/den
else: # Discrete mu prior but known variance
num = 0.
den = 0.
t_over_model_var = t/self.model_var
x_over_model_var = x/self.model_var
for mu, weight in zip(self.mu_prior,self.weight_prior):
alpha_expmu2t = weight*np.exp(-0.5*mu**2*t_over_model_var)
num+= alpha_expmu2t*np.exp(-mu*x_over_model_var)
den+= alpha_expmu2t*np.exp(mu*x_over_model_var)
return 1./(1.+num/den)
def dx2g(self,t,x):
"""
Derivative of the mapping from accumulated sample x at time t to belief
self.dx2g(t,x)
Input:
t and x: Can be floats or numpy arrays that can be broadcasted
together.
Output: A numpy array or float depending on the inputs with the
derivative of the x to g mapping at times t and points x.
"""
if self.conjugate_mu_prior():
if self.known_variance(): # Simplest case known variance conjugate mu prior
return np.exp(-0.5*self.post_mu_mean(t,x)**2/self.post_mu_var(t))*np.sqrt(self.post_mu_var(t)/(2*np.pi))/self.model_var
else: # Unknown variance but conjugate mu prior
num = 0.
den = 0.
for var_ind,mvar in enumerate(self.model_var):
vt = self.post_mu_var(t,var_ind)
st = np.sqrt(vt)
p = self.prior_var_prob[var_ind]
num+=(p*vt/mvar*np.exp(-0.5*self.post_mu_mean(t,x,var_ind)**2/vt))
den+=p*st
return 0.3989422804014327*num/den
else: # Discrete mu prior but known variance
inv_model_var = 1./self.model_var
t_over_model_var = t*inv_model_var
x_over_model_var = x*inv_model_var
plus = 0.
minus = 0.
dplus = 0.
dminus = 0.
for mu, weight in zip(self.mu_prior,self.weight_prior):
alpha_expmu2t = weight*np.exp(-0.5*mu**2*t_over_model_var)
mu_over_model_var = mu*inv_model_var
plus+= alpha_expmu2t*np.exp(mu*x_over_model_var)
minus+= alpha_expmu2t*np.exp(-mu*x_over_model_var)
dplus+= mu_over_model_var*alpha_expmu2t*np.exp(mu*x_over_model_var)
dminus+= mu_over_model_var*alpha_expmu2t*np.exp(-mu*x_over_model_var)
return (dminus*plus+dplus*minus)/((plus+minus)**2)
def g2x(self,t,g):
"""
Mapping from belief at time t to accumulated sample x (inverse of x2g)
"""
if self.conjugate_mu_prior() and self.known_variance():
return self.model_var*(normcdfinv(g)/np.sqrt(self.post_mu_var(t))-self.prior_mu_mean/self.prior_mu_var)
else:
it = np.nditer([np.array(t),np.array(g),None])
for t_i,g_i,out in it:
f = lambda x: self.x2g(t_i,x)-g_i
fprime = lambda x: self.dx2g(t_i,x)
out[...] = optimize.newton(f, 0., fprime=fprime)
return it.operands[2]
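# A hedged consistency sketch: in the conjugate, known-variance case g2x is
# the analytic inverse of x2g, so for a model `dm` one would expect
#   g = dm.x2g(1., 0.5)
#   np.isclose(dm.g2x(1., g), 0.5)  # -> True
# to hold; the other prior types invert x2g numerically via Newton's method.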
def xbounds(self, tolerance=1e-12, set_rho=True, set_bounds=True,
return_values=False, root_bounds=None):
return dmmodule.xbounds(self,tolerance=tolerance, set_rho=set_rho,
set_bounds=set_bounds,
return_values=return_values,
root_bounds=root_bounds)
xbounds.__doc__ = dmmodule.xbounds.__doc__
def xbounds_fixed_rho(self, rho=None, set_bounds=False,
return_values=False):
return dmmodule.xbounds_fixed_rho(self,rho=rho, set_bounds=set_bounds,
return_values=return_values)
xbounds_fixed_rho.__doc__ = dmmodule.xbounds_fixed_rho.__doc__
def values(self, rho=None):
return dmmodule.values(self,rho=rho)
values.__doc__ = dmmodule.values.__doc__
def fpt(self,mu,model_var=None,bounds=None):
return dmmodule.fpt(self,mu,model_var=model_var,bounds=bounds)
fpt.__doc__ = dmmodule.fpt.__doc__
def fpt_conf_matrix(self,first_passage_time, confidence_response,
confidence_partition=100):
return dmmodule.fpt_conf_matrix(self,first_passage_time,
confidence_response,
confidence_partition=confidence_partition)
fpt_conf_matrix.__doc__ = dmmodule.fpt_conf_matrix.__doc__
def rt_confidence_pdf(self,first_passage_time, confidence_response,
dead_time_convolver, confidence_partition=100):
"""
rt_confidence_pdf(self,first_passage_time, confidence_response,
dead_time_convolver, confidence_partition=100)
This method computes the joint probability of a given response time,
confidence and performance for the supplied model parameters and
first passage probability density.
Input:
first_passage_time: First passage time probability density.
A numpy array of shape (2,self.nT) that can be the
result of np.array(cost_time.DecisionModel.fpt())
confidence_response: The output from self.confidence_mapping(...)
dead_time_convolver: The output from self.get_dead_time_convolver(...)
confidence_partition: An int that is passed to fpt_conf_matrix
in order to discretize the confidence response range.
Output: The joint probability density. A numpy array of shape
(2,confidence_partition,self.nT+len(dead_time_convolver)-1).
The first axis represents hits [0] or misses [1].
The second axis represents the confidence as indices into
numpy.linspace(0,1,confidence_partition)
The third axis represents time as indices into
numpy.arange(out.shape[2])*self.dt
"""
if isinstance(dead_time_convolver,tuple):
dead_time_convolver = dead_time_convolver[0]
with warnings.catch_warnings():
warnings.simplefilter("ignore",np.VisibleDeprecationWarning)
out = fftconvolve(self.fpt_conf_matrix(first_passage_time=first_passage_time,
confidence_response=confidence_response,
confidence_partition=confidence_partition),
dead_time_convolver[None,None,:],mode='full')
out[out<0] = 0.
out/=(np.sum(out)*self.dt)
return out
def rt_pdf(self,first_passage_pdfs,dead_time_convolver):
"""
self.rt_pdf(first_passage_pdfs,dead_time_convolver)
This method computes the joint probability of a given response time
and performance for the supplied model parameters and the
first passage probability density.
Input:
first_passage_pdfs: First passage probability density. A numpy
array of shape (2,self.nT) that can be the output of
np.array(self.fpt(...))
dead_time_convolver: The output from
self.get_dead_time_convolver(...)
Output:
pdf: The joint probability density. A numpy array of shape
(2,self.nT+len(dead_time_convolver)-1).
The first axis represents hits [0] or misses [1].
The second axis represents time as indices into
numpy.arange(0,pdf.shape[1],dtype=float)*self.dt
"""
if isinstance(dead_time_convolver,tuple):
dead_time_convolver = dead_time_convolver[0]
with warnings.catch_warnings():
warnings.simplefilter("ignore",np.VisibleDeprecationWarning)
decision_pdfs = fftconvolve(first_passage_pdfs,dead_time_convolver[None,:],mode='full')
decision_pdfs[decision_pdfs<0] = 0.
decision_pdfs/=(np.sum(decision_pdfs)*self.dt)
return decision_pdfs
def binary_confidence_rt_pdf(self,first_passage_pdfs,confidence_response,dead_time_convolver):
"""
self.binary_confidence_rt_pdf(first_passage_pdfs,confidence_response,dead_time_convolver)
This method computes the joint probability of a given response time,
binary confidence and performance for the supplied model parameters and
first passage probability density.
Input:
first_passage_pdfs: First passage probability density. A numpy
array of shape (2,self.nT) that can be the output of
np.array(self.fpt(...))
confidence_response: The output from self.confidence_mapping(...)
dead_time_convolver: The output from self.get_dead_time_convolver(...)
Output:
pdf: The joint probability density. A numpy array of shape
(2,2,self.nT+len(dead_time_convolver)-1).
The first axis represents hits [0] or misses [1].
The second axis represents low [0] and high [1] confidence
The third axis represents time as indices into
numpy.arange(0,pdf.shape[2],dtype=float)*self.dt
"""
if isinstance(dead_time_convolver,tuple):
dead_time_convolver = dead_time_convolver[0]
phigh = confidence_response
plow = 1.-phigh
confidence_rt = np.concatenate((np.array(first_passage_pdfs)[:,None,:]*plow[:,None,:],np.array(first_passage_pdfs)[:,None,:]*phigh[:,None,:]),axis=1)
with warnings.catch_warnings():
warnings.simplefilter("ignore",np.VisibleDeprecationWarning)
confidence_pdfs = fftconvolve(confidence_rt,dead_time_convolver[None,None,:],mode='full')
confidence_pdfs[confidence_pdfs<0] = 0.
confidence_pdfs/=(np.sum(confidence_pdfs)*self.dt)
return confidence_pdfs
def get_dead_time_convolver(self,dead_time,dead_time_sigma,return_conv_x=False):
"""
self.get_dead_time_convolver(dead_time,dead_time_sigma,return_conv_x=False):
This function returns the dead time (aka non-decision time)
distribution, which is convolved with the first passage time
probability density to get the real response time distribution.
Input:
dead_time: A float that represents the center of the
gaussian used as the dead time distribution (actually
only the upper half is used)
dead_time_sigma: A float that represents the gaussian's
standard deviation.
return_conv_x: A bool. If True, the convolution's
corresponding time numpy.array is also returned.
Output:
conv_val or (conv_val,conv_x) depending on whether
return_conv_x is True or not.
conv_val is an array with the values of the dead time
distribution for the times that are in conv_x.
"""
must_downsample = True
if self.dt>1e-3:
_dt = 1e-3
else:
must_downsample = False
_dt = self.dt
conv_x_T = dead_time+6*dead_time_sigma
dense_conv_x_nT = int(conv_x_T/_dt)+1
conv_x_nT = int(conv_x_T/self.dt)+1
dense_conv_x = np.arange(0,dense_conv_x_nT)*_dt
if dead_time_sigma>0:
dense_conv_val = normpdf(dense_conv_x,dead_time,dead_time_sigma)
dense_conv_val[dense_conv_x<dead_time] = 0.
else:
dense_conv_val = np.zeros_like(dense_conv_x)
dense_conv_val[int(np.floor(dead_time/_dt))] = 1.
conv_x = np.arange(0,conv_x_nT)*self.dt
if must_downsample:
#~ conv_val = average_downsample(dense_conv_val,conv_x_nT)
if dense_conv_x_nT%conv_x_nT==0:
ratio = int(np.round(dense_conv_x_nT/conv_x_nT))
else:
ratio = int(np.ceil(dense_conv_x_nT/conv_x_nT))
tail = dense_conv_x_nT%ratio
if tail!=0:
padded_cv = np.concatenate((dense_conv_val,np.nan*np.ones(ratio-tail,dtype=float)),axis=0)
else:
padded_cv = dense_conv_val
padded_cv = np.reshape(padded_cv,(-1,ratio))
conv_val = np.nanmean(padded_cv,axis=1)
else:
conv_val = dense_conv_val
conv_val/=np.sum(conv_val)
if return_conv_x:
return conv_val,conv_x
else:
return conv_val
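# A hedged usage sketch chaining the convolver with rt_pdf; it assumes the
# bounds were already solved (e.g. via dm.xbounds()) and the parameter values
# below are illustrative, not from the original source:
#   conv = dm.get_dead_time_convolver(dead_time=0.3, dead_time_sigma=0.05)
#   fpt = np.array(dm.fpt(mu=1.))
#   rt = dm.rt_pdf(fpt, conv)  # shape (2, dm.nT + len(conv) - 1)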
def refine_value(self,tolerance=1e-12,dt=None,n=None,T=None):
"""
This method re-computes the value of the beliefs using the
average reward (rho) that was already computed.
"""
change = False
if dt is not None:
if dt<self.dt:
change = True
oldt = self.t.copy()
self.dt = float(dt)
self.nT = int(self.T/self.dt)+1
self.t = np.arange(0.,self.nT,dtype=np.float64)*self.dt
if self._cost_details['type']==0:
self.set_constant_cost(self._cost_details['details'])
elif self._cost_details['type']==1:
self.set_polynomial_cost(self._cost_details['details'])
else:
self.cost = np.interp(self.t[:-1], oldt[:-1], self.cost)
if T is not None:
if T>self.T:
change = True
self.T = float(T)
old_nT = self.nT
old_cost = self.cost
self.nT = int(self.T/self.dt)+1
self.t = np.arange(0.,self.nT,dtype=np.float64)*self.dt
if self._cost_details['type']==0:
self.set_constant_cost(self._cost_details['details'])
elif self._cost_details['type']==1:
self.set_polynomial_cost(self._cost_details['details'])
else:
self.cost = np.zeros_like(self.t)
self.cost[:old_nT-1] = old_cost
self.cost[old_nT-1:] = old_cost[-1]
if n is not None:
n = int(n)
if n%2==0:
n+=1
if n>self.n:
change = True
self.n = n
self.g = np.linspace(0.,1.,self.n)
if change:
temp = self.xbounds_fixed_rho(set_bounds=True, return_values=True)
val0 = temp[2][0,int(0.5*self.n)]
if abs(val0)>tolerance:
if val0<0:
ub = self.rho
lb = self.rho-1e-2
else:
ub = self.rho+1e-2
lb = self.rho
xbs = self.xbounds(tolerance=tolerance, set_rho=True, set_bounds=True, root_bounds=(lb,ub))
else:
self.value = temp[0]
xbs = (temp[0],temp[1])
else:
xbs = self.belief_bound_to_x_bound()
xbs = (xbs[0],xbs[1])
return xbs
def log_odds(self):
ret = np.log(self.bounds)-np.log(1-self.bounds)
ret[1]*=-1
return ret
def confidence_mapping(self,high_confidence_threshold,confidence_map_slope,confidence_mapping_method='log_odds'):
"""
self.confidence_mapping(high_confidence_threshold,confidence_map_slope,confidence_mapping_method='log_odds')
Get the high confidence mapping as a function of time.
Returns a numpy array of shape (2,self.nT)
The output[0] is the mapping for hits and output[1] is the
mapping for misses.
"""
if confidence_mapping_method=='log_odds':
return self.confidence_mapping_log_odds(high_confidence_threshold,confidence_map_slope)
elif confidence_mapping_method=='belief':
return self.confidence_mapping_belief(high_confidence_threshold,confidence_map_slope)
else:
raise ValueError('Undefined high confidence mapping method: {0}'.format(confidence_mapping_method))
def confidence_mapping_log_odds(self,high_confidence_threshold,confidence_map_slope):
"""
self.confidence_mapping_log_odds(high_confidence_threshold,confidence_map_slope)
Backend of self.confidence_mapping that implements the log_odds
mapping. Returns the same type as self.confidence_mapping.
"""
if self.dt>1e-3:
_dt = 1e-3
else:
_dt = None
if _dt:
_nT = int(self.T/_dt)+1
_t = np.arange(0.,_nT,dtype=np.float64)*_dt
log_odds = self.log_odds()
log_odds = np.array([np.interp(_t,self.t,log_odds[0]),np.interp(_t,self.t,log_odds[1])])
else:
_nT = self.nT
log_odds = self.log_odds()
_dt = self.dt
# Likely to raise warnings with exp overflows or invalid values in multiply
# if confidence_map_slope is inf or log_odds==high_confidence_threshold
# These issues are resolved naturally in the two-line statements
with warnings.catch_warnings():
warnings.simplefilter("ignore")
phigh = 1./(1.+np.exp(confidence_map_slope*(high_confidence_threshold-log_odds)))
phigh[high_confidence_threshold==log_odds] = 0.5
if _dt:
if _nT%self.nT==0:
ratio = int(np.round(_nT/self.nT))
else:
ratio = int(np.ceil(_nT/self.nT))
tail = _nT%ratio
if tail!=0:
padded_phigh = np.concatenate((phigh,np.nan*np.ones((2,ratio-tail),dtype=float)),axis=1)
else:
padded_phigh = phigh
padded_phigh = np.reshape(padded_phigh,(2,-1,ratio))
phigh = np.nanmean(padded_phigh,axis=2)
return phigh
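# A hedged numeric check of the logistic mapping above: with
# confidence_map_slope=2 and high_confidence_threshold=0.5, a bound log-odds
# value of 0.5 maps to phigh=0.5 exactly, while log_odds=1.5 maps to
# 1/(1+exp(2*(0.5-1.5))) ~ 0.88.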
def confidence_mapping_belief(self,high_confidence_threshold,confidence_map_slope):
"""
self.confidence_mapping_belief(high_confidence_threshold,confidence_map_slope)
Backend of self.confidence_mapping that implements the belief
mapping. Returns the same type as self.confidence_mapping.
"""
if self.dt>1e-3:
_dt = 1e-3
else:
_dt = None
if _dt:
_nT = int(self.T/_dt)+1
_t = np.arange(0.,_nT,dtype=np.float64)*_dt
belief = self.bounds.copy()
belief = np.array([np.interp(_t,self.t,2*(belief[0]-0.5)),np.interp(_t,self.t,2*(0.5-belief[1]))])
else:
_nT = self.nT
belief = self.bounds.copy()
belief[0] = 2*(belief[0]-0.5)
belief[1] = 2*(0.5-belief[1])
_dt = self.dt
with warnings.catch_warnings():
warnings.simplefilter("ignore")
phigh = confidence_map_slope*(belief-high_confidence_threshold)
phigh[np.isnan(phigh)] = 0.5
phigh[phigh>1] = 1
phigh[phigh<0] = 0
if _dt:
if _nT%self.nT==0:
ratio = int(np.round(_nT/self.nT))
else:
ratio = int(np.ceil(_nT/self.nT))
tail = _nT%ratio
if tail!=0:
padded_phigh = np.concatenate((phigh,np.nan*np.ones((2,ratio-tail),dtype=float)),axis=1)
else:
padded_phigh = phigh
padded_phigh = np.reshape(padded_phigh,(2,-1,ratio))
phigh = np.nanmean(padded_phigh,axis=2)
return phigh
def diffusion_path_samples(mu,var_rate,dt,T,xb,reps=10):
paths = []
sigma = np.sqrt(var_rate*dt)
if not isinstance(mu,np.ndarray):
mus = mu*np.ones(reps)
else:
mus = mu
nT = int(T/dt)+1
for mu in mus:
path = {'x':[0],'t':[0]}
decided = False
for t_i in np.arange(1,nT):
t = t_i*dt
stim = sigma*np.random.randn(1)+mu*dt
path['x'].append(path['x'][-1]+stim)
path['t'].append(t)
if path['x'][-1]>=xb[0][t_i+1]:
path['dec']=1
path['rt']=t
decided = True
break
elif path['x'][-1]<=xb[1][t_i+1]:
path['dec']=2
path['rt']=t
decided = True
break
if not decided:
path['dec']=None
path['rt']=None
paths.append(path)
# Undecided paths have rt=None, which cannot be compared to floats in
# Python 3; map them to -inf so they still sort last under reverse=True.
return sorted(paths,key=lambda path: -np.inf if path['rt'] is None else path['rt'], reverse=True)
def sim_rt(mu,var_rate,dt,T,xb,reps=10000):
if not isinstance(mu,np.ndarray):
mu = mu*np.ones(reps)
sim = np.zeros_like(mu)
rt = np.zeros_like(mu)
decision = np.zeros_like(mu)
not_decided = np.ones_like(mu,dtype=bool)
sigma = np.sqrt(dt*var_rate)
nT = int(T/dt)+1
for i in range(1,nT):
t = float(dt)*i
sim+= sigma*np.random.randn(*mu.shape)
stim = sim+t*mu
dec1 = np.logical_and(stim>=xb[0][i+1],not_decided)
dec2 = np.logical_and(stim<=xb[1][i+1],not_decided)
if np.any(dec1):
rt[dec1] = t
decision[dec1] = 1
not_decided[dec1] = 0
if np.any(dec2):
rt[dec2] = t
decision[dec2] = 2
not_decided[dec2] = 0
if not np.any(not_decided):
break
out = (rt,decision)
return out
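# A hedged usage sketch for the module-level simulators; it assumes a model
# `dm` with known (float) variance whose bounds were already solved:
#   xub, xlb = dm.xbounds(return_values=True)[:2]
#   rts, decisions = sim_rt(mu=1., var_rate=dm.model_var, dt=dm.dt, T=dm.T,
#                           xb=(xub, xlb), reps=1000)
#   paths = diffusion_path_samples(1., dm.model_var, dm.dt, dm.T, (xub, xlb))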
def _test():
out = dmmodule.testsuite()
dict1,dict2,dict3,t,cx,cg1,cg2,cg3,cdg1,cdg2,cdg3,cx1,cx2,cx3 = out
dict1['T'] = dict2['T'] = dict3['T'] = 3.
d1 = DecisionModel(**dict1)
d2 = DecisionModel(**dict2)
d3 = DecisionModel(**dict3)
d1.set_internal_var(10.)
d2.set_internal_var(10.)
d3.set_internal_var(10.)
print(d1)
print(d2)
print(d3)
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
x = np.linspace(-30,30,100)
dx = x[1]-x[0]
g1 = d1.x2g(t,x)[:,None]
g2 = d2.x2g(t,x)[:,None]
g3 = d3.x2g(t,x)[:,None]
dg1 = d1.dx2g(t,x)
dg2 = d2.dx2g(t,x)
dg3 = d3.dx2g(t,x)
numdg1 = np.vstack(((g1[1,:]-g1[0,:])/dx,0.5*(g1[2:,:]-g1[:-2,:])/dx,(g1[-1,:]-g1[-2,:])/dx))
numdg2 = np.vstack(((g2[1,:]-g2[0,:])/dx,0.5*(g2[2:,:]-g2[:-2,:])/dx,(g2[-1,:]-g2[-2,:])/dx))
numdg3 = np.vstack(((g3[1,:]-g3[0,:])/dx,0.5*(g3[2:,:]-g3[:-2,:])/dx,(g3[-1,:]-g3[-2,:])/dx))
x1 = d1.g2x(t,g1[:,0])
x2 = d2.g2x(t,g2[:,0])
x3 = d3.g2x(t,g3[:,0])
plt.subplot(131)
plt.plot(x,g1,label='conj',color='b')
plt.plot(x,g2,label='discrete mu',color='g')
plt.plot(x,g3,label='discrete var',color='r')
plt.plot(cx,cg1,label='C conj',color='b',linestyle='--')
plt.plot(cx,cg2,label='C discrete mu',color='g',linestyle='--')
plt.plot(cx,cg3,label='C discrete var',color='r',linestyle='--')
plt.legend(loc='best', fancybox=True, framealpha=0.5)
plt.title('x2g')
plt.subplot(132)
plt.plot(x,dg1,label='conj',color='b')
plt.plot(x,dg2,label='discrete mu',color='g')
plt.plot(x,dg3,label='discrete var',color='r')
plt.plot(cx,cdg1,label='C conj',color='b',linestyle='--')
plt.plot(cx,cdg2,label='C discrete mu',color='g',linestyle='--')
plt.plot(cx,cdg3,label='C discrete var',color='r',linestyle='--')
plt.title('dx2g')
plt.subplot(133)
plt.plot(cx,x1-cx1,label='C conj',color='b',linestyle='-')
plt.plot(cx,x2-cx2,label='C discrete mu',color='g',linestyle='-')
plt.plot(cx,x3-cx3,label='C discrete var',color='r',linestyle='-')
plt.title('g2x')
plt.suptitle(r'x $\leftrightarrow$ g Mapping')
print(t)
print('conj')
xub1,xlb1,v1,ve1,_,_ = d1.xbounds(return_values=True)
print(d1.rho)
print('discrete mu')
xub2,xlb2,v2,ve2,_,_ = d2.xbounds(return_values=True)
print(d2.rho)
print('discrete var')
xub3,xlb3,v3,ve3,_,_ = d3.xbounds(return_values=True)
print(d3.rho)
plt.figure()
plt.subplot(211)
plt.plot(d1.t,xub1,'b',label='Conj')
plt.plot(d1.t,xlb1,'b')
plt.plot(d2.t,xub2,'g',label='Discrete mu')
plt.plot(d2.t,xlb2,'g')
plt.plot(d3.t,xub3,'r',label='Discrete var')
plt.plot(d3.t,xlb3,'r')
plt.legend(loc='best', fancybox=True, framealpha=0.5)
plt.ylabel('x(t) bounds')
plt.subplot(212)
plt.plot(d1.t,d1.bounds[0],'b',label='Conj')
plt.plot(d1.t,d1.bounds[1],'b')
plt.plot(d2.t,d2.bounds[0],'g',label='Discrete mu')
plt.plot(d2.t,d2.bounds[1],'g')
plt.plot(d3.t,d3.bounds[0],'r',label='Discrete var')
plt.plot(d3.t,d3.bounds[1],'r')
plt.suptitle('Bounds')
plt.ylabel('g(t) bounds')
plt.xlabel('T')
print('Computing rt')
print('Conj')
mu = 1.
rt1 = np.array(d1.fpt(mu,bounds=(xub1,xlb1)))
print('Discrete mu')
rt2 = np.array(d2.fpt(mu,bounds=(xub2,xlb2)))
rt3 = np.zeros_like(rt1)
for model_var,prior_var_prob in zip(d3.model_var,d3.prior_var_prob):
print('Discrete var: {0}'.format(model_var))
rt3+= np.array(d3.fpt(mu,bounds=(xub1,xlb1),model_var=model_var))*prior_var_prob
plt.figure()
plt.plot(d1.t,rt1[0],'b',label='Conj hit')
plt.plot(d1.t,rt1[1],'--b',label='Conj miss')
plt.plot(d2.t,rt2[0],'g',label='Discrete mu')
plt.plot(d2.t,rt2[1],'--g')
plt.plot(d3.t,rt3[0],'r',label='Discrete var hit')
plt.plot(d3.t,rt3[1],'--r',label='Discrete var miss')
plt.ylabel('RT prob')
plt.xlabel('T')
plt.legend(loc='best', fancybox=True, framealpha=0.5)
plt.suptitle('First passage time')
plt.figure()
plt.subplot(311)
plt.imshow((v1[1:]-ve1).T,aspect='auto',cmap='jet',interpolation='none',origin='lower',extent=[d1.t[0],d1.t[-1],d1.g[0],d1.g[-1]])
plt.colorbar()
plt.subplot(312)
plt.imshow((v2[1:]-ve2).T,aspect='auto',cmap='jet',interpolation='none',origin='lower',extent=[d2.t[0],d2.t[-1],d2.g[0],d2.g[-1]])
plt.colorbar()
plt.subplot(313)
plt.imshow((v3[1:]-ve3).T,aspect='auto',cmap='jet',interpolation='none',origin='lower',extent=[d3.t[0],d3.t[-1],d3.g[0],d3.g[-1]])
plt.colorbar()
plt.suptitle(r'$\tilde{V}-V_{explore}$')
print('Computing confidence mappings')
high_confidence_threshold = 0.3
confidence_map_slope = 1.7
dead_time = 0.2
dead_time_sigma = 0.4
confidence_response1 = d1.confidence_mapping(high_confidence_threshold,confidence_map_slope,confidence_mapping_method='belief')
confidence_response2 = d2.confidence_mapping(high_confidence_threshold,confidence_map_slope,confidence_mapping_method='belief')
confidence_response3 = d3.confidence_mapping(high_confidence_threshold,confidence_map_slope,confidence_mapping_method='belief')
plt.figure()
ax = plt.subplot(211)
plt.plot(d1.t,(d1.bounds.T*2-1)*np.array([1,-1]),label='conj')
plt.plot(d2.t,(d2.bounds.T*2-1)*np.array([1,-1]),label='Discrete mu')
plt.plot(d3.t,(d3.bounds.T*2-1)*np.array([1,-1]),label='Discrete var')
plt.ylabel('Normed Belief')
plt.subplot(212,sharex=ax)
plt.plot(d1.t,confidence_response1.T,label='conj')
plt.plot(d2.t,confidence_response2.T,label='Discrete mu')
plt.plot(d3.t,confidence_response3.T,label='Discrete var')
plt.ylabel('Confidence response')
plt.xlabel('T')
print('Computing first passage time confidence response partition matrix')
fpt_conf1 = d1.fpt_conf_matrix(rt1,confidence_response1,100)
fpt_conf2 = d2.fpt_conf_matrix(rt2,confidence_response2,100)
fpt_conf3 = d3.fpt_conf_matrix(rt3,confidence_response3,100)
plt.figure()
ax1 = plt.subplot(231)
plt.imshow(fpt_conf1[0].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[d1.t[0],d1.t[-1],0,1])
plt.plot(d1.t,confidence_response1[0],'--k')
plt.title('Conj')
ax2 = plt.subplot(232)
plt.imshow(fpt_conf2[0].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[d2.t[0],d2.t[-1],0,1])
plt.plot(d2.t,confidence_response2[0],'--k')
plt.title('Discrete mu')
ax3 = plt.subplot(233)
plt.imshow(fpt_conf3[0].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[d3.t[0],d3.t[-1],0,1])
plt.plot(d3.t,confidence_response3[0],'--k')
plt.title('Discrete var')
plt.subplot(234,sharex=ax1)
plt.plot(d1.t,np.sum(fpt_conf1,axis=1).T)
plt.plot(d1.t,rt1.T,'--')
plt.ylabel('Prob')
plt.xlabel('T')
plt.subplot(235,sharex=ax2)
plt.plot(d2.t,np.sum(fpt_conf2,axis=1).T)
plt.plot(d2.t,rt2.T,'--')
plt.xlabel('T')
plt.subplot(236,sharex=ax3)
plt.plot(d3.t,np.sum(fpt_conf3,axis=1).T)
plt.plot(d3.t,rt3.T,'--')
plt.xlabel('T')
print('Computing dead time convolver and adding dead time to response time distributions')
dead_time_convolver1 = d1.get_dead_time_convolver(dead_time,dead_time_sigma)
dead_time_convolver2 = d2.get_dead_time_convolver(dead_time,dead_time_sigma)
dead_time_convolver3 = d3.get_dead_time_convolver(dead_time,dead_time_sigma)
rt_conf1 = d1.rt_confidence_pdf(rt1,confidence_response1,dead_time_convolver1,100)
rt_conf2 = d2.rt_confidence_pdf(rt2,confidence_response2,dead_time_convolver2,100)
rt_conf3 = d3.rt_confidence_pdf(rt3,confidence_response3,dead_time_convolver3,100)
t1 = np.arange(rt_conf1.shape[1])*d1.dt
t2 = np.arange(rt_conf2.shape[1])*d2.dt
t3 = np.arange(rt_conf3.shape[1])*d3.dt
plt.figure()
ax = plt.subplot(231)
plt.imshow(rt_conf1[0].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[t1[0],t1[-1],0,1])
ax = plt.subplot(234)
plt.imshow(rt_conf1[1].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[t1[0],t1[-1],0,1])
ax = plt.subplot(232)
plt.imshow(rt_conf2[0].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[t2[0],t2[-1],0,1])
ax = plt.subplot(235)
plt.imshow(rt_conf2[1].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[t2[0],t2[-1],0,1])
ax = plt.subplot(233)
plt.imshow(rt_conf3[0].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[t3[0],t3[-1],0,1])
ax = plt.subplot(236)
plt.imshow(rt_conf3[1].T,aspect='auto',cmap='jet',origin='lower',interpolation='none',extent=[t3[0],t3[-1],0,1])
print('Finished test suite')
plt.show(block=True)
if __name__=="__main__":
_test()
|
[
"chulo1787@gmail.com"
] |
chulo1787@gmail.com
|
207c2c9724fd0274e408dc29c05b801c94a3e1be
|
262c70153995349ccb80fb3f4a6e9a85c4495d9b
|
/web/turk/catalog/migrations/0007_auto_20171122_2107.py
|
06cdf3ecb182099aa49a8867767be458600ed857
|
[] |
no_license
|
nghiank/rec_table
|
2ec43bc673a731e13daf5b69f9cce6be4b295bb6
|
d0f25716b932cc01a57fb476898b2fd297b7e227
|
refs/heads/master
| 2022-04-30T04:05:54.316739
| 2022-03-27T20:20:01
| 2022-03-27T20:20:01
| 100,076,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-22 21:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0006_auto_20171107_1502'),
]
operations = [
migrations.AddField(
model_name='cell',
name='file_path',
field=models.CharField(default=' ', help_text='S3 relative file path', max_length=254),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='cell',
unique_together=set([('image_sheet', 'file_path')]),
),
]
|
[
"nghiank@hotmail.com"
] |
nghiank@hotmail.com
|
bb78898b6eaa938bae587f12df5d3feac094861e
|
52a023aee8818c6b1fa8316ff0b6ab83ed00d211
|
/aircraft/aircraft.py
|
43864d8bc7bbc278a71112cfdd1efe6e5eb17f7b
|
[
"MIT"
] |
permissive
|
6110-CTO/piawaredump1090wrapper
|
9092c1aa3d237b55ec437620d328df57f774b71c
|
c7cc1570a797f12b00908e2e49eb7fd2954f3752
|
refs/heads/master
| 2023-06-21T19:50:05.363966
| 2020-04-13T21:35:46
| 2020-04-13T21:35:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,601
|
py
|
from .calculate_distance_of_aircraft import calculate_distance_of_aircraft
from .exceptions import NotofTypeFloat, IncorrectDeltaCalculation, IncorrectLngLat
class Aircraft:
"""
Aircraft class containing information regarding a single aircraft.
Some notes about the arguments:
Note: maybe we can make another class or method that tracks location?
Note: hex is the ICAO number. ADSBexchange gives it the name hex.
Note: lat and lng have differing names in the class versus the API
Note: gs is ground speed.
Note: alt_geom is altitude
icao_number is used to identify the object when created
"""
def __init__(self, hex,
flight=None,
squawk=None,
lat=None,
lon=None,
alt_geom=None,
geom_rate=None,
track=None,
gs=None,
seen=None,
*args,
**kwargs):
self.icao_number = hex
self.flight = flight
self.squawk = squawk
self.lat = lat
self.lng = lon
self.altitude = alt_geom
self.ver_rate = geom_rate
self.track = track
self.speed = gs
self.seen = seen
self.is_within_distance = None # boolean if within a certain distance
self.distance_specific_point = None # value from that distance
def __repr__(self):
"""
:Returns: Flight number if available or ICAO number
"""
if self.flight is not None:
return f'{self.flight}'
else:
return f'{self.icao_number}'
def __str__(self):
"""
:Returns: Flight number if available or ICAO number
"""
if self.flight is not None:
return f'{self.flight}'
else:
return f'{self.icao_number}'
def __eq__(self, other):
"""
Compares the two ICAO numbers. The ICAO number is the identifier
for a unique aircraft.
"""
return self.icao_number == other.icao_number
def __key(self):
"""
Key for __hash__
"""
return (self.icao_number)
def __hash__(self):
"""
Hashes based on __key, which is the icao number
"""
return hash(self.__key())
def update_info(self, **kwargs) -> bool:
"""
Updates attributes of the airplane.
:param:
**kwargs: Dictionary of values to update
:return: Boolean value if update was successful or not
"""
#: To-do: Check if an attribute exists before adding it.
if not kwargs:
return False
for key, value in kwargs.items():
setattr(self, key, value)
return True
def get_aircraft_within_specific_distance(self, distance: float, lat: float, lng: float) -> float:
"""
Returns the aircraft's distance from a central point.
:params:
distance: The radius to be measured from the central point given by lat and lng
lat: Latitude of location
lng: Longitude of location
"""
_dist = None
try:
_dist = calculate_distance_of_aircraft(lat, lng, self.lat, self.lng)
self.distance_specific_point = _dist
except (NotofTypeFloat, IncorrectLngLat, IncorrectDeltaCalculation):
self.distance_specific_point = None
if self.distance_specific_point and _dist <= distance:
self.is_within_distance = True
else:
self.is_within_distance = False
return _dist
def get_aircraft_info(self) -> tuple:
"""
Returns a tuple of all parameters of the aircraft. Some may be "None"
if they did not get populated during init. Usually due to lack of
information from the ADSB scanner.
:param: None
:return: Tuple of all characteristics of the aircraft
"""
return (
f'ICAO#: {self.icao_number}, \
Flight#: {self.flight}, \
SQUAWK: {self.squawk}, \
LAT: {self.lat}, \
LNG: {self.lng}, \
ALT: {self.altitude}ft, \
VERT_RATE: {self.ver_rate}, \
TRACK: {self.track}, \
SPEED: {self.speed}kt, \
SEEN: {self.seen}, \
')
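# A hedged usage sketch (the field names follow the ADS-B style payload
# assumed by __init__; identifiers and coordinates below are made up):
#   plane = Aircraft(hex='a1b2c3', flight='UAL123', lat=49.20, lon=-123.10, gs=450.0)
#   plane.update_info(alt_geom=35000)
#   plane.get_aircraft_within_specific_distance(50.0, lat=49.25, lng=-123.00)
#   print(plane.is_within_distance, plane.distance_specific_point)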
|
[
"kc8@users.noreply.github.com"
] |
kc8@users.noreply.github.com
|
c709c9a204f4ac884ebdf50acfacc83a34d4793a
|
f1878806a8787d607705158906a830b06f7cdcf8
|
/tests/utils/tensorflow/test_data_generator.py
|
7f92f97cd90ce2dadde295e564267079afde9480
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
2boloto/rasa
|
58c1511c2bbc98e8b15900bd011a6f4d0dcf1d81
|
6a73b2e799b495411af46230dbc02f5edec3741c
|
refs/heads/main
| 2023-04-11T03:42:24.370663
| 2021-04-29T15:55:40
| 2021-04-29T15:55:40
| 362,908,820
| 1
| 1
|
Apache-2.0
| 2021-04-29T18:19:04
| 2021-04-29T18:19:03
| null |
UTF-8
|
Python
| false
| false
| 6,496
|
py
|
import pytest
import scipy.sparse
import numpy as np
from rasa.utils.tensorflow.model_data import FeatureArray, RasaModelData
from rasa.utils.tensorflow.data_generator import (
RasaDataGenerator,
RasaBatchDataGenerator,
)
def test_data_generator_with_increasing_batch_size(model_data: RasaModelData):
epochs = 2
data_generator = RasaBatchDataGenerator(
model_data,
batch_size=[1, 2],
epochs=epochs,
batch_strategy="balanced",
shuffle=True,
)
expected_batch_sizes = [[1, 1, 1, 1, 1], [2, 2, 1]]
for _epoch in range(epochs):
iterator = iter(data_generator)
assert len(data_generator) == len(expected_batch_sizes[_epoch])
for i in range(len(data_generator)):
batch, _ = next(iterator)
assert len(batch) == 11
assert len(batch[0]) == expected_batch_sizes[_epoch][i]
with pytest.raises(StopIteration):
next(iterator)
data_generator.on_epoch_end()
def test_data_generator_with_fixed_batch_size(model_data: RasaModelData):
data_generator = RasaBatchDataGenerator(
model_data, batch_size=2, epochs=1, batch_strategy="balanced", shuffle=True
)
expected_batch_sizes = [2, 2, 1]
iterator = iter(data_generator)
assert len(data_generator) == len(expected_batch_sizes)
for i in range(len(data_generator)):
batch, _ = next(iterator)
assert len(batch) == 11
assert len(batch[0]) == expected_batch_sizes[i]
with pytest.raises(StopIteration):
next(iterator)
@pytest.mark.parametrize(
"incoming_data, expected_shape",
[
(FeatureArray(np.random.rand(7, 12), number_of_dimensions=2), (7, 12)),
(FeatureArray(np.random.rand(7), number_of_dimensions=1), (7,)),
(
FeatureArray(
np.array(
[
np.random.rand(1, 10),
np.random.rand(3, 10),
np.random.rand(7, 10),
np.random.rand(1, 10),
]
),
number_of_dimensions=3,
),
(4, 7, 10),
),
(
FeatureArray(
np.array(
[
np.array(
[
np.random.rand(1, 10),
np.random.rand(5, 10),
np.random.rand(7, 10),
]
),
np.array(
[
np.random.rand(1, 10),
np.random.rand(3, 10),
np.random.rand(3, 10),
np.random.rand(7, 10),
]
),
np.array([np.random.rand(2, 10)]),
]
),
number_of_dimensions=4,
),
(8, 7, 10),
),
],
)
def test_pad_dense_data(incoming_data: FeatureArray, expected_shape: np.ndarray):
padded_data = RasaDataGenerator._pad_dense_data(incoming_data)
assert padded_data.shape == expected_shape
@pytest.mark.parametrize(
"incoming_data, expected_shape",
[
(
FeatureArray(
np.array([scipy.sparse.csr_matrix(np.random.randint(5, size=(7, 12)))]),
number_of_dimensions=3,
),
[1, 7, 12],
),
(
FeatureArray(
np.array([scipy.sparse.csr_matrix(np.random.randint(5, size=(7,)))]),
number_of_dimensions=2,
),
[1, 1, 7],
),
(
FeatureArray(
np.array(
[
scipy.sparse.csr_matrix(np.random.randint(10, size=(1, 10))),
scipy.sparse.csr_matrix(np.random.randint(10, size=(3, 10))),
scipy.sparse.csr_matrix(np.random.randint(10, size=(7, 10))),
scipy.sparse.csr_matrix(np.random.randint(10, size=(1, 10))),
]
),
number_of_dimensions=3,
),
(4, 7, 10),
),
(
FeatureArray(
np.array(
[
np.array(
[
scipy.sparse.csr_matrix(
np.random.randint(10, size=(1, 10))
),
scipy.sparse.csr_matrix(
np.random.randint(10, size=(5, 10))
),
scipy.sparse.csr_matrix(
np.random.randint(10, size=(7, 10))
),
]
),
np.array(
[
scipy.sparse.csr_matrix(
np.random.randint(10, size=(1, 10))
),
scipy.sparse.csr_matrix(
np.random.randint(10, size=(3, 10))
),
scipy.sparse.csr_matrix(
np.random.randint(10, size=(1, 10))
),
scipy.sparse.csr_matrix(
np.random.randint(10, size=(7, 10))
),
]
),
np.array(
[
scipy.sparse.csr_matrix(
np.random.randint(10, size=(2, 10))
)
]
),
]
),
number_of_dimensions=4,
),
(8, 7, 10),
),
],
)
def test_scipy_matrix_to_values(
incoming_data: FeatureArray, expected_shape: np.ndarray
):
indices, data, shape = RasaDataGenerator._scipy_matrix_to_values(incoming_data)
assert np.all(shape == expected_shape)
|
[
"noreply@github.com"
] |
2boloto.noreply@github.com
|
41f27f6cf3509d3cdac25470365372a01d6a6123
|
ffdea80b45e9c86abde16bf77c05589b1901d64f
|
/froide_exam/__init__.py
|
e9ae091b0acbcbf56486c18daf32536a8c039176
|
[
"MIT"
] |
permissive
|
okfde/froide-exam
|
081315f061d54969ad2441c8411f7e8a8915ae81
|
1f7baf4c2e72a623e17962d111efcd9dbf3ad875
|
refs/heads/main
| 2023-01-28T16:13:23.636740
| 2023-01-24T17:38:51
| 2023-01-24T17:38:51
| 159,523,223
| 2
| 1
|
MIT
| 2023-01-23T16:48:07
| 2018-11-28T15:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 79
|
py
|
__version__ = '0.0.1'
default_app_config = 'froide_exam.apps.FroideExamConfig'
|
[
"12673799+krmax44@users.noreply.github.com"
] |
12673799+krmax44@users.noreply.github.com
|
f589c68eff5b987ab19d8b991f42b49334791897
|
3a913e87c7301b43c995482246394efac042db26
|
/venv/bin/pip3.7
|
0ad8aedd5f40a6cbf006c1c23103341bdc0b77bd
|
[] |
no_license
|
ananthgoyal/Raga_Indentification
|
bdf1119f3672cb0ccc6707ff5a31e387b5aa9449
|
40276340be12a1b62e2fca4d4bc2ae1f7e6a0fcc
|
refs/heads/master
| 2023-02-06T11:00:48.431506
| 2020-12-28T21:32:50
| 2020-12-28T21:32:50
| 324,671,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
7
|
#!/Users/ananthgoyal/PycharmProjects/Raga_Indentification/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"ananthgoyal@gmail.com"
] |
ananthgoyal@gmail.com
|
8e532955036b1f55a5d628d121dbb98e2d470b75
|
b0ad03dc1ad141ef9d669758e6f295787b71a721
|
/api/server.py
|
c87aee62570101ef9d27226fda49a110d0c0b796
|
[] |
no_license
|
AlecAivazis/recipeBook
|
5e375fd01813a19eb15f3e74033211ba685b4b74
|
92522c5d678b85b9f03c7d8815c765dcbf30f911
|
refs/heads/master
| 2023-03-12T14:15:59.887428
| 2017-12-18T02:01:36
| 2017-12-18T02:01:36
| 52,736,616
| 0
| 4
| null | 2017-12-18T02:01:37
| 2016-02-28T18:33:32
|
Python
|
UTF-8
|
Python
| false
| false
| 298
|
py
|
# external imports
import nautilus
# create a nautilus service with just the schema
class RecipeBookAPI(nautilus.APIGateway):
@nautilus.auth_criteria('recipe')
async def auth_recipe(self, model, user_id):
# only recipes with id 2 are visible
return await model._has_id(2)
|
[
"alec@aivazis.com"
] |
alec@aivazis.com
|
955c655db3b4b204697aa1b3c2295968e255b9a7
|
7d0c6df6480402642a327b1e6bca353047cea444
|
/app/.~c9_invoke_NNKGkQ.py
|
550f3e20837a23b76f47741511c1cb799a97820a
|
[] |
permissive
|
HaywardPeirce/battlesnake
|
b7ab99016ea9d624e71c1969cfb1e56d0e9bcf5c
|
759c399c60a1f292a621dc276cb1a02f164e1134
|
refs/heads/master
| 2020-04-21T04:37:02.342034
| 2019-02-13T05:28:37
| 2019-02-13T05:28:37
| 169,317,516
| 1
| 0
|
MIT
| 2019-02-13T05:28:38
| 2019-02-05T21:40:48
|
Python
|
UTF-8
|
Python
| false
| false
| 16,102
|
py
|
from battlesnakeClasses import *
import random, math
#import each of the component files running sections of the snake
#check if squatchy will hit himself
def squatchyHitCheck(squatchy, score):
#get current squatchy head location
print("------------------------------------------------")
print("Checking whether squatchy will hit himself")
print("squatchy head: [{}]".format(squatchy.head()))
#see if squatchy's head will hit his body
tempDirection = squatchy.conllisionCheck(squatchy.head(), score)
#print(tempDirection)
tempDirection.printMoves()
return tempDirection
#get list of squatchy non-head locations
#bodyLocations =
#check if squatchy will run into a wall
def wallHitCheck(squatchy, height, width, freespaceScore):
print("------------------------------------------------")
print("Checking whether squatchy will hit the wall")
print("squatchy head: [{}]".format(squatchy.head()))
#how wide is the board?
#freespaceScore = 1
tempDirection = MoveChoices()
#if squatchy isn't right up against the left side of the board
if squatchy.head()[0] > 0:
tempDirection.left = freespaceScore
#if squatchy isn't right up against the right side (assuming board locations are indexed starting at 0)
if squatchy.head()[0] < (width - 1):
tempDirection.right = freespaceScore
if squatchy.head()[1] > 0:
tempDirection.up = freespaceScore
if squatchy.head()[1] < (height - 1):
tempDirection.down = freespaceScore
tempDirection.printMoves()
return tempDirection
def enemyHitCheck(squatchy, enemies, score):
print("------------------------------------------------")
print("Check whether squatchy will hit any other snakes")
#loop through each enemy, `conllisionCheck` for our snake head
print("squatchy head: [{}]".format(squatchy.head()))
#value to use when weighting enemy detection
#score = 1
#set initial enemy direction to "safe" as hit detection will set unsafe options to 0
enemyDirections = MoveChoices(score, score, score, score)
#loop through each enemy snake
for enemy in enemies:
print("------------------------")
print("Checking whether squatchy will hit '{}'".format(enemy.name))
#see if squatchy's head will collide with any part of this enemy snake
tempDirection = enemy.conllisionCheck(squatchy.head(), score)
tempDirection.printMoves()
enemyDirections.boolDownMoves(tempDirection)
#if the scores for the hit checks against this snake turn up new collisions, rule out those moves (less than means a new dangerous move)
# if tempDirection.up < enemyDirections.up:
# enemyDirections.up = tempDirection.up
# if tempDirection.down < enemyDirections.down:
# enemyDirections.down = tempDirection.down
# if tempDirection.right < enemyDirections.right:
# enemyDirections.right = tempDirection.right
# if tempDirection.left < enemyDirections.left:
# enemyDirections.left = tempDirection.left
enemyDirections.printMoves("After `enemyDirections` for {}".format(enemy.name))
return enemyDirections
def moveToSameCheck(squatchy, enemies, score):
print("------------------------------------------------")
print("Check whether squatchy might be moving to the same square as another snake")
enemyDirections = MoveChoices(score, score, score, score)
directions = [(0,-1),(1,0),(0,1),(-1,0)]
#TODO:
squatchyNextTurn = squatchy.possibleMoves()
print("squatchy head: [{}]".format(squatchy.head()))
#loop through each enemy
for enemy in enemies:
print("------------------------")
print("Checking whether squatchy might move into '{}'".format(enemy.name))
#print("{} head: [{}]".format(enemy.name, enemy.head()))
enemyNextTurn = enemy.possibleMoves()
#loop through the enemy's possible moves, and check if it would collide with where squatchy would be
for position in enemyNextTurn:
tempDirection = conllisionCheck(position, squatchy.head(), score)
#tempDirection.printMoves()
enemyDirections.boolDownMoves(tempDirection)
#if the scores for the hit checks against this snake turn up new collisions, rule out those moves (less than means a new dangerous move)
# if tempDirection.up < enemyDirections.up:
# enemyDirections.up = tempDirection.up
# if tempDirection.down < enemyDirections.down:
# enemyDirections.down = tempDirection.down
# if tempDirection.right < enemyDirections.right:
# enemyDirections.right = tempDirection.right
# if tempDirection.left < enemyDirections.left:
# enemyDirections.left = tempDirection.left
enemyDirections.printMoves("After `enemyDirections` for {}".format(enemy.name))
return enemyDirections
def foodCheck(squatchy, height, width, food, score):
print("------------------------------------------------")
print("Check whether squatchy should head towards any food")
foodDirections = MoveChoices()
bestDist = max(height, width)
bestFood = []
#calculate directions to closest food. Maybe see of we are the closest as well?
for nibble in food:
#tempDistance = math.sqrt( ((squatchy.head()[0]-nibble[0])**2)+((squatchy.head()[1]-nibble[1])**2) )
#just sum each component of the distance rather than finding the diagonal route
tempDistance = abs(squatchy.head()[0]-nibble[0]) + abs(squatchy.head()[1]-nibble[1])
if tempDistance < bestDist:
bestFood = [tuple((nibble[0],nibble[1]))]
bestDist = tempDistance
elif tempDistance == bestDist:
bestFood.append(tuple((nibble[0],nibble[1])))
#print(squatchy.health, score)
print("The best food items are {}, and are {} spaces away.".format(bestFood, bestDist))
#TODO: work out scaling/weighting factor based on how hungry squatchy is. Maybe look at using the distance to the food
if squatchy.health > 30:
score = score/2
#elif squatchy.health < 20 and bestDist > 10:
elif squatchy.health < 20:
score = score*3
print("The weighted food score is now {}".format(score))
#loop through the food items that are closest
for item in bestFood:
tempDirection = squatchy.directionCheck(item, score)
#tempDirection.printMoves("after best direction")
#take all the directions which lead to the closest foods, but don't go above the value of `score`
if tempDirection.up > foodDirections.up:
foodDirections.up = tempDirection.up
if tempDirection.down > foodDirections.down:
foodDirections.down = tempDirection.down
if tempDirection.right > foodDirections.right:
foodDirections.right = tempDirection.right
if tempDirection.left > foodDirections.left:
foodDirections.left = tempDirection.left
foodDirections.printMoves("The recommendation for `foodCheck` is ")
return foodDirections
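# A hedged worked example of the Manhattan distance used above: a head at
# (3, 4) and food at (6, 2) gives abs(3-6) + abs(4-2) = 5 steps, which is the
# true move count for a snake restricted to orthogonal moves (no diagonals).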
#return the direction of the quadrant with the fewest other snakes in it
def findOpenSpace(squatchy, enemies, gameBoard, score):
print("------------------------------------------------")
print("Check which directions lead towards open space")
tempDirection = MoveChoices(score, score, score, score)
#q1, q2, q3, q4 = 0, 0, 0, 0
#set the x and y values that the higher-numbered quadrants will start on
#tempDirection.addMoves(gameBoard.wayToMin(squatchy.head(), enemies))
tempDirection.boolDownMoves(gameBoard.wayToMin(squatchy.head(), enemies, 100))
#TODO: check which quadrant has the least snake parts, move there. If there are two equally empty ones?
#find min value
#find which quadrants have this least value
#find squatchy quadrants
#return directionToQuadrant()
tempDirection.printMoves("After `findOpenSpace`(inside): ")
return tempDirection
#return a string of which way squatchy should move
def turn(turnData, gameBoard, squatchy, enemies):
#TODO: setup snakes based on the their initial positions
#print('hello world')
#print(turnData)
# myLocation = []
# for point in turnData['you']['body']['data']:
# myLocation.append(tuple((point['x'],point['y'])))
#
# squatchy.locations = myLocation
# squatchy.health = turnData['you']['health']
# squatchy.length = turnData['you']['length']
#update positions, length, and health of each of the opponent snakes. Maybe check if it's turn 1 to initialize (name, ID...) the snakes, otherwise just update the values
#first turn, setup opponent snakes
#if turnData['turn'] == 0:
#TODO: setup initialization info about each snake
#add food points into game board declaration
gameBoard.food = []
gameBoard.addFood(turnData['food']['data'])
enemies = []
for snake in turnData['snakes']['data']:
tempSnake = Snake(snake['id'], snake['name'])
tempLocations = []
for location in snake['body']['data']:
tempLocations.append(tuple((location['x'],location['y'])))
tempSnake.locations = tempLocations
tempSnake.length = snake['length']
tempSnake.health = snake['health']
if snake['id'] == turnData['you']['id']:
print("Adding data in for snake: {} (squatchy)".format(snake['name']))
squatchy = tempSnake
squatchy.isUs = True
else:
print("Adding data in for snake: {}".format(snake['name']))
enemies.append(tempSnake)
#non-first turn snake updates: positions, length, and health
# else:
# #TODO: second turn+ snake info updates
#
# for snake in turnData['snakes']['data']:
#
# for enemy in enemies:
# #print(enemy.id)
# #print(enemy.name)
#
# if enemy.id == snake['id']:
# print(snake['id'])
# print(snake['name'])
#
# print(enemy.locations)
#
# tempLocations = []
# for location in snake['body']['data']:
# tempLocations.append(tuple((location['x'],location['y'])))
#
# enemy.locations = tempLocations
#
# enemy.length = snake['length']
#
# enemy.health = snake['health']
#initialize the security score for the turn as being zero
securityScore = MoveChoices(100,100,100,100)
strategyScore = MoveChoices()
#list of moves which are allowed from a security standpoint, and are optimal from a strategy standpoint
finalDirectionList = []
#securityScore checks, to make sure this next turn's move is safe
#check to see we wont hit ourself
#securityScore.addMoves(squatchyHitCheck(squatchy, 100))
securityScore.boolDownMoves(squatchyHitCheck(squatchy, 100))
securityScore.printMoves("After `squatchyHitCheck`: ")
#check to see we won't hit a wall
#securityScore.addMoves(wallHitCheck(squatchy, gameBoard.height, gameBoard.width, 100))
securityScore.boolDownMoves(wallHitCheck(squatchy, gameBoard.height, gameBoard.width, 100))
securityScore.printMoves("After `wallHitCheck`: ")
#check to see we won't hit another snake
#securityScore.addMoves(enemyHitCheck(squatchy, enemies, 100))
securityScore.boolDownMoves(enemyHitCheck(squatchy, enemies, 100))
securityScore.printMoves("After `enemyHitCheck`: ")
#TODO: check to see if another snake might move into the same spot as us next turns
securityScore.boolDownMoves(moveToSameCheck(squatchy, enemies, 100))
securityScore.printMoves("After `moveToSameCheck`: ")
#if the `securityScore.bestDirection` is a list, move on to the strategyScore direction
#returns a list of directions that should be moved in
securityScoreDirections = securityScore.bestDirection()
if len(securityScoreDirections) == 1:
print("------------------------------------------------")
print("The only safe direction to move is: {}".format(securityScoreDirections))
#return the only element of the list of available moves
return securityScoreDirections[0]
#else, if the `securityScore.bestDirection` is only one direction, return that
elif len(securityScoreDirections) > 1:
print("------------------------------------------------")
print("There is more than one safe choice({}), moving on to use strategy choices".format(securityScoreDirections))
#strategyScore calculations
#TODO: check to see if another snake might move into the same spot as us next turns
# strategyScore.addMoves(moveToSameCheck(squatchy, enemies, 100))
# strategyScore.printMoves("After `moveToSameCheck`: ")
#TODO: check to see if the snake will have moved away from a spot next turn. maybe have this and `moveToSameCheck` in security check?
#TODO: check if they are moving into an enclosed space, don't do that (flood fill?)
#TODO: check to see if we can eat next turn
print("------------------------------------------------")
#print(gameBoard.food)
if gameBoard.food:
if squatchy.health < 50:
strategyScore.addMoves(foodCheck(squatchy, gameBoard.height, gameBoard.width, gameBoard.food, 100))
strategyScore.printMoves("After `foodCheck`: ")
else: print("Squatchy's health is above 50, so no food moves will be recommended")
else: print("There is no food on the board")
# simplest strategy play is moving to the empty quadrant
strategyScore.addMoves(findOpenSpace(squatchy, enemies, gameBoard, 100))
print("------------------------------------------------")
strategyScore.printMoves("After `findOpenSpace`: ")
#TODO: advanced strategy move checks
#reconcile security and strategy scores
print("------------------------------------------------")
print("strategyScore bestDirection:")
strategyScoreDirections = strategyScore.bestDirection()
#loop through each of the good security moves, and better strategy moves
for secureDirection in securityScoreDirections:
for stratDirection in strategyScoreDirections:
#if the move in strategy is also safe, add it to the final list of moves that could be made
if secureDirection == stratDirection: finalDirectionList.append(secureDirection)
else:
print("------------------------------------------------")
print("No safe move available, returning totally random direction")
#fallback, return a random direction
            returnValue = random.choice(["up", "right", "down", "left"])
print("Returning totally random direction: {}".format(returnValue))
return returnValue
print("------------------------------------------------")
print("finalDirectionList: {}".format(finalDirectionList))
#if there is only one "final" approved move, return that
if len(finalDirectionList) == 1:
return finalDirectionList[0]
#if there are multiple options for safe and strategic moves, return a random one
elif len(finalDirectionList) > 1:
returnValue = random.choice(finalDirectionList)
print("Returning random move from multiple finalDirectionList options: {}".format(returnValue))
return returnValue
else:
print("------------------------------------------------")
print("couldn't work out a strategic move to make, falling back to random safe move")
#fallback, return a random direction
returnValue = random.choice(securityScoreDirections)
print("Returning random safe move: {}".format(returnValue))
return returnValue
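
# The Score helper used above isn't included in this excerpt. A minimal sketch of
# the interface the logic assumes -- boolDownMoves() masks out directions a check
# reports unsafe, addMoves() accumulates strategy points, bestDirection() returns
# the unblocked directions with the highest score. The names mirror the calls
# above, but the semantics here are an assumption, not the project's actual
# implementation:
class Score:
    def __init__(self):
        self.moves = {"up": 0, "right": 0, "down": 0, "left": 0}
        self.blocked = set()

    def addMoves(self, scored):
        # accumulate strategy points per direction, e.g. {"up": 50}
        for direction, points in scored.items():
            self.moves[direction] += points

    def boolDownMoves(self, unsafe):
        # block every direction a security check reported as unsafe
        self.blocked.update(unsafe)

    def printMoves(self, label):
        print(label, {d: v for d, v in self.moves.items() if d not in self.blocked})

    def bestDirection(self):
        candidates = {d: v for d, v in self.moves.items() if d not in self.blocked}
        if not candidates:
            return []
        top = max(candidates.values())
        return [d for d, v in candidates.items() if v == top]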
|
[
"noreply@github.com"
] |
HaywardPeirce.noreply@github.com
|
47d46c7d33dabb7020a5068dbdaaf4be937aceac
|
fb9371054b6ce236c6b202cc51c6bc98a3483060
|
/Python_code/温度转换.py
|
140faf39bf17196b0dd75cfc0e3cf9b6cc6990b5
|
[] |
no_license
|
skymoonfp/python_learning
|
621d5e72c5b356fd507e4a00c463ea8d565588fb
|
1e8340303809d8c7c3af3201084b158c1784f22e
|
refs/heads/master
| 2020-06-04T10:07:27.009212
| 2019-06-19T13:52:44
| 2019-06-19T13:52:44
| 191,978,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# TempConvert.py
for i in range(3):
    val = input("Enter a temperature with its unit symbol (e.g. 32C): ")
    if val[-1] in ['C', 'c']:
        f = 1.8 * float(val[0:-1]) + 32
        print("Converted temperature: %.2fF" % f)
    elif val[-1] in ['F', 'f']:
        c = (float(val[0:-1]) - 32) / 1.8
        print("Converted temperature: %.2fC" % c)
    else:
        print("Invalid input")
    print('\n')
input("Press Enter to exit!")
|
[
"954918@qq.com"
] |
954918@qq.com
|
abf2922e39479afdfe5a6e1d45f967bfc3638731
|
aec4af85df25a0ddda5d67fdf29299e0831a6233
|
/controller/auth_decorator.py
|
4c0e3d112e80133110c73144092d07ae13de45b1
|
[] |
no_license
|
charliebrown314/StartupCentral
|
9068875cb0c1288b2ed8b05ef153051316115032
|
d5c22be1d565a3b5bcfe2a8993541b589d92da61
|
refs/heads/master
| 2023-01-02T21:15:09.520376
| 2020-10-25T13:57:47
| 2020-10-25T13:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
from flask import session
from functools import wraps
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
user = dict(session).get('profile', None)
        # You would add a check here and use the user id or something to fetch
        # the other data for that user / check if they exist
if user:
return f(*args, **kwargs)
return 'You need to login to view this page'
return decorated_function
def requires_access_level(access_level):
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
user = dict(session).get('profile', None)
if user and user["access_level"] >= access_level:
return f(*args, **kwargs)
return 'You need higher permissions to view this'
return decorated_function
return decorator
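
# Minimal usage sketch (the routes, access level, and session layout below are
# illustrative assumptions, not part of the original application). Both
# decorators read the profile that a login flow is expected to have stored in
# session['profile']:
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    app.secret_key = 'dev-only'

    @app.route('/dashboard')
    @login_required
    def dashboard():
        return 'secret dashboard'

    @app.route('/admin')
    @requires_access_level(2)  # assumes the profile carries an "access_level" int
    def admin():
        return 'admin panel'

    app.run(debug=True)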
|
[
"jmacias@buffalo.edu"
] |
jmacias@buffalo.edu
|
c40701fb8d96eab1d4aaa9bee3007096c7172f85
|
7310939920afe496c04b955f0f27635ea7d5336d
|
/lists/migrations/0003_list.py
|
1ec3b29a4b30595a91697a574afba98e9e9c41fd
|
[] |
no_license
|
liuxscn/superlists_dev
|
d0948c33b9652f0694a6934de45de4d2c5c8b4c1
|
dd58546751f82e83fe363f0c4e8a80d37423371e
|
refs/heads/master
| 2021-06-19T11:04:02.701819
| 2019-06-26T09:19:59
| 2019-06-26T09:19:59
| 186,582,675
| 0
| 0
| null | 2021-06-10T21:28:07
| 2019-05-14T08:48:33
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 507
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2019-05-06 11:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_item_text'),
]
operations = [
migrations.CreateModel(
name='List',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
|
[
"emlliuxiang@126.com"
] |
emlliuxiang@126.com
|
59e7790c6c4ef66d796b0e1c45ec5b6d63f65425
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_055/ch164_2020_06_15_19_30_32_293205.py
|
ac84822dea86b620649440fd541237100d42a2d1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
def traduz(words, eng2port):
palavras = []
for word in words:
for ing, port in eng2port.items():
if word == ing:
palavras.append(port)
return palavras
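
# The nested loop scans the whole dictionary once per word; since eng2port is a
# dict, a direct membership test gives the same result in one lookup -- a sketch:
def traduz_rapido(words, eng2port):
    return [eng2port[word] for word in words if word in eng2port]

# e.g. traduz_rapido(['dog', 'cat'], {'dog': 'cachorro', 'cat': 'gato'})
# returns ['cachorro', 'gato']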
|
[
"you@example.com"
] |
you@example.com
|
e2434f40a00d22e0c2f82535ba496a02aa1cc1a5
|
714537c7579a79dcd1a3337de12d183e7bbdb103
|
/data_operation/augmentation/mosaic_instance_coco.py
|
170079e32533f8ea9232a5035733f3c1928b8fee
|
[
"MIT"
] |
permissive
|
lyw615/code_accumulation
|
5cfba3260bcd53a191886f57770c5fcaa34dad0b
|
5b2a5c039a725def37e19f226906ed1087880118
|
refs/heads/main
| 2023-09-06T03:58:12.907777
| 2021-11-16T04:07:52
| 2021-11-16T04:07:52
| 366,018,841
| 3
| 0
|
MIT
| 2021-11-16T04:07:53
| 2021-05-10T11:26:53
|
Python
|
UTF-8
|
Python
| false
| false
| 28,635
|
py
|
import sys,os
file_path = os.path.abspath(__file__)
sys.path.append(os.path.abspath(os.path.join(file_path, "..", "..","..", "..")))
from code_aculat.visualize.visual_base import draw_multi_bboxes
import numpy as np
import json,copy
import cv2 as cv
from PIL import Image,ImageDraw
import random
import matplotlib.pyplot as plt
from albumentations import Resize,HorizontalFlip,VerticalFlip,ChannelShuffle
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from pycocotools.coco import COCO
def rand(a=0, b=1):
    # scale a uniform [0, 1) sample into the interval [a, b)
return np.random.rand() * (b - a) + a
def merge_bboxes(bboxes, cutx, cuty, h, w, save_proportion):
"""
对于合成到新图像中的bbox,单个部分超出cutx,y以及新图片宽高的进行删除或者修改,保留不低于原来面积指定比例的bbox
"""
merge_bbox = []
for i in range(len(bboxes)):
for box in bboxes[i]:
tmp_box = []
x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
raw_x1, raw_y1, raw_x2, raw_y2 = box[0], box[1], box[2], box[3]
clipped = False
            if i == 0:
                # The merged image is split into four parts by cutx/cuty; anything a
                # pasted image sticks out past its part is covered over. y1/x1 here
                # test whether the box lies outside this part entirely (the first
                # test in each i == ... branch below has the same intent).
                if y1 > cuty or x1 > cutx:
                    continue
                # *** clip a y2 or x2 that crosses the boundary; the area-ratio check
                # below then decides whether the clipped box keeps enough of its
                # original area to be worth saving
if y2 >= cuty and y1 <= cuty:
clipped = True
y2 = cuty
if x2 >= cutx and x1 <= cutx:
clipped = True
x2 = cutx
if clipped:
cut_area = (x2 - x1) * (y2 - y1)
raw_area = (raw_x2 - raw_x1) * (raw_y2 - raw_y1)
if not (cut_area / raw_area >= save_proportion):
continue
elif i == 1:
if y2 < cuty or x1 > cutx or y1 > h:
continue
if y2 >= cuty and y1 <= cuty:
clipped = True
y1 = cuty
if x2 >= cutx and x1 <= cutx:
clipped = True
x2 = cutx
if clipped:
cut_area = (x2 - x1) * (y2 - y1)
raw_area = (raw_x2 - raw_x1) * (raw_y2 - raw_y1)
if not (cut_area / raw_area >= save_proportion):
continue
elif i == 2:
if y2 < cuty or x2 < cutx or x1 > w or y1 > h:
continue
if y2 >= cuty and y1 <= cuty:
clipped = True
y1 = cuty
if x2 >= cutx and x1 <= cutx:
clipped = True
x1 = cutx
if clipped:
cut_area = (x2 - x1) * (y2 - y1)
raw_area = (raw_x2 - raw_x1) * (raw_y2 - raw_y1)
if not (cut_area / raw_area >= save_proportion):
continue
elif i == 3:
if y1 > cuty or x2 < cutx or x1 > w:
continue
if y2 >= cuty and y1 <= cuty:
clipped = True
y2 = cuty
if x2 >= cutx and x1 <= cutx:
clipped = True
x1 = cutx
if clipped:
cut_area = (x2 - x1) * (y2 - y1)
raw_area = (raw_x2 - raw_x1) * (raw_y2 - raw_y1)
if not (cut_area / raw_area >= save_proportion):
continue
tmp_box.append(x1)
tmp_box.append(y1)
tmp_box.append(x2)
tmp_box.append(y2)
            tmp_box.append(box[-1])  # bbox has 5 columns; the last is the class id
merge_bbox.append(tmp_box)
return merge_bbox
def show_two_image(image1, image2, title=None):
    # display two RGB images or masks side by side
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
ax1 = plt.subplot(1, 2, 1)
ax2 = plt.subplot(1, 2, 2)
plt.sca(ax1)
plt.imshow(image1)
plt.sca(ax2)
plt.imshow(image2)
if title:
plt.title(title)
plt.show()
def get_rgb(v):
"""
获取RGB颜色
:param v: 十六进制颜色码
:return: RGB颜色值
"""
r, g, b = v[1:3], v[3:5], v[5:7]
return int(r, 16), int(g, 16), int(b, 16)
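
# Quick check of the helper above: get_rgb("#ff8000") parses the pairs
# "ff", "80", "00" in base 16 and returns (255, 128, 0).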
# def get_resize_scale(index,):
def get_mosaic_coco(save_proportion=0.9,
hue=.1, sat=1.5, val=1.5, proc_img=True, color_p=(3, 6), color_aug=False):
json_path=r"G:\hrsc\annotations\train.json"
image_dir=r"G:\hrsc\images\val"
coco = COCO(json_path)
imgIds=coco.getImgIds()
catIds=coco.getCatIds()
    selected_imgids = np.random.choice(imgIds, 4)
min_offset_x = 0.4
min_offset_y = 0.4
scale_low = 1 - min(min_offset_x, min_offset_y)
scale_high = scale_low + 0.2
image_datas = []
mask_datas = []
bbox_datas = []
index = 0
    # place_x/place_y together give the top-left corner where each tile is pasted
h,w=0,0
    for imid in selected_imgids:  # use the largest selected image as the final mosaic size
img=coco.loadImgs(int(imid))
if not img:
break
img=img[0]
h=max(img['height'],h)
w=max(img['width'],w)
place_x = [0, 0, int(w * min_offset_x), int(w * min_offset_x)]
place_y = [0, int(h * min_offset_y), int(h * min_offset_y), 0]
    # process each selected image
    for imid in selected_imgids:
        # fetch this image's masks
        img=coco.loadImgs(int(imid))
if not img:
break
img=img[0]
annIds=coco.getAnnIds(imgIds=img['id'],catIds=catIds,iscrowd=None)
annos=coco.loadAnns(annIds)
        # collect this image's bboxes and class ids
cat_ids_img=[]
bboxes_img=[]
if len(annos)>0:
mask_raw=coco.annToMask(annos[0])*annos[0]['category_id']
cat_ids_img.append(annos[0]['category_id'])
bboxes_img.append(annos[0]['bbox'])
for i in range(len(annos)-1):
mask_raw+=coco.annToMask(annos[i+1])*annos[i+1]['category_id']
cat_ids_img.append(annos[i+1]['category_id'])
bboxes_img.append(annos[i+1]['bbox'])
img_origin_path=os.path.join(image_dir,img['file_name'])
        # open the image
image = Image.open(img_origin_path)
image = image.convert("RGB")
        # record the opened image's size
iw, ih = image.size
        if len(bboxes_img) < 1:
            # without labels none of the later steps can proceed
            break
bbox = np.array(bboxes_img)
bbox[:,2]+=bbox[:,0]
bbox[:,3]+=bbox[:,1]
# shw_img = image
# draw_multi_bboxes(shw_img, bbox)
# drawed_img = np.array(shw_img, dtype=np.uint8)
# show_two_image(drawed_img, drawed_img)
bbox = np.insert(bbox, 4, cat_ids_img, axis=1)
# image.save(str(index)+".jpg")
        # randomly decide whether to flip the image
flip = rand() < .5
if flip and len(bbox) > 0:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
mask_raw=cv.flip(mask_raw, flipCode=1)
bbox[:, [0, 2]] = iw - bbox[:, [2, 0]]
        # paste position: the top-left corner assigned to each of the four tiles
dx = place_x[index]
dy = place_y[index]
        # the resized image must be able to fill its assigned region
if index == 0:
min_x, min_y = place_x[2], place_y[2]
elif index == 1:
min_x, min_y = place_x[2], h - place_y[2]
elif index == 2:
min_x, min_y = w - place_x[2], h - place_y[2]
elif index == 3:
min_x, min_y = w - place_x[2], place_y[2]
scale = rand(scale_low, scale_high)
nw,nh=int(iw*scale),int(ih*scale)
min_scale=min(nw/min_x,nh/min_y,1)+0.001
nw,nh=int(nw/min_scale),int(nh/min_scale)
# show_two_image(image,mask_raw)
        # resize the image and mask to the target size
image = image.resize((nw, nh), Image.BICUBIC)
mask = cv.resize(mask_raw, dsize=(nw, nh), interpolation=cv.INTER_NEAREST)
# show_two_image(image,mask)
        # HSV color jitter, using the standard off-the-shelf recipe
hue = rand(-hue, hue)
sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
x = rgb_to_hsv(np.array(image) / 255.)
x[..., 0] += hue
x[..., 0][x[..., 0] > 1] -= 1
x[..., 0][x[..., 0] < 0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x > 1] = 1
x[x < 0] = 0
image = hsv_to_rgb(x)
image = Image.fromarray((image * 255).astype(np.uint8))
# **image.show()
        # w, h are the configured size of the merged image
new_image = Image.new('RGB', (w, h))
new_mask = np.zeros(shape=(h,w),dtype=np.uint8)
new_mask=Image.fromarray(new_mask)
        # dx, dy give the top-left paste position; paste only anchors the corner,
        # so anything that overflows the canvas is simply lost
new_image.paste(image, (dx, dy))
mask = Image.fromarray(mask)
new_mask.paste(mask, (dx, dy))
        # fill the part of new_mask below/right of (dx, dy) with mask; gaps are
        # left empty and any overflow is clipped
        # new_mask[dx:,dy:]=mask[:w-dx,:h-dy]
image_data = np.array(new_image)
if color_aug:
"在拼图中随机使用颜色增强"
apply_p = random.randint(color_p[0], color_p[1]) / 10
if random.randint(0, 10) / 10 > apply_p:
color_change = random.randint(0, 1)
if color_change == 0:
image_data = ChannelShuffle(p=1)(image=image_data)["image"]
elif color_change == 1:
image_gray = cv.cvtColor(image_data, cv.COLOR_RGB2GRAY)
image_data = cv.cvtColor(image_gray, cv.COLOR_GRAY2RGB)
mask_data = np.array(new_mask,dtype=np.uint8)
# show_two_image(image_data,mask_data)
image_data = image_data / 255
index = index + 1
bbox_data = []
        # rework the bboxes for the new canvas
if len(bbox) > 0:
            # scale the coordinates by the resize ratio, then add this tile's
            # top-left offset on the big image
bbox[:, [0, 2]] = bbox[:, [0, 2]] * nw / iw + dx
bbox[:, [1, 3]] = bbox[:, [1, 3]] * nh / ih + dy
raw_bbox = copy.deepcopy(bbox)
            # clamp negatives to 0; values past w, h get clamped to the bounds
bbox[:, 0:2][bbox[:, 0:2] < 0] = 0
            ## clamp to the last valid pixel index: w-1 / h-1 is already the
            ## maximum (uint8 values would wrap around past 255)
bbox[:, 2][bbox[:, 2] > (w - 1)] = w - 1
bbox[:, 3][bbox[:, 3] > (h - 1)] = h - 1
            # compare areas before and after clipping; keep boxes above the threshold
now_areas = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1])
raw_areas = (raw_bbox[:, 2] - raw_bbox[:, 0]) * (raw_bbox[:, 3] - raw_bbox[:, 1])
save_index = np.where(now_areas / raw_areas > save_proportion)[0]
bbox = bbox[save_index]
            # # visualize whether the rescaled bboxes line up with the pasted tile
# shw_img = Image.fromarray(np.array(image_data *255,dtype=np.uint8))
# draw_multi_bboxes(shw_img, bbox)
# drawed_img = np.array(shw_img, dtype=np.uint8)
# show_two_image(drawed_img, new_mask)
# ** "用面积过滤指定类别对象"
# both_save_inds=np.array([])
# bask_inds=np.where(bbox[:,4]==bask_num)[0]
# bask_save_inds=np.where(now_areas[bask_inds]>3200)[0]
# both_save_inds=both_save_inds+bask_inds[bask_save_inds]
#
# trace_inds = np.where(bbox[:, 4] == trace_num)[0]
# trace_save_inds = np.where(now_areas[trace_inds] > 43000)[0]
# both_save_inds = both_save_inds + trace_inds[trace_save_inds]
#
# bbox=bbox[both_save_inds]
if len(bbox) > 0:
bbox_w = bbox[:, 2] - bbox[:, 0]
bbox_h = bbox[:, 3] - bbox[:, 1]
                # only boxes whose width and height both exceed 1 pixel move on;
                # logical_and builds the boolean mask bbox[True, True, ...]
bbox = bbox[np.logical_and(bbox_w > 1, bbox_h > 1)]
bbox_data = np.zeros((len(bbox), 5))
bbox_data[:len(bbox)] = bbox
image_datas.append(image_data)
mask_datas.append(mask_data)
bbox_datas.append(bbox_data)
    # cut the four tiles along cutx/cuty and stitch them together
cutx = np.random.randint(int(w * min_offset_x), int(w * (1 - min_offset_x)))
cuty = np.random.randint(int(h * min_offset_y), int(h * (1 - min_offset_y)))
new_image = np.zeros([h, w, 3])
new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]
new_image = (new_image * 255).astype(np.uint8)
    # **# sanity-check the fourth tile's bboxes against its mask
# mmask=Image.fromarray((mask_datas[3]*255).astype(np.uint8))
# for num in range(len(bbox_datas[3])):
# left, top, right, bottom = bbox_datas[3][num]
# draw = ImageDraw.Draw(mmask)
# draw.rectangle([left , top , right, bottom ], outline="green",width=2)
#
# mmask.show()
new_mask = np.zeros([h, w])
new_mask[:cuty, :cutx] = mask_datas[0][:cuty, :cutx]
new_mask[cuty:, :cutx] = mask_datas[1][cuty:, :cutx]
new_mask[cuty:, cutx:] = mask_datas[2][cuty:, cutx:]
new_mask[:cuty, cutx:] = mask_datas[3][:cuty, cutx:]
new_mask = new_mask.astype(np.uint8)
# shw_img = Image.fromarray(new_image)
# for i in range(len(bbox_datas)):
# draw_multi_bboxes(shw_img, bbox_datas[i])
# drawed_img = np.array(shw_img, dtype=np.uint8)
# show_two_image(drawed_img, new_mask)
new_bbox = merge_bboxes(bbox_datas, cutx, cuty, h, w, save_proportion=save_proportion)
# if len(new_bbox)>0:
# **"合并bbox后,再次用面积过滤指定类别对象"
# new_bbox=np.array(new_bbox)
# now_areas = (new_bbox[:, 2] - new_bbox[:, 0]) * (new_bbox[:, 3] - new_bbox[:, 1])
#
if len(new_bbox) > 0:
new_bbox = np.array(new_bbox).astype(np.int)
        # slicing [ymin:ymax, xmin:xmax] excludes xmax/ymax, so extend by one
new_bbox[:, 2] = new_bbox[:, 2] + 1
new_bbox[:, 3] = new_bbox[:, 3] + 1
new_bbox[:, 2][new_bbox[:, 2] > w - 1] = w - 1
new_bbox[:, 3][new_bbox[:, 3] > h - 1] = h - 1
clean_mask=np.zeros(shape=new_mask.shape,dtype=np.uint8)
masks=[]
for idx in range(len(new_bbox)):
rectangle=new_bbox[idx]
xmin, ymin, xmax, ymax,cls_num = rectangle[0], rectangle[1], rectangle[2], rectangle[3],rectangle[4]
mask = np.zeros_like(new_mask, np.uint8)
mask[ymin:ymax, xmin:xmax] = new_mask[ymin:ymax, xmin:xmax]
            # pixel value at the rectangle's center
mean_x = (xmin + xmax) // 2
mean_y = (ymin + ymax) // 2
end = min((mask.shape[1], round(xmax) + 1))
start = max((0, round(xmin) - 1))
flag = True
for i in range(mean_x, end):
x_ = i
y_ = mean_y
pixels = new_mask[y_, x_]
                if pixels != 0:  # 0 is background
mask = (mask == pixels).astype(np.uint8)
flag = False
break
if flag:
for i in range(mean_x, start, -1):
x_ = i
y_ = mean_y
pixels = new_mask[y_, x_]
if pixels != 0:
mask = (mask == pixels).astype(np.uint8)
break
masks.append(mask)
clean_mask+=mask*cls_num
shw_img=Image.fromarray(new_image)
draw_multi_bboxes(shw_img,new_bbox)
shw_mask = Image.fromarray(clean_mask)
draw_multi_bboxes(shw_mask, new_bbox)
drawed_img=np.array(shw_img,dtype=np.uint8)
drawed_mask=np.array(shw_mask,dtype=np.uint8)
show_two_image(drawed_img,drawed_mask)
return new_image, new_mask, new_bbox
else:
return [], [], []
for i in range(11):
get_mosaic_coco()
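
# A tiny illustrative check for merge_bboxes (the values are made up): a 100x100
# canvas cut at (50, 50), one box per quadrant list, keeping boxes that retain
# at least half of their area after clipping. The second box crosses cuty from
# below, so it gets clipped to y1 = 50; the others survive unchanged.
def _demo_merge_bboxes():
    quadrant_boxes = [
        [[10, 10, 40, 40, 0]],  # fully inside the top-left part
        [[10, 45, 40, 90, 1]],  # crosses the horizontal cut -> clipped
        [[60, 60, 90, 90, 2]],  # fully inside the bottom-right part
        [[55, 10, 95, 45, 3]],  # fully inside the top-right part
    ]
    print(merge_bboxes(quadrant_boxes, cutx=50, cuty=50, h=100, w=100,
                       save_proportion=0.5))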
|
[
"shwwgrshbd@qq.com"
] |
shwwgrshbd@qq.com
|
3c6c662ac03d2f07a98ed0f8b23024a2730b8ee7
|
dbc60695216bcb29feeb6b383f7acef045853363
|
/train.py
|
49c2649408d2073c82ca5ccb52fd6eec241f8ea6
|
[] |
no_license
|
Hauf3n/Categorical-DQN-Atari-PyTorch
|
c0e4e173664a52b9b89adedd21f30adf24b09621
|
aed0e99b1f1648667442aad33240075af7ac4b96
|
refs/heads/master
| 2022-12-11T12:30:48.912889
| 2020-09-14T15:56:22
| 2020-09-14T15:56:22
| 260,343,819
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,203
|
py
|
import argparse
import numpy as np
import gym
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import time
import random
import os
from Agents import C51_Agent
from Atari_Wrapper import Atari_Wrapper
from Env_Runner import Env_Runner
from Experience_Replay import Experience_Replay
device = torch.device("cuda:0")
dtype = torch.float
def make_transitions(obs, actions, rewards, dones):
# observations are in uint8 format
tuples = []
steps = len(obs) - 1
for t in range(steps):
tuples.append((obs[t],
actions[t],
rewards[t],
obs[t+1],
int(not dones[t])))
return tuples
def train(hyperparameters):
# ARGS
env_name = hyperparameters.env
env_with_lives = hyperparameters.lives
v_min = hyperparameters.v_min
v_max = hyperparameters.v_max
num_atoms = hyperparameters.atoms
num_stacked_frames = hyperparameters.stacked_frames
replay_memory_size = hyperparameters.replay_memory_size
min_replay_size_to_update = hyperparameters.replay_size_to_update
lr = hyperparameters.lr
gamma = hyperparameters.gamma
minibatch_size = hyperparameters.minibatch_size
steps_rollout = hyperparameters.steps_rollout
start_eps = hyperparameters.start_eps
final_eps = hyperparameters.final_eps
final_eps_frame = hyperparameters.final_eps_frame
total_steps = hyperparameters.total_steps
target_net_update = hyperparameters.target_net_update
save_model_steps = hyperparameters.save_model_steps
report = hyperparameters.report
# INIT
delta_z = (v_max - v_min)/ (num_atoms - 1)
value_support = torch.tensor([ v_min + (i * delta_z) for i in range(num_atoms)]).to(device)
raw_env = gym.make(env_name)
env = Atari_Wrapper(raw_env, env_name, num_stacked_frames, use_add_done=env_with_lives)
in_channels = num_stacked_frames
num_actions = env.action_space.n
eps_interval = start_eps-final_eps
agent = C51_Agent(in_channels, num_actions, num_atoms, value_support, start_eps).to(device)
target_agent = C51_Agent(in_channels, num_actions, num_atoms, value_support, start_eps).to(device)
target_agent.load_state_dict(agent.state_dict())
replay = Experience_Replay(replay_memory_size)
runner = Env_Runner(env, agent)
optimizer = optim.Adam(agent.parameters(), lr=lr)
num_steps = 0
num_model_updates = 0
corrected_index = torch.tensor([num_atoms*i for i in range(minibatch_size)]).to(device)
corrected_index = corrected_index.repeat_interleave(num_atoms).reshape(minibatch_size, num_atoms)
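
    # corrected_index offsets each minibatch row by num_atoms so per-row atom
    # indices can address one flattened (minibatch_size * num_atoms) vector:
    # with 51 atoms, row 0 owns flat slots 0-50, row 1 owns 51-101, and so on.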
# TRAIN
start_time = time.time()
while num_steps < total_steps:
# set agent exploration | cap exploration after x timesteps to final epsilon
new_epsilon = np.maximum(final_eps, start_eps - ( eps_interval * num_steps/final_eps_frame))
agent.set_epsilon(new_epsilon)
# get data
obs, actions, rewards, dones = runner.run(steps_rollout)
transitions = make_transitions(obs, actions, rewards, dones)
replay.insert(transitions)
# add
num_steps += steps_rollout
# check if update
if num_steps < min_replay_size_to_update:
continue
# update
for update in range(4):
optimizer.zero_grad()
minibatch = replay.get(minibatch_size)
# uint8 to float32 and normalize to 0-1
obs = (torch.stack([i[0] for i in minibatch]).to(device).to(dtype)) / 255
actions = np.stack([i[1] for i in minibatch])
rewards = torch.tensor([i[2] for i in minibatch]).to(device)
# uint8 to float32 and normalize to 0-1
next_obs = (torch.stack([i[3] for i in minibatch]).to(device).to(dtype)) / 255
dones = torch.tensor([i[4] for i in minibatch]).to(device)
# *** C51 ***
# get atom probabilities for obs and next obs
obs_Ps = agent(obs)[range(minibatch_size), actions] # get atoms from used action
next_obs_Ps = target_agent(next_obs).detach() # will be used as label later
# get a* from target network
best_a = target_agent.greedy(next_obs_Ps)
# get next_obs atoms from a*
next_obs_Ps = next_obs_Ps[range(minibatch_size), best_a]
# ^T_z
tmp=torch.ones(minibatch_size, num_atoms).to(device).to(dtype) * (gamma * value_support)
rewards = rewards.repeat_interleave(num_atoms).reshape(minibatch_size,num_atoms)
dones = dones.repeat_interleave(num_atoms).reshape(minibatch_size,num_atoms)
T_z = rewards + tmp * dones
# clip to value interval
T_z = torch.clamp(T_z, min=v_min, max=v_max)
# b
b = (T_z - torch.tensor(v_min).to(device)) / torch.tensor(delta_z).to(device)
# l , u
l = torch.floor(b)
u = torch.ceil(b)
# distribute probability
m_l = next_obs_Ps * (u-b)
m_u = next_obs_Ps * (b-l)
# much faster than using a loop
m = torch.zeros(minibatch_size*num_atoms).to(device).to(dtype)
l = l + corrected_index
u = u + corrected_index
m = m.index_add(0,l.reshape(-1).long(),m_l.reshape(-1))
m = m.index_add(0,u.reshape(-1).long(),m_u.reshape(-1))
m = m.reshape(minibatch_size,num_atoms)
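
            # Projection example (illustrative numbers): with v_min=-10, v_max=10
            # and 51 atoms, delta_z = 0.4. A target value T_z = 0.5 gives
            # b = (0.5 + 10) / 0.4 = 26.25, so l = 26, u = 27, and that atom's
            # probability p is split as p*(27 - 26.25) = 0.75p onto atom 26 and
            # p*(26.25 - 26) = 0.25p onto atom 27. index_add accumulates these
            # fractions for the whole batch in one vectorized pass.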
# cross entropy loss
loss = torch.mean( - torch.sum( m * torch.log(obs_Ps) ,dim=1))
loss.backward()
optimizer.step()
num_model_updates += 1
# update target network
if num_model_updates%target_net_update == 0:
target_agent.load_state_dict(agent.state_dict())
# print time
if num_steps%report < steps_rollout:
end_time = time.time()
print(f'*** total steps: {num_steps} | time: {end_time - start_time} ***')
start_time = time.time()
# save the dqn after some time
if num_steps%save_model_steps < steps_rollout:
torch.save(agent,f"{env_name}-{num_steps}.pt")
env.close()
if __name__ == "__main__":
hyperparameters = argparse.ArgumentParser()
# set hyperparameter
hyperparameters.add_argument('-lr', type=float, default=2.5e-4)
hyperparameters.add_argument('-v_min', type=float, default=-10)
hyperparameters.add_argument('-v_max', type=float, default=10)
hyperparameters.add_argument('-atoms', type=int, default=51)
hyperparameters.add_argument('-env', default='PongNoFrameskip-v4')
hyperparameters.add_argument('-lives', type=bool, default=False)
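    # note: argparse's type=bool treats any non-empty string as True, so
    # "-lives False" would still parse as True; omit the flag to keep the default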
hyperparameters.add_argument('-stacked_frames', type=int, default=4)
hyperparameters.add_argument('-replay_memory_size', type=int, default=250000)
hyperparameters.add_argument('-replay_size_to_update', type=int, default=20000)
hyperparameters.add_argument('-gamma', type=float, default=0.99)
hyperparameters.add_argument('-minibatch_size', type=int, default=32)
hyperparameters.add_argument('-steps_rollout', type=int, default=16)
hyperparameters.add_argument('-start_eps', type=float, default=1)
hyperparameters.add_argument('-final_eps', type=float, default=0.05)
hyperparameters.add_argument('-final_eps_frame', type=int, default=1000000)
hyperparameters.add_argument('-total_steps', type=int, default=25000000)
hyperparameters.add_argument('-target_net_update', type=int, default=625)
hyperparameters.add_argument('-save_model_steps', type=int, default=500000)
hyperparameters.add_argument('-report', type=int, default=50000)
train(hyperparameters.parse_args())
|
[
"53398268+Hauf3n@users.noreply.github.com"
] |
53398268+Hauf3n@users.noreply.github.com
|
cfae4a8360740edba9366fc11b72439016faa4be
|
5960271837100a5008870fb05300d75a9230f56d
|
/test-scripts/completeTest.py
|
7b5acb01f0e6d443542e35cc511f87ebe9b23f33
|
[
"MIT"
] |
permissive
|
speakerbug/ChristmasPi
|
3f55fd660911a831054860d79216559f13b49d39
|
9277b19047b2b3fa8beb39d0251fa53607c58998
|
refs/heads/master
| 2021-01-19T02:48:17.659593
| 2016-06-16T23:09:15
| 2016-06-16T23:09:15
| 46,230,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,300
|
py
|
import RPi.GPIO as GPIO, time
one = 11
two = 12
three = 13
four = 15
five = 16
six = 18
seven = 22
eight = 7
nine = 29
ten = 31
eleven = 32
twelve = 33
fastspeed = 0.2
slowspeed = 2.0
GPIO.setmode(GPIO.BOARD)
print("Getting Started")
time.sleep(fastspeed)
print("Turning on 1")
GPIO.setup(one, GPIO.OUT)
GPIO.output(one, True)
time.sleep(slowspeed)
print("Turning off 1")
GPIO.output(one, False)
print("Turning on 2")
GPIO.setup(two, GPIO.OUT)
GPIO.output(two, True)
time.sleep(slowspeed)
print("Turning off 2")
GPIO.output(two, False)
print("Turning on 3")
GPIO.setup(three, GPIO.OUT)
GPIO.output(three, True)
time.sleep(slowspeed)
print("Turning off 3")
GPIO.output(three, False)
print("Turning on 4")
GPIO.setup(four, GPIO.OUT)
GPIO.output(four, True)
time.sleep(slowspeed)
print("Turning off 4")
GPIO.output(four, False)
print("Turning on 5")
GPIO.setup(five, GPIO.OUT)
GPIO.output(five, True)
time.sleep(slowspeed)
print("Turning off 5")
GPIO.output(five, False)
print("Turning on 6")
GPIO.setup(six, GPIO.OUT)
GPIO.output(six, True)
time.sleep(slowspeed)
print("Turning off 6")
GPIO.output(six, False)
print("Turning on 7")
GPIO.setup(seven, GPIO.OUT)
GPIO.output(seven, True)
time.sleep(slowspeed)
print("Turning off 7")
GPIO.output(seven, False)
print("Turning on 8")
GPIO.setup(eight, GPIO.OUT)
GPIO.output(eight, True)
time.sleep(slowspeed)
print("Turning off 8")
GPIO.output(eight, False)
print("Turning on 9")
GPIO.setup(nine, GPIO.OUT)
GPIO.output(nine, True)
time.sleep(slowspeed)
print("Turning off 9")
GPIO.output(nine, False)
print("Turning on 10")
GPIO.setup(ten, GPIO.OUT)
GPIO.output(ten, True)
time.sleep(slowspeed)
print("Turning off 10")
GPIO.output(ten, False)
print("Turning on 11")
GPIO.setup(eleven, GPIO.OUT)
GPIO.output(eleven, True)
time.sleep(slowspeed)
print("Turning off 11")
GPIO.output(eleven, False)
print("Turning on 12")
GPIO.setup(twelve, GPIO.OUT)
GPIO.output(twelve, True)
time.sleep(slowspeed)
print("Turning off 12")
GPIO.output(twelve, False)
#all on then off
print("Turning on 1")
GPIO.setup(one, GPIO.OUT)
GPIO.output(one, True)
time.sleep(fastspeed)
print("Turning on 2")
GPIO.setup(two, GPIO.OUT)
GPIO.output(two, True)
time.sleep(fastspeed)
print("Turning on 3")
GPIO.setup(three, GPIO.OUT)
GPIO.output(three, True)
time.sleep(fastspeed)
print("Turning on 4")
GPIO.setup(four, GPIO.OUT)
GPIO.output(four, True)
time.sleep(fastspeed)
print("Turning on 5")
GPIO.setup(five, GPIO.OUT)
GPIO.output(five, True)
time.sleep(fastspeed)
print("Turning on 6")
GPIO.setup(six, GPIO.OUT)
GPIO.output(six, True)
time.sleep(fastspeed)
print("Turning on 7")
GPIO.setup(seven, GPIO.OUT)
GPIO.output(seven, True)
time.sleep(fastspeed)
print("Turning on 8")
GPIO.setup(eight, GPIO.OUT)
GPIO.output(eight, True)
time.sleep(fastspeed)
print("Turning on 9")
GPIO.setup(nine, GPIO.OUT)
GPIO.output(nine, True)
time.sleep(fastspeed)
print("Turning on 10")
GPIO.setup(ten, GPIO.OUT)
GPIO.output(ten, True)
time.sleep(fastspeed)
print("Turning on 11")
GPIO.setup(eleven, GPIO.OUT)
GPIO.output(eleven, True)
time.sleep(fastspeed)
print("Turning on 12")
GPIO.setup(twelve, GPIO.OUT)
GPIO.output(twelve, True)
time.sleep(fastspeed)
print("Turning off 1")
GPIO.output(one, False)
print("Turning off 2")
GPIO.output(two, False)
print("Turning off 3")
GPIO.output(three, False)
print("Turning off 4")
GPIO.output(four, False)
print("Turning off 5")
GPIO.output(five, False)
print("Turning off 6")
GPIO.output(six, False)
print("Turning off 7")
GPIO.output(seven, False)
print("Turning off 8")
GPIO.output(eight, False)
print("Turning off 9")
GPIO.output(nine, False)
print("Turning off 10")
GPIO.output(ten, False)
print("Turning off 11")
GPIO.output(eleven, False)
print("Turning off 12")
GPIO.output(twelve, False)
#all on then off
print("Turning on 1")
GPIO.setup(one, GPIO.OUT)
GPIO.output(one, True)
time.sleep(fastspeed)
print("Turning on 2")
GPIO.setup(two, GPIO.OUT)
GPIO.output(two, True)
time.sleep(fastspeed)
print("Turning on 3")
GPIO.setup(three, GPIO.OUT)
GPIO.output(three, True)
time.sleep(fastspeed)
print("Turning on 4")
GPIO.setup(four, GPIO.OUT)
GPIO.output(four, True)
time.sleep(fastspeed)
print("Turning on 5")
GPIO.setup(five, GPIO.OUT)
GPIO.output(five, True)
time.sleep(fastspeed)
print("Turning on 6")
GPIO.setup(six, GPIO.OUT)
GPIO.output(six, True)
time.sleep(fastspeed)
print("Turning on 7")
GPIO.setup(seven, GPIO.OUT)
GPIO.output(seven, True)
time.sleep(fastspeed)
print("Turning on 8")
GPIO.setup(eight, GPIO.OUT)
GPIO.output(eight, True)
time.sleep(fastspeed)
print("Turning on 9")
GPIO.setup(nine, GPIO.OUT)
GPIO.output(nine, True)
time.sleep(fastspeed)
print("Turning on 10")
GPIO.setup(ten, GPIO.OUT)
GPIO.output(ten, True)
time.sleep(fastspeed)
print("Turning on 11")
GPIO.setup(eleven, GPIO.OUT)
GPIO.output(eleven, True)
time.sleep(fastspeed)
print("Turning on 12")
GPIO.setup(twelve, GPIO.OUT)
GPIO.output(twelve, True)
time.sleep(fastspeed)
print("Turning off 1")
GPIO.output(one, False)
print("Turning off 2")
GPIO.output(two, False)
print("Turning off 3")
GPIO.output(three, False)
print("Turning off 4")
GPIO.output(four, False)
print("Turning off 5")
GPIO.output(five, False)
print("Turning off 6")
GPIO.output(six, False)
print("Turning off 7")
GPIO.output(seven, False)
print("Turning off 8")
GPIO.output(eight, False)
print("Turning off 9")
GPIO.output(nine, False)
print("Turning off 10")
GPIO.output(ten, False)
print("Turning off 11")
GPIO.output(eleven, False)
print("Turning off 12")
GPIO.output(twelve, False)
#all on then off
print("Turning on 1")
GPIO.setup(one, GPIO.OUT)
GPIO.output(one, True)
time.sleep(fastspeed)
print("Turning on 2")
GPIO.setup(two, GPIO.OUT)
GPIO.output(two, True)
time.sleep(fastspeed)
print("Turning on 3")
GPIO.setup(three, GPIO.OUT)
GPIO.output(three, True)
time.sleep(fastspeed)
print("Turning on 4")
GPIO.setup(four, GPIO.OUT)
GPIO.output(four, True)
time.sleep(fastspeed)
print("Turning on 5")
GPIO.setup(five, GPIO.OUT)
GPIO.output(five, True)
time.sleep(fastspeed)
print("Turning on 6")
GPIO.setup(six, GPIO.OUT)
GPIO.output(six, True)
time.sleep(fastspeed)
print("Turning on 7")
GPIO.setup(seven, GPIO.OUT)
GPIO.output(seven, True)
time.sleep(fastspeed)
print("Turning on 8")
GPIO.setup(eight, GPIO.OUT)
GPIO.output(eight, True)
time.sleep(fastspeed)
print("Turning on 9")
GPIO.setup(nine, GPIO.OUT)
GPIO.output(nine, True)
time.sleep(fastspeed)
print("Turning on 10")
GPIO.setup(ten, GPIO.OUT)
GPIO.output(ten, True)
time.sleep(fastspeed)
print("Turning on 11")
GPIO.setup(eleven, GPIO.OUT)
GPIO.output(eleven, True)
time.sleep(fastspeed)
print("Turning on 12")
GPIO.setup(twelve, GPIO.OUT)
GPIO.output(twelve, True)
time.sleep(fastspeed)
print("Turning off 1")
GPIO.output(one, False)
print("Turning off 2")
GPIO.output(two, False)
print("Turning off 3")
GPIO.output(three, False)
print("Turning off 4")
GPIO.output(four, False)
print("Turning off 5")
GPIO.output(five, False)
print("Turning off 6")
GPIO.output(six, False)
print("Turning off 7")
GPIO.output(seven, False)
print("Turning off 8")
GPIO.output(eight, False)
print("Turning off 9")
GPIO.output(nine, False)
print("Turning off 10")
GPIO.output(ten, False)
print("Turning off 11")
GPIO.output(eleven, False)
print("Turning off 12")
GPIO.output(twelve, False)
GPIO.cleanup()
|
[
"henry@henrysaniuk.com"
] |
henry@henrysaniuk.com
|
52f77928fb0b623c06887de428aff8930460aeee
|
cac5c013d2ec78bb291b9c023987c215f6979799
|
/magic/python/tcpClient.py
|
4943a0b5a087290183191bfa2e66cf2c6bada85e
|
[] |
no_license
|
elitecodegroovy/MagicPython
|
94dc9f55bee699ef978bcd8de72488042a836f59
|
7ef6ffc059c042ab1c81185b4b69ecfbeb794a0d
|
refs/heads/master
| 2021-01-22T20:44:20.322734
| 2015-04-10T07:19:24
| 2015-04-10T07:19:24
| 8,220,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
__author__ = 'JohnLiu'
from socket import *
HOST = 'localhost'
PORT = 21567
BUFSIZ = 1024
ADDR = (HOST, PORT)
tcpCliSock = socket(AF_INET, SOCK_STREAM)
tcpCliSock.connect(ADDR)
while True:
data = raw_input('> ')
if not data:
break
tcpCliSock.send(data)
data = tcpCliSock.recv(BUFSIZ)
if not data:
break
print data
tcpCliSock.close()
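
# The client above is Python 2 (raw_input, print statement). A Python 3 port
# would encode and decode the socket bytes explicitly -- a sketch:
#
# from socket import socket, AF_INET, SOCK_STREAM
#
# with socket(AF_INET, SOCK_STREAM) as sock:
#     sock.connect(('localhost', 21567))
#     while True:
#         data = input('> ')
#         if not data:
#             break
#         sock.send(data.encode('utf-8'))
#         reply = sock.recv(1024)
#         if not reply:
#             break
#         print(reply.decode('utf-8'))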
|
[
"elite_jigang@163.com"
] |
elite_jigang@163.com
|
07df02977d52ce2be8857d94da104372134c3813
|
fd64e364368bcb2cdcf77ab1e0fc234a6b698f69
|
/Python/PERFCONT.py
|
43eb9b9989a3d5b14e0e975b890428725a70f37f
|
[] |
no_license
|
Parizval/CodeChefCodes
|
57712069f3d56cc42282f9e35c6ddd9398e4a5bf
|
cfd2876816be806882650b6ea51431b1f8d6bec5
|
refs/heads/master
| 2021-07-16T13:10:15.668713
| 2020-07-06T21:40:09
| 2020-07-06T21:40:09
| 188,693,667
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
# For each contest: count cakewalk problems (solved by at least half of the
# p participants) and hard problems (solved by at most a tenth); the contest
# is balanced only with exactly one cakewalk and exactly two hard problems.
for a in range(int(input())):
    n, p = map(int, input().split())
    val = list(map(int, input().split()))
    cakewalk = 0
    hard = 0
    for i in val:
        if i >= p // 2:
            cakewalk += 1
        elif i <= p // 10:
            hard += 1
        if cakewalk > 1 and hard > 3:
            # the counts can no longer match, stop scanning early
            break
    if cakewalk == 1 and hard == 2:
        print("yes")
    else:
        print("no")
|
[
"anmolgoyal@gmail.com"
] |
anmolgoyal@gmail.com
|
b73e2bb9b0234950feed26a11ecc351761a01508
|
be19bb8435a33965682571cb7a286f728d27cfe9
|
/Main2.py
|
c3d4684ae1ddd7d2d83474123055b89a9b0b9a4d
|
[] |
no_license
|
Papyrosaurus/Aprrendre_python
|
0b1c83c19aaf7315ec06f30bc0c79a35fd9754ac
|
c80ea2de3408c9466454a5a0e2183981f77eb2c4
|
refs/heads/main
| 2022-12-09T15:16:22.949442
| 2022-11-24T15:14:38
| 2022-11-24T15:14:38
| 257,275,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
#Exercise day 001-002
|
[
"59684985+Papyrosaurus@users.noreply.github.com"
] |
59684985+Papyrosaurus@users.noreply.github.com
|
c783c7ed8acee2bf4687d046333989bc8c85a5fb
|
7974cd9076dcd50f52f16ce618d94c14de36674d
|
/saved/_vim.py
|
df4ee182f5f1e48cf3919047b1d7c56746a949c6
|
[] |
no_license
|
grant-h/vocode
|
501f78ed72595cfa43ee186a4875ad855ce7483b
|
c35e90b29eb5a203fc01d85ab1547cff31fc5bb9
|
refs/heads/master
| 2021-01-11T18:48:19.297703
| 2017-01-22T17:47:55
| 2017-01-22T17:47:55
| 79,630,403
| 2
| 1
| null | 2017-01-22T07:10:39
| 2017-01-21T07:15:08
|
Python
|
UTF-8
|
Python
| false
| false
| 487
|
py
|
from dragonfly import *
# modes
# 0 - normal
# 1 - insert
# 2 - visual
mode = 0
grammar = Grammar("vim_example")
ex_rule = MappingRule(
name="insert",
mapping={
"insert mode": Key("I"),
"select all text": Key("g, g, V, G"),
"undo": Key("u"),
"redo": Key("c-r"),
"escape" : Key("escape"),
"end command" : Key("escape")
}
)
grammar.add_rule(ex_rule)
grammar.load()
def unload():
global grammar
if grammar: grammar.unload()
grammar = None
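
# A further rule could be registered the same way before grammar.load() -- an
# illustrative sketch reusing only constructs already imported above (the
# spoken phrases and keystrokes are made up):
#
# motion_rule = MappingRule(
#     name="motion",
#     mapping={
#         "go to top": Key("g, g"),
#         "go to bottom": Key("G"),
#     }
# )
# grammar.add_rule(motion_rule)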
|
[
"elanrasabi80@gmail.com"
] |
elanrasabi80@gmail.com
|
4a3a7618fb14017d8c2c38dfba10422ff2fa9bb2
|
999d971820020a50e1a13046c4df255c71f0f5f9
|
/Week 6/richTask 6/redoingStaticEquilibrium.py
|
f843efd2c851d8f50631498fd15f524c8560bdee
|
[] |
no_license
|
kelvincaoyx/UTEA-PYTHON
|
062019f1ea79b98ee8a5f1ecf41d0fdda27cabda
|
65891610cc2ca1f662082785eb44eac1368ccc8e
|
refs/heads/master
| 2023-07-08T02:57:30.991448
| 2021-08-13T01:34:02
| 2021-08-13T01:34:02
| 382,920,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,125
|
py
|
'''
This program is used to help the user practice calculating net torque
and estimating which direction the metre stick is going to tip
'''
#importing the random library to get random number generator
import pyfiglet
import math
from library import cls
from library import randomDigit
#function that generates a list of weights that the game will be based on.
#Just input the number of weights you want
def weightGenerator(numberOfWeightsRequired):
#defining some variables
thisRoundsindexes = [0,1,2,3,4,6,7,8,9,10]
thisRoundsList = [0,0,0,0,0,"_/\_",0,0,0,0,0]
weightsToBeAdded = []
#gives us random numbers that will be added to the scale
while numberOfWeightsRequired > 0:
weightsToBeAdded.append(randomDigit(1,5))
numberOfWeightsRequired -= 1
#Puts the random weights generated into the list. it uses
#another list to determine indexes because we don't want overlap
counter = 0
while counter < len(weightsToBeAdded):
index = thisRoundsindexes[randomDigit(0,len(thisRoundsindexes))]
thisRoundsindexes.remove(index)
thisRoundsList.insert(index,weightsToBeAdded[counter])
thisRoundsList.pop(index+1)
counter += 1
#returning the list for use
return thisRoundsList
#function that takes in a list of weights and calculates which way the stick will tip
def netForceCalc(weightList):
#calculates the torque that causes the scale to turn in a negative direction
negativeTorque = 0
negativeTorqueList = weightList[:5]
counter = 4
runcount = 1
while counter >= 0:
negativeTorque += negativeTorqueList[counter]*0.1*runcount
counter -=1
runcount += 1
    #calculates the torque that causes the scale to turn in a positive direction
positiveTorque = 0
positiveTorqueList = weightList[6:]
counter = 0
runcount = 1
while counter < 5:
positiveTorque += positiveTorqueList[counter]*0.1*runcount
counter += 1
runcount += 1
#returns the predicted position that the scale will turn
if negativeTorque > positiveTorque:
return "a"
elif negativeTorque == positiveTorque:
return "b"
else:
return "c"
#helper function that will start the entire program and access the other functions
def helper():
#defining some variables that will be used throughout the function
start = 2
winCounter = 0
gameCounter = 1
continueGame = input("Would you like to continue (y/n)? ")
#while loop to keep the game going until they don't want to play
while continueGame.lower() == "y":
cls()
print(pyfiglet.figlet_format("Game " + str(gameCounter) + " !"))
#Printing the header for the scale
print("0.0m 0.1m 0.2m 0.3m 0.4m 0.5m 0.6m 0.7m 0.8m 0.9m 1.0m")
#generation of the weightlist that will be used throughout
weightList = weightGenerator(start)
        #calculating the direction that the scale will go
answer = netForceCalc(weightList)
#printing out the weightlist in a way that it lines up with the header from above
counter = 0
display = ""
while True:
if weightList[counter] == 0:
weightList[counter] = " "
elif counter == 5:
weightList[counter] = str(weightList[counter] + " ")
else:
weightList[counter] = str(weightList[counter]) + "N "
counter += 1
if counter == 11:
display = display.join(weightList)
print(display)
break
        #retrieving the user's guess
userAnswer = input("\n\nChoose the correct answer (a,b,c):\n \
a)spin left\t b)equilibrium\t c)spin right\t")
#comparing the user's input with the answer. Will increase the weights
#depending on the result of the game
if userAnswer == answer:
print("\nCorrect\n")
#keeps track of the wins
winCounter += 1
start += randomDigit(1,3)
if start > 10:
start = 10
else:
print("\nIncorrect\n")
start -= randomDigit(1,3)
if start <=0:
start = 1
#keeping track of the total number of games
gameCounter += 1
#asks if the user wants to continue or not.
continueGame = input("Would you like to continue (y/n)? ")
#prints the results of the games
cls()
if gameCounter-1 == 0:
print(pyfiglet.figlet_format("Sorry to see you go without playing"))
else:
score = winCounter/(gameCounter-1)*100
print(pyfiglet.figlet_format("You got " + str(winCounter) + " out of " + str(gameCounter-1) + " !"))
print("That is a " + str(math.trunc(score)) + "% win rate!")
#printing the header for the game
print(pyfiglet.figlet_format("Welcome to the static equilbrium quiz!"))
#activating the helper function
helper()
|
[
"kelvincaoyx@gmail.com"
] |
kelvincaoyx@gmail.com
|
fdf3e8e89abd0314c8aaf816005a86c63aa0ef28
|
25ea73671a2556652afb7805bc164adaff2f1cee
|
/sigecc/settings.py
|
d0bc282182f177dc5f6683ab172c8c9b6043eb67
|
[] |
no_license
|
lyralemos/sigecc
|
852857c63b5b81782b6208faefa1cc93e2656c3b
|
c90bed4a3e40f1cdf89a3a7cbcf450bec9d9d859
|
refs/heads/master
| 2022-12-15T11:15:49.254269
| 2019-05-23T11:46:33
| 2019-05-23T11:46:33
| 138,098,152
| 0
| 0
| null | 2022-12-08T05:41:22
| 2018-06-20T23:59:06
|
Vue
|
UTF-8
|
Python
| false
| false
| 4,675
|
py
|
"""
Django settings for sigecc project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(4gryvk*gdj$5dor!wap)ap&411$c7pzs10=19(w5@r%r&e&2-'
# SECURITY WARNING: don't run with debug turned on in production!
ENV = os.environ.get('APP_ENV', 'dev')
if ENV == 'dev':
DEBUG = True
STATS_FILE = os.path.join(BASE_DIR, 'webpack-stats.json')
else:
DEBUG = False
STATS_FILE = os.path.join(BASE_DIR, 'webpack-stats-prod.json')
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'webpack_loader',
'apps.core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sigecc.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sigecc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'dist'),
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'public/media')
STATIC_ROOT = os.path.join(BASE_DIR, 'public')
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': '',
'STATS_FILE': STATS_FILE,
}
}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication'
]
}
if not DEBUG:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_false'],
'class': 'logging.StreamHandler',
}
},
'loggers': {
'django': {
'level': 'DEBUG',
'handlers': ['console'],
}
}
}
|
[
"lyralemos@gmail.com"
] |
lyralemos@gmail.com
|
4dda8de1c3e32995b1e0f278b75c069fe523ad7b
|
ce074998469af446e33d0fab7adb01320ccc77ed
|
/src_procedures/Winlogon Userinit Key Persistence- PowerShell.py
|
c5c9fac1511cb362c646b704a32ea46482bba7dd
|
[] |
no_license
|
parahaoer/detection_rules
|
1341063568b0ccfa180da129a29aeec0a62e679e
|
c9f3408eccbcb4b61d1d441af31839872f9bb26c
|
refs/heads/master
| 2023-02-09T13:54:40.254874
| 2020-12-28T09:25:31
| 2020-12-28T09:25:31
| 265,990,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
from elasticsearch import Elasticsearch
es = Elasticsearch('10.25.23.161:9200')
doc = {
"query": {
"constant_score": {
"filter": {
"bool": {
"must": [
{
"match_phrase": {
"event_id": "4104"
}
},
{
"bool": {
"should": [
{
"multi_match": {
"query": "*Set-ItemProperty*",
"fields": [],
"type": "phrase"
}
},
{
"multi_match": {
"query": "*New-Item*",
"fields": [],
"type": "phrase"
}
}
]
}
},
{
"multi_match": {
"query": "*CurrentVersion\\Winlogon*",
"fields": [],
"type": "phrase"
}
}
]
}
}
}
}
}
res = es.search(index="logs-endpoint-winevent-*",body=doc)
count = res['hits']['total']['value']
tactic = "Persistence"
technique = "Winlogon Helper DLL"
procedure = "Winlogon Userinit Key Persistence- PowerShell"
tech_code = "T1004"
action ={
"Tactic": tactic,
"Technique": technique,
"Tech_code": tech_code,
"Procedure": procedure,
"EventCount": count,
}
es.index(index="represent_5",body = action, id = 117)
|
[
"33771109+parahaoer@users.noreply.github.com"
] |
33771109+parahaoer@users.noreply.github.com
|
a91769c383e39d1b234b1d9b68cefbe2d29c1618
|
143bccd5ca0aefed912bdeefe353a6a3382ba55e
|
/Unideadline/settings.py
|
e9eb022be01b2b4e37f51a49931d0fc16c150068
|
[
"MIT"
] |
permissive
|
marvinguelzow/Unideadline
|
aacc049999d8c71f04fc682e30fd4d0ae2087ffa
|
4abbb1109cb9cc0f7cbbcf744683c7ab486e716a
|
refs/heads/master
| 2021-05-28T05:34:30.146237
| 2015-03-13T18:54:01
| 2015-03-13T18:54:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
"""
Django settings for Unideadline project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8%tbsd2g-ls_ok&55jk3s3lvsuvff@6mrgsy*)ptg0z-*frz%a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todolist',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Unideadline.urls'
WSGI_APPLICATION = 'Unideadline.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
[
"marvinguelzow@googlemail.com"
] |
marvinguelzow@googlemail.com
|