| column | type | length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
e89ba4a21bdbcf548d0de67eaa71958580fdd3d4
|
1a623ec3cc9319bb1b3d2204b2163730c180ab2f
|
/PyDSS/reports.py
|
48f88e2f86a8b5ae8b249190e581680dd99fb696
|
[
"BSD-3-Clause"
] |
permissive
|
ann-sherin/PyDSS
|
aa5713191b274a867f708e19a08fb024e8962df7
|
2cc245daa9639d3c7d91b9980b0879525f719268
|
refs/heads/master
| 2023-07-08T15:27:03.866534
| 2021-06-02T23:20:26
| 2021-06-02T23:20:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,002
|
py
|
"""Creates reports on data exported by PyDSS"""
import abc
import logging
import os
from PyDSS.exceptions import InvalidConfiguration, InvalidParameter
from PyDSS.utils.dataframe_utils import write_dataframe
from PyDSS.utils.utils import dump_data
REPORTS_DIR = "Reports"
logger = logging.getLogger(__name__)
class Reports:
"""Generate reports from a PyDSS project"""
def __init__(self, results):
self._results = results
self._report_names = []
self._report_options = results.simulation_config["Reports"]
for report in self._report_options["Types"]:
if report["enabled"]:
self._report_names.append(report["name"])
self._output_dir = os.path.join(results.project_path, REPORTS_DIR)
os.makedirs(self._output_dir, exist_ok=True)
@staticmethod
def append_required_exports(exports, options):
"""Append export properties required by the configured reports.
Parameters
----------
exports : ExportListReader
options : dict
Simulation options
"""
report_options = options.get("Reports")
if report_options is None:
return
for report in report_options["Types"]:
if not report["enabled"]:
continue
name = report["name"]
if name not in REPORTS:
raise InvalidConfiguration(f"{name} is not a valid report")
required = REPORTS[name].get_required_reports()
for elem_class, required_properties in required.items():
for req_prop in required_properties:
found = False
store_type = req_prop["store_values_type"]
for prop in exports.list_element_properties(elem_class):
if prop.name == req_prop["property"] and \
prop.store_values_type.value == store_type:
found = True
break
if not found:
exports.append_property(elem_class, req_prop)
logger.debug("Add required property: %s %s", elem_class, req_prop)
@classmethod
def generate_reports(cls, results):
"""Generate all reports specified in the configuration.
Parameters
----------
results : PyDssResults
Returns
-------
list
list of report filenames
"""
reports = Reports(results)
return reports.generate()
def generate(self):
"""Generate all reports specified in the configuration.
Returns
-------
list
list of report filenames
"""
filenames = []
for name in self._report_names:
report = REPORTS[name](self._results, self._report_options)
filename = report.generate(self._output_dir)
filenames.append(filename)
return filenames
class ReportBase(abc.ABC):
"""Base class for reports"""
def __init__(self, results, report_options):
self._results = results
self._report_options = report_options
@abc.abstractmethod
def generate(self, output_dir):
"""Generate a report in output_dir.
Returns
-------
str
path to report
"""
@staticmethod
@abc.abstractmethod
def get_required_reports():
"""Return the properties required for the report for export.
Returns
-------
dict
"""
class PvClippingReport(ReportBase):
"""Reports PV Clipping for the simulation."""
FILENAME = "pv_clipping.json"
def __init__(self, results, report_options):
super().__init__(results, report_options)
assert len(results.scenarios) == 2
self._pf1_scenario = results.scenarios[0]
self._control_mode_scenario = results.scenarios[1]
self._pv_system_names = self._control_mode_scenario.list_element_names("PVSystems")
self._pf1_pv_systems = {
x["name"]: x for x in self._pf1_scenario.read_pv_profiles()["pv_systems"]
}
self._control_mode_pv_systems = {
x["name"]: x for x in self._control_mode_scenario.read_pv_profiles()["pv_systems"]
}
def _get_pv_system_info(self, pv_system, scenario):
if scenario == "pf1":
pv_systems = self._pf1_pv_systems
else:
pv_systems = self._control_mode_pv_systems
return pv_systems[pv_system]
def calculate_pv_clipping(self, pv_system):
"""Calculate PV clipping for one PV system.
Returns
-------
        float
"""
cm_info = self._get_pv_system_info(pv_system, "control_mode")
pmpp = cm_info["pmpp"]
irradiance = cm_info["irradiance"]
total_irradiance = cm_info["load_shape_pmult_sum"]
annual_dc_power = pmpp * irradiance * total_irradiance
pf1_real_power = self._pf1_scenario.get_dataframe(
"PVSystems", "Powers", pv_system, real_only=True
)
annual_pf1_real_power = sum([abs(x) for x in pf1_real_power.sum()])
clipping = annual_dc_power - annual_pf1_real_power
logger.debug("PV clipping for %s = %s", pv_system, clipping)
return clipping
def generate(self, output_dir):
data = {"pv_systems": []}
for name in self._pv_system_names:
clipping = {
"name": name,
"pv_clipping": self.calculate_pv_clipping(name),
}
data["pv_systems"].append(clipping)
filename = os.path.join(output_dir, self.FILENAME)
dump_data(data, filename, indent=2)
logger.info("Generated PV Clipping report %s", filename)
return filename
@staticmethod
def get_required_reports():
return {
"PVSystems": [
{
"property": "Powers",
"store_values_type": "all",
}
]
}
class PvCurtailmentReport(ReportBase):
"""Reports PV Curtailment at every time point in the simulation."""
FILENAME = "pv_curtailment"
def __init__(self, results, report_options):
super().__init__(results, report_options)
assert len(results.scenarios) == 2
self._pf1_scenario = results.scenarios[0]
self._control_mode_scenario = results.scenarios[1]
self._pv_system_names = self._control_mode_scenario.list_element_names("PVSystems")
self._pf1_pv_systems = {
x["name"]: x for x in self._pf1_scenario.read_pv_profiles()["pv_systems"]
}
self._control_mode_pv_systems = {
x["name"]: x for x in self._control_mode_scenario.read_pv_profiles()["pv_systems"]
}
def _get_pv_system_info(self, pv_system, scenario):
if scenario == "pf1":
pv_systems = self._pf1_pv_systems
else:
pv_systems = self._control_mode_pv_systems
return pv_systems[pv_system]
def generate(self, output_dir):
df = self.calculate_pv_curtailment()
filename = os.path.join(
output_dir,
self.FILENAME
) + "." + self._report_options["Format"]
write_dataframe(df, filename, compress=True)
logger.info("Generated PV Clipping report %s", filename)
return filename
@staticmethod
def get_required_reports():
return {
"PVSystems": [
{
"property": "Powers",
"store_values_type": "all",
}
]
}
def calculate_pv_curtailment(self):
"""Calculate PV curtailment for all PV systems.
Returns
-------
pd.DataFrame
"""
pf1_power = self._pf1_scenario.get_full_dataframe(
"PVSystems", "Powers", real_only=True
)
control_mode_power = self._control_mode_scenario.get_full_dataframe(
"PVSystems", "Powers", real_only=True
)
# TODO: needs work
return (pf1_power - control_mode_power) / pf1_power * 100
class CapacitorStateChangeReport(ReportBase):
"""Reports the state changes per Capacitor."""
FILENAME = "capacitor_state_changes.json"
def generate(self, output_dir):
data = {"scenarios": []}
for scenario in self._results.scenarios:
scenario_data = {"name": scenario.name, "capacitors": []}
for capacitor in scenario.list_element_names("Capacitors"):
try:
change_count = int(scenario.get_element_property_number(
"Capacitors", "TrackStateChanges", capacitor
))
except InvalidParameter:
change_count = 0
changes = {"name": capacitor, "change_count": change_count}
scenario_data["capacitors"].append(changes)
data["scenarios"].append(scenario_data)
filename = os.path.join(output_dir, self.FILENAME)
dump_data(data, filename, indent=2)
logger.info("Generated %s", filename)
return filename
@staticmethod
def get_required_reports():
return {
"Capacitors": [
{
"property": "TrackStateChanges",
"store_values_type": "change_count",
}
]
}
class RegControlTapNumberChangeReport(ReportBase):
"""Reports the tap number changes per RegControl."""
FILENAME = "reg_control_tap_number_changes.json"
def generate(self, output_dir):
data = {"scenarios": []}
for scenario in self._results.scenarios:
scenario_data = {"name": scenario.name, "reg_controls": []}
for reg_control in scenario.list_element_names("RegControls"):
change_count = int(scenario.get_element_property_number(
"RegControls", "TrackTapNumberChanges", reg_control
))
changes = {"name": reg_control, "change_count": change_count}
scenario_data["reg_controls"].append(changes)
data["scenarios"].append(scenario_data)
filename = os.path.join(output_dir, self.FILENAME)
dump_data(data, filename, indent=2)
logger.info("Generated %s", filename)
return filename
@staticmethod
def get_required_reports():
return {
"RegControls": [
{
"property": "TrackTapNumberChanges",
"store_values_type": "change_count",
}
]
}
REPORTS = {
"PV Clipping": PvClippingReport,
"PV Curtailment": PvCurtailmentReport,
"Capacitor State Change Counts": CapacitorStateChangeReport,
"RegControl Tap Number Change Counts": RegControlTapNumberChangeReport,
}
|
[
"daniel.thom@nrel.gov"
] |
daniel.thom@nrel.gov
|
4c06b68ae3162adb63425cc3f9e85961b0a97746
|
113e4f837612592b16f658513b7e886206e6f0ba
|
/Chapter3/26.py
|
aac92416791830a0dc0bb10589a234afe56013b3
|
[] |
no_license
|
Akai-Kumako/100knock
|
95027cee8990bb1cec79e086dd74a166097d4307
|
81308ee1c32c4218c55efee40995a9763f65bbd4
|
refs/heads/master
| 2021-05-15T09:10:08.313556
| 2018-04-18T09:04:42
| 2018-04-18T09:04:42
| 107,860,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# 26. Remove emphasis markup
import json
import re
with open("jawiki-country.json", "r") as f:
for i in f:
a = json.loads(i)
if a.get("title") == "イギリス":
b = a.get("text").replace("<br/>\n", "").split("\n")
info = {}
regex = re.compile(u"^\|(.*?)\s*=\s*(.*?)$")
emph = re.compile("'{2,5}")
for j in b:
c = regex.search(j)
if c != None: info[c.group(1)] = emph.sub("", c.group(2))
if j == "}}": break
for k, v in info.items():
print("{0}: {1}".format(k, v))
|
[
"e1433@s.akashi.ac.jp"
] |
e1433@s.akashi.ac.jp
|
c189f11c67e177b0831501cb13638f53957f2ef2
|
9eddcc9a7d1743d6d032627d496fda99bf0f4852
|
/函数编程/三元运算.py
|
242872554054fcf7aa5e5ba5b8ff09e6c9b1a5e3
|
[] |
no_license
|
zzylydx/source
|
9011423d11eb2b84b512d4e28496a4fb03db01c6
|
0a6f05dee3e1f6230f62b14a2f8105e991af7738
|
refs/heads/master
| 2022-03-13T11:06:48.575143
| 2019-10-30T07:25:25
| 2019-10-30T07:25:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
#!/usr/bin/env python3
# coding: utf-8
'''
Ternary (conditional) expression
The ternary expression, also known as the conditional expression, is shorthand for a simple conditional statement, e.g.
a plain conditional statement:
if condition:
    val = 1
else:
    val = 2
rewritten as a ternary expression:
val = 1 if condition else 2
'''
#
# if 0 ==1:
# print('ok')
# else:
# print('no')
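# note: the condition 0 is falsy, so the expression below assigns 2 to val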
val = 1 if 0 else 2
print(val)
|
[
"kevinlandun@gmail.com"
] |
kevinlandun@gmail.com
|
445353aa38922b97f88dd2d3de88276532a44b3b
|
2f4c51d4d153a7409559b86edbea8db7244fc115
|
/GTM.py
|
3a9cd8bbf16f3a8c13d0cae14f6f78c412f7cb7b
|
[] |
no_license
|
algator/Python
|
88b0a4bec33a8eacfa25c9dcbf141851dacedf05
|
21fbda53d76abc6f7d2f35d2b59f52375c0243d4
|
refs/heads/master
| 2020-05-31T03:26:55.793504
| 2019-01-01T16:36:58
| 2019-01-01T16:36:58
| 37,400,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,924
|
py
|
# "Guess the number" mini-project
# http://www.codeskulptor.org/#user40_pnN71lvBlH_3.py
# import libraries
import math
import random
import simplegui
# helper function to start and restart the game
def new_game():
# initialize global variables used in your code here
global guess
global secret_number
global max_guess
global guess_inc
max_guess = 7
guess = 0
guess_inc = 0
secret_number = random.randrange(0, 100)
# define event handlers for control panel
# button that changes the range to [0,100) and starts a new game
def range100():
global secret_number
global max_guess
new_game()
max_guess = 7
secret_number = random.randrange(0, 100)
# button that changes the range to [0,1000) and starts a new game
def range1000():
global secret_number
global max_guess
new_game()
max_guess = 10
secret_number = random.randrange(0, 1000)
def input_guess(g):
global guess
global secret_number
global max_guess
global guess_inc
int_guess = int(g)
print "Guess was", int_guess
# main game logic
if int_guess < secret_number:
print "Higher"
elif int_guess > secret_number:
print "Lower"
elif int_guess == secret_number:
print "Correct"
guess_inc = guess_inc + 1
print "Have guessed", guess_inc,"time(s)."
if guess_inc >= max_guess:
print "Sorry. Out of guesses."
new_game()
# create frame
f = simplegui.create_frame("Guess the number",300,300)
# register event handlers for control elements and start frame
f.add_button("Start new game",new_game,200)
f.add_button("Range 0 to 100",range100,200)
f.add_button("Range 0 to 1000",range1000,200)
f.add_input("Input guess",input_guess,200)
# get frame rolling
f.start()
# call new_game
new_game()
|
[
"noreply@github.com"
] |
algator.noreply@github.com
|
c773ffd4894eaa9dd7f0f650b0dd3984c7006fba
|
d1d94aee03d5b3217c8482bffba11afa1de7f5fe
|
/Username Check.py
|
6443a30f132355ac64f5df1ea1ef652e8a525bf6
|
[] |
no_license
|
Cpasgrave/Python-Challenges
|
990384b81b992377ac7142d19ce9f83a4a198a2f
|
7f0feb563fe427c4f93fd2d4b253478cec53debb
|
refs/heads/master
| 2020-03-23T02:36:56.783571
| 2018-09-05T09:32:04
| 2018-09-05T09:32:04
| 140,982,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
# -*- coding: utf-8 -*-
# Answer to CHALLENGE (description after the code)
# Proposed by George Victor
user1 = "Goubi3#"
user2 = "troubi#4"
user3 = "Fou fobi@5"
user4 = "Bi4*"
users = [user1, user2, user3, user4]
def check(user):
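    # rules: length 5-10, at least one uppercase letter, one digit, one of @#*=, and no spaces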
if 4 < len(user) < 11 and any(c.isupper() for c in user) and any(c.isdigit() for c in user) and any(c in "@#*=" for c in user) and all(c != " " for c in user):
return "PASS"
else:
return "FAIL"
n = 0
for u in users:
n += 1
print("user", u, ":", check(u))
# Sam wants to select a username to register on a website. The rules for selecting username:
# 1. The minimum length of the username must be 5 characters and the maximum may be 10.
# 2. It should contain at least one letter from A-Z
# 3. It should have at least one digit from 0-9
# 4. It should have at least one character from amongst @#*=
# 5. It should not have any spaces
# Write a program which accepts 4 usernames (one username per line)
# as input and checks whether each of them satisfy the above mentioned conditions.
# If it does, the program should print PASS (in uppercase) else print FAIL
|
[
"noreply@github.com"
] |
Cpasgrave.noreply@github.com
|
bd38c971346bda534845f0fffebce8903ed7ea77
|
5bca2d5a7615d2783fae7a7569d57a9a9eb3d604
|
/modules/store/domain/exceptions/product_exceptions.py
|
d82245c0b61c5e6b72da88f9cb469dc95b185e7d
|
[] |
no_license
|
eduardolujan/product_hub
|
9ff3fbf11b4703993c1efb2a6202ed3b1c446cda
|
0bfe0059ab0d59243794b03f70ceffe3a1a263be
|
refs/heads/main
| 2023-03-01T15:58:05.014636
| 2021-02-03T03:04:31
| 2021-02-03T03:04:31
| 330,279,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
# -*- coding: utf-8 -*-
from .http_error import HttpError
from modules.shared.infrastructure.http import status as http_status
class ProductNotExist(HttpError):
"""
    Product does not exist
"""
def __init__(self,
message: str,
http_status=http_status.HTTP_404_NOT_FOUND):
"""
        Constructor for ProductNotExist
@param message: message
@type message: str
@param http_status: http status
@type http_status: int
"""
super(ProductNotExist, self).__init__(message, http_status=http_status)
class ProductAlreadyExists(HttpError):
"""
Product already exists
"""
def __init__(self,
message: str,
http_status=http_status.HTTP_409_CONFLICT):
"""
        Constructor for ProductAlreadyExists
@param message: message
@type message: str
@param http_status: http status
@type http_status: int
"""
super(ProductAlreadyExists, self).__init__(message, http_status=http_status)
|
[
"eduardo.lujan.p@gmail.com"
] |
eduardo.lujan.p@gmail.com
|
861fb2d23738f620fb3a8f6581f425a5cf4b3ea4
|
22c62c9422570db36c95162ffa4b04c277967e55
|
/data/overwatch_scrape.py
|
e74062f3a2b4115876e39bca090fff1077829f57
|
[] |
no_license
|
solejar/SoCharM
|
c90610562ca66a53464749d5c295f5b678297d2b
|
418ad0f1f68f57994d8e5f30a8f51a3ee1044098
|
refs/heads/master
| 2021-04-29T15:21:01.100789
| 2018-02-23T18:16:39
| 2018-02-23T18:16:39
| 121,796,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
#!C:/Python27/python
import urllib2
from BeautifulSoup import BeautifulSoup
wiki = "https://overwatch.gamepedia.com/Ana"
page = urllib2.urlopen(wiki)
soup = BeautifulSoup(page)
|
[
"solejar236@gmail.com"
] |
solejar236@gmail.com
|
bb9db42c363b3da060934859a493a98d2fa3c1d2
|
505a5884fc67f98ef55f245947e4445f95f74127
|
/app/project/urls.py
|
a846c6cd3b7a24f3f936395acf687ed448dc2e36
|
[] |
no_license
|
palmbeach-interactive/django-admin-kitchen-sink
|
ff711b3164a16807150dc6083cc279cbf4a79439
|
1136d4a2df1ca395a27786cfecc9108da8b20375
|
refs/heads/master
| 2022-08-30T19:00:51.980997
| 2017-03-02T13:05:00
| 2017-03-02T13:05:00
| 75,930,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = [
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap',
{'sitemaps': {'cmspages': CMSSitemap}}),
url(r'^select2/', include('django_select2.urls')),
]
urlpatterns += i18n_patterns('',
url(r'^admin_tools/', include('admin_tools.urls')),
url(r'^admin/', include(admin.site.urls)), # NOQA
url(r'^', include('cms.urls')),
)
# This is only needed when using runserver.
if settings.DEBUG:
urlpatterns = [
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
] + staticfiles_urlpatterns() + urlpatterns
|
[
"ohrstrom@gmail.com"
] |
ohrstrom@gmail.com
|
a902738fdbaa00a0b3d311c6177b51cdf99ac8d8
|
be46fdadaccc3c06a4704dee26c2efc11a77b2d0
|
/api/migrations/0009_auto_20200415_0856.py
|
381a7359e3bbb4e462d8a9987c6d32589d96d539
|
[] |
no_license
|
nforesperance/wiagate_backend
|
52a542a8c3ff20800fe6c363dd0d9192d2f353e9
|
e52155c766884db61e04906c8a27a62b463d8e90
|
refs/heads/master
| 2023-08-02T11:54:39.504829
| 2020-07-16T15:07:22
| 2020-07-16T15:07:22
| 280,182,472
| 0
| 0
| null | 2021-09-22T19:27:03
| 2020-07-16T14:57:19
|
Python
|
UTF-8
|
Python
| false
| false
| 936
|
py
|
# Generated by Django 3.0.5 on 2020-04-15 07:56
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('api', '0008_exam'),
]
operations = [
migrations.AddField(
model_name='quiz',
name='end_date',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
),
migrations.AddField(
model_name='quiz',
name='start_date',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
),
migrations.AddField(
model_name='quiz',
name='time_minutes',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='quiz',
name='use_date',
field=models.BooleanField(default=False),
),
]
|
[
"nforesperance1@gmail.com"
] |
nforesperance1@gmail.com
|
eb31174e56fd716e1d91766d6d4f1a0062df364e
|
d43d06d6a39f4d841ee75867edd5f52e25b1468f
|
/manage.py
|
51229c4ae8963b21a845eafee49114e449911222
|
[] |
no_license
|
monteskier/ArxiuDjango
|
8f09ff8c61610e76c73b4b57e3358f644027cb4b
|
b49dd9e3a9e619d0c3d1b4c972c91c9b80cc9fa5
|
refs/heads/master
| 2021-09-01T21:14:21.200420
| 2017-12-28T16:40:41
| 2017-12-28T16:40:41
| 115,601,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ArxiuDjango.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"jmontoliu7@gmail.com"
] |
jmontoliu7@gmail.com
|
07e8019ac9555197e8cff4f5fabe20dbc5a62c73
|
b9245d71d3c124110e282145fb54f3a220f4e8d9
|
/main.py
|
9fc8dee235de320f4fcc0ecfd83232e2198cdfa0
|
[] |
no_license
|
KevinYuimin/SemiStarGAN
|
c97f237756a732e6ec6abaf011848aa6eedbca45
|
1e860668473662936e3c39a53d21ecd02ef59748
|
refs/heads/master
| 2020-04-02T04:03:06.852265
| 2019-05-12T06:40:39
| 2019-05-12T06:40:39
| 153,997,264
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,672
|
py
|
import argparse
import os
import tensorflow as tf
from model import semi_stargan
# argument parser
parser = argparse.ArgumentParser(description='')
parser.add_argument('--phase', type=str, default='train')
parser.add_argument('--dataset', type=str, default='celebA')
parser.add_argument('--data_dir', type=str, default=os.path.join('.','data','celebA'))
parser.add_argument('--log_dir', type=str, default='log')
parser.add_argument('--ckpt_dir', type=str, default='checkpoint')
parser.add_argument('--sample_dir', type=str, default='sample')
parser.add_argument('--test_dir', type=str, default='test')
parser.add_argument('--epoch', type=int, default=20)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--image_size', type=int, default=128)
parser.add_argument('--image_channel', type=int, default=3)
# number of filters
parser.add_argument('--nf', type=int, default=64)
parser.add_argument('--n_label', type=int, default=6)
parser.add_argument('--lambda_gp', type=int, default=10)
parser.add_argument('--lambda_cls', type=float, default=1)
parser.add_argument('--lambda_rec', type=int, default=10)
parser.add_argument('--lambda_id', type=float, default=1)
parser.add_argument('--lambda_adv', type=int, default=1)
parser.add_argument('--lambda_ucls', type=int, default=1)
# learning_rate
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--beta1', type=float, default=0.5)
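# note: argparse's type=bool converts any non-empty string (even "False") to True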
parser.add_argument('--continue_train', type=bool, default=False)
# number of iterations to save files
parser.add_argument('--snapshot', type=int, default=500)
# number of iterations to test auxiliary classifier accuracy
parser.add_argument('--snapshot_test', type=int, default=5000)
parser.add_argument('--binary_attrs', type=str, default='100')
parser.add_argument('--d_steps', type=int, default=5)
parser.add_argument('--c_method', type=str, default='Sigmoid')
args = parser.parse_args()
def main(_):
assets_dir = os.path.join(
'.','assets','label{}_img{}_{}'.format(
args.n_label, args.image_size, args.dataset))
args.log_dir = os.path.join(assets_dir, args.log_dir)
args.ckpt_dir = os.path.join(assets_dir, args.ckpt_dir)
args.sample_dir = os.path.join(assets_dir, args.sample_dir)
args.test_dir = os.path.join(assets_dir, args.test_dir)
if args.n_label == 3:
args.attr_keys = ['Black_Hair','Blond_Hair','Brown_Hair']
else:
args.attr_keys = ['Black_Hair','Blond_Hair','Brown_Hair', 'Male', 'Young']
# make directory if not exist
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
if not os.path.exists(args.ckpt_dir):
os.makedirs(args.ckpt_dir)
if not os.path.exists(args.sample_dir):
os.makedirs(args.sample_dir)
if not os.path.exists(args.test_dir):
os.makedirs(args.test_dir)
tfconfig = tf.ConfigProto()
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
model = semi_stargan(sess,args)
if args.phase == 'train':
model.train()
elif args.phase == 'test':
model.test()
elif args.phase == 'test_all':
model.test_all()
elif args.phase == 'aux_test':
model.test_aux_accuracy()
else:
raise ValueError(
"Phase {} does not exist".format(args.phase))
# run main function
if __name__ == '__main__':
tf.app.run()
|
[
"space83725@gmail.com"
] |
space83725@gmail.com
|
191b438f2c74a540c7414317704b4433f0ba0aea
|
4b4dec45e98431fa067fc081408cc43f7c8c1a81
|
/tests/unit/test_natural_query_field.py
|
493e4de8d67076566c9e68e5e587a7cd8ee2333b
|
[
"BSD-3-Clause"
] |
permissive
|
djangosporti/django-natural-query
|
38cc09b1522d5cfe8a5f55c5700ecdc003e5633e
|
e169f088b07d2aab4998d964abf1f44f0a5e22ff
|
refs/heads/master
| 2021-01-12T15:40:59.185042
| 2015-03-23T13:36:30
| 2015-03-23T13:36:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
from django.test import SimpleTestCase
from natural_query.fields import NaturalQueryField
from natural_query.query import NaturalQueryDescriptor
from tests.common.support.models import TestModel
# class NaturalQueryFieldTestCase(SimpleTestCase):
# def test_a_query_descriptor_is_added_to_the_model(self):
# sut = NaturalQueryField(name='foo')
#
# del TestModel.foo
#
# sut.contribute_to_class(TestModel, 'foo')
#
# try:
# _ = TestModel.foo
# except AttributeError:
# self.fail('TestModel has no attribute named foo')
#
# def test_a_query_descriptor_is_not_added_to_the_model_when_a_class_attribute_already_exists(self):
# sut = NaturalQueryField(name='clean')
#
# sut.contribute_to_class(TestModel, 'clean')
#
# self.assertNotIsInstance(TestModel.clean, NaturalQueryDescriptor)
|
[
"omer.drow@gmail.com"
] |
omer.drow@gmail.com
|
6d288d510a9baa6010315e3c0fce5153ddcc9e22
|
289bf24b7a995977dbc13a6dc1fa99bdccf928ab
|
/seaweed/__main__.py
|
1cee1c49bdfa86084dfc153714987e74a4ea33ed
|
[
"MIT"
] |
permissive
|
3ng7n33r/seaweed-cli
|
0abadb457a4c26ea60217ebb816cadcdfaa7457d
|
d91f255b30f714f481eaf8e26512ba42422770be
|
refs/heads/master
| 2021-09-20T17:35:25.819160
| 2018-08-13T15:29:16
| 2018-08-13T15:29:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
'''
This is the entry point for the CLI
'''
import argparse
from .surfline import search, spotSuggestion, createForecast
def main():
args = parseArgs()
getForecast(args.spot, args.timeframe)
def parseArgs():
# Parse args passed to the cli
parser = argparse.ArgumentParser(description="Get a surf forecast! Enter when and where you want to surf.")
parser.add_argument('timeframe', help='when do you want to go surfing - today, tomorrow, weekend or forecast')
parser.add_argument('spot', help='where do you want to surf')
return parser.parse_args()
def getForecast(spot, forecastType):
searchSpots = search(spot)
hits = searchSpots[0]["hits"]["hits"]
if len(hits) == 1:
# Pull out the spot id for the single hit
spotId = hits[0]["_id"]
createForecast(spotId, forecastType)
elif len(hits) > 1:
# The user has received multiple spot suggestions, they'll need to narrow down the spot they want
spotSuggestion(searchSpots)
else:
print("Couldn't find that spot")
if __name__ == '__main__':
main()
|
[
"cooper.samuel@outlook.com"
] |
cooper.samuel@outlook.com
|
610819fb82fa07e59f4289726928b9b196c69dc8
|
6b83bb51b89bfa9eb7f3ad501292b7ddec8693ef
|
/ex21.py
|
b4f80813eb4b538cad908006d84e4192ac212165
|
[] |
no_license
|
KeoneShyGuy/lpthw
|
0d75a792699fcd726e14c20a545fae11b9ca0620
|
32f297a1bcd2d974535bef8196d6803d2415ea0d
|
refs/heads/master
| 2020-04-06T07:05:03.372583
| 2016-09-19T04:39:14
| 2016-09-19T04:39:14
| 65,874,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
def add(a, b):
print "ADDING {} + {}".format(a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING {} - {}".format(a, b)
return a - b
def multiply(a, b):
print "MULTIPLYING {} 8 {}".format(a, b)
return a * b
def divide(a, b):
print "DIVIDING {} / {}".format(a, b)
return a / b
print "Let's do some math with just functions!"
age = add(20, 7)
height = subtract(72, 6)
weight = multiply(20, 8)
iq = divide(240, 2.1)
print "Age: {}, Height: {}, Weight: {}, IQ: {}".format(age, height, weight, iq)
# A puzzle for the extra credit, type it in anyway
print "Here is a puzzle."
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print "That becomes: {}. Can you do it by hand?".format(what)
|
[
"noreply@github.com"
] |
KeoneShyGuy.noreply@github.com
|
f8d547c326d934f12534d8bd88fa02e475028d11
|
872f757843aa9a2c0d35d91ee6f6f45e5924be51
|
/fizzbuzz/teste.py
|
1681a1818dd28b170dc74801e2b0fbeb3dc5a492
|
[] |
no_license
|
marcusgabrields/unicap-dojo
|
4ab37bfb8bd9e9ece2931d58d8c7472fd92a2f07
|
5bcdc0d2866395ee4b8232c8c52864be533672bc
|
refs/heads/master
| 2020-05-17T15:13:06.552318
| 2019-05-14T02:45:33
| 2019-05-14T02:45:33
| 183,784,132
| 1
| 1
| null | 2019-04-27T21:10:22
| 2019-04-27T14:36:12
|
Python
|
UTF-8
|
Python
| false
| false
| 993
|
py
|
import unittest
from fizzbuzz import fizzBuzz
class FizzBuzz(unittest.TestCase):
    def test_numero_quinze(self):
resultado = fizzBuzz(15)
self.assertEqual('FizzBuzz', resultado)
def test_numero_zero(self):
resultado = fizzBuzz(0)
self.assertEqual('FizzBuzz', resultado)
def test_numero_tres(self):
resultado = fizzBuzz(3)
self.assertEqual('Fizz', resultado)
def test_numero_cinco(self):
resultado = fizzBuzz(5)
self.assertEqual('Buzz', resultado)
def test_numero_seis(self):
resultado = fizzBuzz(6)
self.assertEqual('Fizz', resultado)
def test_numero_50(self):
resultado = fizzBuzz(50)
self.assertEqual('Buzz', resultado)
def test_numero_30(self):
resultado = fizzBuzz(30)
self.assertEqual('FizzBuzz', resultado)
def test_numero_7(self):
resultado = fizzBuzz(7)
self.assertEqual('7', resultado)
|
[
"marcusgabriel.ds@gmail.com"
] |
marcusgabriel.ds@gmail.com
|
ee09381c501c031c732f3ebfe284b99212a31e43
|
536cc168ce47cc0310087c02c57fd9b23ea972cc
|
/data_structures/trees/insert_node_binary_tree.py
|
f4786dfb93ae028dd829fde80a25d3d91290639b
|
[] |
no_license
|
asingh21/python
|
69a1763c04850aca7ccaf09ce7e006de9dea9734
|
90634abcdfb7dd0add46723ff15e5ef693cee8ce
|
refs/heads/master
| 2022-06-26T06:15:44.188147
| 2022-06-05T19:05:01
| 2022-06-05T19:05:01
| 128,602,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
import Queue
class BinaryTreeNode:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def insert_node_binary_tree(root, insert_data):
if not root:
return
q = Queue.Queue()
q.put(root)
    # level-order search for the first node with a missing child
    while not q.empty():
        node = q.get()
        if not node.left:
            node.left = BinaryTreeNode(insert_data)
            return
        q.put(node.left)
        if not node.right:
            node.right = BinaryTreeNode(insert_data)
            return
        q.put(node.right)
def inorder_traversal_recursive(root, result=None):
if not root:
return
if result is None:
result = []
inorder_traversal_recursive(root.left, result)
result.append(root.data)
inorder_traversal_recursive(root.right, result)
return result
root = BinaryTreeNode(1)
root.left = BinaryTreeNode(2)
root.right = BinaryTreeNode(3)
root.left.left = BinaryTreeNode(4)
root.left.right = BinaryTreeNode(5)
root.right.left = BinaryTreeNode(6)
root.right.right = BinaryTreeNode(7)
print inorder_traversal_recursive(root)
insert_node_binary_tree(root, 8)
print inorder_traversal_recursive(root)
|
[
"asingh21@ncsu.edu"
] |
asingh21@ncsu.edu
|
9f060f448d79ec552a47c2882fe9b7784042e56b
|
a9d197b21b73b25b8b87d2711fa53531ff40e155
|
/15 Files/binary.py
|
47092a61eda9e33a8eed824726fc1cb9b54e96ed
|
[] |
no_license
|
CodedQuen/Python-essential-reference
|
460109acc901b687d9d4071331df488612e4f452
|
c2c222b6616d7ebe78bec3f0a90598582b8f6bf3
|
refs/heads/master
| 2022-06-15T12:19:34.475686
| 2020-05-04T08:29:25
| 2020-05-04T08:29:25
| 261,105,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
def main():
buffer_size = 50000
infile = open('olives.jpg', 'rb') # Python default is to use utf8, but this is binary, so we rb, read as binary
outfile = open('new.jpeg', 'wb')
buffer = infile.read(buffer_size) # Buffer is a binary object not a text object; so, buffer is a must
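    # read() returns an empty buffer at end of file, which ends the loop below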
while len(buffer):
outfile.write(buffer)
print('.', end='')
buffer = infile.read(buffer_size) # Read the next buffer
infile.close() # I added this
outfile.close() # I added this
print()
print('Done.')
if __name__ == "__main__": main()
# read binary file example
handle = open("test.pdf", "rb")
|
[
"noreply@github.com"
] |
CodedQuen.noreply@github.com
|
f24d5094a90cf5df6749cb72a050e4e21a507a45
|
f62ff90d7850af458d8f12386fc9ee9134dbe7c1
|
/Plots/Paperplots/Model_11/Current_Voltage_Curves.py
|
034fdb756dbe154469192f36b3769056b04eb971
|
[] |
no_license
|
AlexSchmid22191/EIS_R_Sim
|
51b431f078cb455fc38637c192436c0523449565
|
851b061e60811e1e58a5b2fd4e393e529c3f86ac
|
refs/heads/master
| 2023-06-27T17:40:59.177270
| 2021-07-22T11:50:27
| 2021-07-22T11:50:27
| 380,768,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,611
|
py
|
from matplotlib.pyplot import subplots, show
from matplotlib.style import use
from numpy import load, log10
from Equations import e, k, T
from Semilog_Slope import semilog_slope
from matplotlib.ticker import LogLocator, NullFormatter
use('../Paper.mplstyle')
data = load('../../../Currents_Resistances_Model_11/Current_Data_Model_11.npy')
# ----------------------------------------------------------------------------------------------------------------------
fig_abs_uh, ax_abs_uh = subplots()
fig_abs_hi, ax_abs_hi = subplots()
fig_abs_me, ax_abs_me = subplots()
fig_abs_lo, ax_abs_lo = subplots()
# Highest oxygen partial pressures
for i in (2300, 2200, 2100, 2000, 1900):
ax_abs_uh.plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
# High oxygen partial pressures
for i in (1800, 1700, 1600, 1500, 1400):
ax_abs_hi.plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
# Medium oxygen partial pressures
for i in (1300, 1200, 1100, 1000, 900):
ax_abs_me.plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
# Low oxygen partial pressures
for i in (800, 700, 600, 500, 400):
ax_abs_lo.plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
semilog_slope(origin=(-0.05, 1e1), slope=-2*e/k/T, ax=ax_abs_uh, text=r'$\frac{2e}{kT}$', size=12, inverted=False)
semilog_slope(origin=(-0.025, 2e1), slope=-2*e/k/T, ax=ax_abs_hi, text=r'$\frac{2e}{kT}$', size=12, inverted=False)
semilog_slope(origin=(0.2, 1e-2), slope=2*e/k/T, ax=ax_abs_me, text=r'$\frac{2e}{kT}$', size=12, inverted=False)
semilog_slope(origin=(0.3, 5e-4), slope=2*e/k/T, ax=ax_abs_lo, text=r'$\frac{2e}{kT}$', size=12, inverted=False)
ax_abs_uh.set_yscale('log')
ax_abs_hi.set_yscale('log')
ax_abs_me.set_yscale('log')
ax_abs_lo.set_yscale('log')
ax_abs_uh.set_ylim(1e-5, 1e2)
ax_abs_hi.set_ylim(1e-3, 1e2)
ax_abs_me.set_ylim(1e-5, 1e1)
ax_abs_lo.set_ylim(1e-6, 1e1)
ax_abs_lo.yaxis.set_major_locator(LogLocator(numticks=13))
ax_abs_lo.yaxis.set_minor_locator(LogLocator(numticks=13, subs=range(1, 10)))
ax_abs_lo.yaxis.set_minor_formatter(NullFormatter())
ax_abs_uh.yaxis.set_major_locator(LogLocator(numticks=13))
ax_abs_uh.yaxis.set_minor_locator(LogLocator(numticks=13, subs=range(1, 10)))
ax_abs_uh.yaxis.set_minor_formatter(NullFormatter())
for ax in (ax_abs_uh, ax_abs_hi, ax_abs_me, ax_abs_lo):
ax.set_ylabel('Absolute current density (a.u.)')
ax.set_xlabel('Overpotential (V)')
ax.set_xlim(-0.65, 0.65)
ax.legend()
fig_abs_uh.tight_layout()
fig_abs_uh.savefig('Plots/Current_Voltage_Curves_Abs_Uh_Model_11.pdf')
fig_abs_uh.savefig('Plots/Current_Voltage_Curves_Abs_Uh_Model_11.png')
fig_abs_hi.tight_layout()
fig_abs_hi.savefig('Plots/Current_Voltage_Curves_Abs_Hi_Model_11.pdf')
fig_abs_hi.savefig('Plots/Current_Voltage_Curves_Abs_Hi_Model_11.png')
fig_abs_me.tight_layout()
fig_abs_me.savefig('Plots/Current_Voltage_Curves_Abs_Me_Model_11.pdf')
fig_abs_me.savefig('Plots/Current_Voltage_Curves_Abs_Me_Model_11.png')
fig_abs_lo.tight_layout()
fig_abs_lo.savefig('Plots/Current_Voltage_Curves_Abs_Lo_Model_11.pdf')
fig_abs_lo.savefig('Plots/Current_Voltage_Curves_Abs_Lo_Model_11.png')
# ----------------------------------------------------------------------------------------------------------------------
fig_abs_co, ax_abs_co = subplots(nrows=1, ncols=2, figsize=(6.5, 3.25))
# Highest oxygen partial pressures
for i in (2300, 2200, 2100, 2000, 1900):
ax_abs_co[0].plot(data['overpotential'][::25, i], abs(data['current'][::25, i]),
label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
for i in (2300, 2200, 2100, 2000, 1900):
ax_abs_co[0].plot(data['overpotential'][:, i], abs(data['current'][:, i]), linestyle='-', marker='')
# High oxygen partial pressures
for i in (1800, 1700, 1600, 1500, 1400):
ax_abs_co[1].plot(data['overpotential'][::25, i], abs(data['current'][::25, i]),
label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
for i in (1800, 1700, 1600, 1500, 1400):
ax_abs_co[1].plot(data['overpotential'][:, i], abs(data['current'][:, i]), linestyle='-', marker='')
semilog_slope(origin=(-0.05, 1e1), slope=-2*e/k/T, ax=ax_abs_co[0], text=r'$-\frac{2e}{kT}$', size=12, inverted=False)
semilog_slope(origin=(-0.025, 2e1), slope=-2*e/k/T, ax=ax_abs_co[1], text=r'$-\frac{2e}{kT}$', size=12, inverted=False)
ax_abs_co[0].set_yscale('log')
ax_abs_co[1].set_yscale('log')
ax_abs_co[0].set_title(r'a) Very high $p_\mathrm{O_2}$')
ax_abs_co[1].set_title(r'b) High $p_\mathrm{O_2}$')
ax_abs_co[0].set_ylim(1e-5, 1e2)
ax_abs_co[1].set_ylim(1e-2, 1e2)
ax_abs_co[0].yaxis.set_major_locator(LogLocator(numticks=13))
ax_abs_co[0].yaxis.set_minor_locator(LogLocator(numticks=13, subs=range(1, 10)))
ax_abs_co[0].yaxis.set_minor_formatter(NullFormatter())
for ax in (ax_abs_co[0], ax_abs_co[1]):
ax.set_ylabel('Absolute current density (a.u.)')
ax.set_xlabel('Overpotential (V)')
ax.set_xlim(-0.65, 0.65)
ax.legend()
fig_abs_co.tight_layout()
fig_abs_co.savefig('Plots/Current_Voltage_Curves_Co_Model_11.pdf')
fig_abs_co.savefig('Plots/Current_Voltage_Curves_Co_Model_11.png')
fig_abs_co.savefig('Plots/Current_Voltage_Curves_Co_Model_11.tiff', dpi=300)
show()
|
[
"Alex.Schmid91@gmail.com"
] |
Alex.Schmid91@gmail.com
|
3e429ba488e156608f0e560020ad4b887946774e
|
3519119dffe00df415311fc410d6307e1991f42d
|
/scrapnfving/openstack/common/threadgroup.py
|
a9eaaa7097565a2bd57e20d91d331d955913b2ac
|
[
"Apache-2.0"
] |
permissive
|
sbauza/gerrit-dashboard-nfv
|
d029c1f1518a91e4a6b341e73293a9ed4b40479f
|
d375b1e797f725941217f1c691a1e343ee52f409
|
refs/heads/master
| 2016-09-06T08:42:32.089693
| 2015-12-22T08:16:47
| 2015-12-22T08:16:47
| 21,006,345
| 0
| 1
| null | 2015-12-22T08:16:47
| 2014-06-19T15:53:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,812
|
py
|
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import eventlet
from eventlet import greenpool
from scrapnfving.openstack.common import log as logging
from scrapnfving.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
    Calls the :class:`ThreadGroup` to notify it.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
"""Wrapper around a greenthread, that holds a reference to the
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
it has done so it can be removed from the threads list.
"""
def __init__(self, thread, group):
self.thread = thread
self.thread.link(_thread_done, group=group, thread=self)
def stop(self):
self.thread.kill()
def wait(self):
return self.thread.wait()
def link(self, func, *args, **kwargs):
self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
"""The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
return th
def thread_done(self, thread):
self.threads.remove(thread)
def _stop_threads(self):
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
def stop_timers(self):
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self):
for x in self.timers:
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
continue
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
|
[
"sbauza@redhat.com"
] |
sbauza@redhat.com
|
98b6f11759bb01e8fdbbe030d030d2dfd0da373b
|
54c5b8e78f822f37c5322de3a44379cd0c952db2
|
/lambda_functions.py
|
f3091d11b0e2ad874f67133958ae94218743646e
|
[] |
no_license
|
kranthy09/pythonpraccodes
|
4976203977f2a9bbae3fc6f1de8513a4ce74ab76
|
bb1a83c1eda4209045d96c631b24de333420f447
|
refs/heads/master
| 2021-09-21T16:53:18.174748
| 2018-08-29T06:24:19
| 2018-08-29T06:24:19
| 140,928,427
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
def square(x):
return x * x
test_list = [4, 6, 7, 8, 9]
result = map(square, test_list)
print(list(result))
result = map(lambda x: x*x, test_list)
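# note: the next line rebinds the built-in name sum; print(sum) then shows the lambda object itself, not a number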
sum = lambda x, y: x+y
print(sum)
print(sum(10, 30))
non = lambda : print("hello")
non()
|
[
"g.kranthi2507@gmail.com"
] |
g.kranthi2507@gmail.com
|
58253c38d0d61b8b816a414e2e2a0a659b6f8f76
|
c87a1593a7bf9bcafa1540db648c3677c8bac966
|
/day03/1深度爬.py
|
9d915509478fbc331222d65f57a2106c43509b56
|
[] |
no_license
|
songting77/pabug
|
cd8c19bdf5defde32cda1f3837cb328742dac244
|
7b93cc0a425e04b2db4758903d92fef1f69b5656
|
refs/heads/master
| 2020-03-26T16:26:09.366251
| 2018-08-17T09:37:22
| 2018-08-17T09:37:48
| 145,101,571
| 0
| 0
| null | 2018-08-17T11:03:21
| 2018-08-17T09:34:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
import requests
import re
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
# fetch the page and return its HTML source
def getHtml(url):
res = requests.get(url, headers=headers)
return res.content.decode('utf-8','ignore')
# extract the URLs from the HTML (with a regex)
def getUrl(url):
html = getHtml(url)
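    # note: only absolute http(s) links are captured; relative hrefs are skipped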
urlre = '<a.*href=\"(https?://.*?)\".*>'
    # pre-compile the regex
urlc = re.compile(urlre)
urlList = urlc.findall(html)
return urlList
def getEmail():
pass
def getMovie(url):
re.search('tv',url)
pass
# start the depth-limited crawl
def deepSpider(url,deepth):
print('\t\t\t' * depthDict[url], "已经抓取了第%d层:%s" % (depthDict[url], url))
if depthDict[url] >= deepth:
return
    # newly discovered URLs
sonUrlList =getUrl(url)
for newUrl in sonUrlList:
if newUrl not in depthDict:
depthDict[newUrl] = depthDict[url]+1
deepSpider(newUrl,deepth)
if __name__ == '__main__':
    # starting URL
starturl = "http://www.baidu.com/?swd='二狗'"
    # depth tracking (a dict keyed by URL)
depthDict = {}
depthDict[starturl] = 1
deepSpider(starturl,3)
|
[
"1820440070@qq.com"
] |
1820440070@qq.com
|
1cc9cfaecdbc241c0ae8c475450f64e32fdf7f36
|
c3756fd940a8c6ad4356e2ebb40b162dc9b02512
|
/script5.py
|
668c97b580e503c20779a4d476d01ac3eb8c75f1
|
[] |
no_license
|
pahharo/pruebaGIT
|
407661d10627438cb2433687ed023d9081464dc2
|
0cd3eb7886d99bcb8f3494f8190c7ffef7d24928
|
refs/heads/master
| 2021-01-10T05:19:57.880947
| 2016-01-29T18:10:35
| 2016-01-29T18:10:35
| 50,678,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
import sys
f1=open(sys.argv[1],'r')
f2=open(sys.argv[2],'w')
w=f1.readline()
while w:
f2.write(w)
w=f1.readline();
f1.close()
f2.close()
|
[
"manusl.teleco@gmail.com"
] |
manusl.teleco@gmail.com
|
ee7900ea86cdeace5869c8fa625f38230d748790
|
bc021e7b839af455945262679ffa8e84fd9bfb1d
|
/src/rest/ishipsafe/migrations/0006_auto_20151004_1932.py
|
1aaae80cdc0789930670336f8723b870c095ed95
|
[] |
no_license
|
ishipsafe/issapp
|
bd27fd72bd086a7e1befb8647ed6a8e5ea29d41a
|
d64fa1ab52305fe30268a18b5b6982f16ffb6b72
|
refs/heads/master
| 2021-01-10T10:23:52.276558
| 2015-11-09T01:43:27
| 2015-11-09T01:43:27
| 44,488,891
| 0
| 0
| null | 2015-12-14T02:01:03
| 2015-10-18T17:41:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ishipsafe', '0005_subscribe_role'),
]
operations = [
migrations.AlterField(
model_name='subscribe',
name='role',
field=models.CharField(max_length=256),
),
]
|
[
"gitlab@gitlab.com"
] |
gitlab@gitlab.com
|
beb12668ab1f1daec1f292ad5d423e079e5cd00e
|
de47c1573f3ae4f0124fd139d50b5d3caf6f37ea
|
/Lecture 1 - What is Computation/lec1.py
|
990604807adc3fcdc8d08b93079d8d6321c7e3ee
|
[] |
no_license
|
Jonatandb/MIT_6.0001_IntroComputerSciencePython
|
2b7186e8413d37d73259245ef99e9206f4e36390
|
324bf7f0c48409a7ce81cbc72a3a75657ab7264a
|
refs/heads/master
| 2022-10-01T16:38:39.475426
| 2020-06-12T02:04:19
| 2020-06-12T02:04:19
| 267,966,567
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
pi = 3.14159
radius = 2.2
# area of circle equation <- this is a comment
area = pi * (radius ** 2)
print(area)
# change values of radius <- another comment
# use comments to help others understand what you are doing in code
radius = radius + 1
print(area) # area doesn't change
area = pi * (radius ** 2)
print(area)
#############################
#### COMMENTING LINES #######
#############################
# to comment MANY lines at a time, highlight all of them then CTRL+1
# do CTRL+1 again to uncomment them
# try it on the next few lines below!
# area = pi*(radius**2)
# print(area)
# radius = radius + 1
# area = pi*(radius**2)
# print(area)
#############################
#### AUTOCOMPLETE #######
#############################
# Spyder can autocomplete names for you
# start typing a variable name defined in your program and hit tab
# before you finish typing -- try it below
# define a variable
a_very_long_variable_name_dont_name_them_this_long_pls = 0
# below, start typing a_ve then hit tab... cool, right!
# use autocomplete to change the value of that variable to 1
# use autocomplete to write a line that prints the value of that long variable
# notice that Spyder also automatically adds the closed parentheses for you!
|
[
"jonatandb@gmail.com"
] |
jonatandb@gmail.com
|
9c955e40eee17d84a46917f4a2f7965f8d6e3dd1
|
8e5bec5094a06a81ce832a6d26dee1d77b46b367
|
/backend/views/events.py
|
fb1580951a0e3f5de94637eeebe0c5428f31b176
|
[] |
no_license
|
dmitry-buraev/uuevent
|
8b0a1db7b83cc501749d753d03175462ecf83928
|
f395fb1c6cf1796fdc29f8f7411b68957745e178
|
refs/heads/master
| 2021-01-20T01:57:19.837937
| 2012-06-29T20:43:15
| 2012-06-29T20:43:15
| 4,735,196
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,515
|
py
|
from google.appengine.ext import ndb
from flask import json, Response, request, abort
from flask.views import MethodView
from backend import app
from backend.models import Event, Tag
from datetime import date
from settings import DATE_FORMAT as DF, TIME_FORMAT as TF
class EventREST(MethodView):
def get(self, id=None):
if id is None:
args = request.args
count = int(args.get('count', 15))
offset = int(args.get('offset', 0))
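            # assumes DATE_FORMAT yields year-month-day (e.g. 2012-06-29), so the split gives (year, month, day)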
d = args.get('date', date.today().strftime(DF)).split('-')
dt = date(int(d[0]), int(d[1]), int(d[2]))
tagids = args.getlist('tags')
if tagids:
tagkeys = [ndb.Key('Tag', int(id)) for id in tagids]
events = Event.query(
ndb.AND(
Event.intervals.start_date == dt,
Event.tags.IN(tagkeys)
)
).fetch()
else:
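                # fetch one extra event so the 'more' flag below can tell whether another page exists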
events = Event.query(
Event.intervals.start_date == dt).fetch(
count+1, offset=offset)
r = [to_dict(e, dt) for e in events]
            more = len(r) > count  # flag: there are more results to display
res = { 'more': more, 'events': r[:-1] if more else r }
else:
res = to_dict(Event.get_by_id(int(id)))
return Response(json.dumps(res), mimetype='application/json')
event_view = EventREST.as_view('event_rest')
app.add_url_rule('/events/', view_func=event_view, methods=['GET',])
app.add_url_rule('/events/', view_func=event_view, methods=['POST',])
app.add_url_rule('/events/<id>', view_func=event_view,
methods=['GET', 'PUT', 'DELETE'])
def to_dict(o, dt=None):
return {
'item_id': o.key.id(), 'watchword': o.watchword,
'description': o.description,
'intervals': [{
'start_date': i.start_date.strftime(DF),
'start_time': i.start_time.strftime(TF
) if i.start_time is not None else None,
            'end_date': i.end_date.strftime(DF
) if i.end_date is not None else None,
'end_time': i.end_time.strftime(TF
) if i.end_time is not None else None,
} for i in o.intervals if (
(i.start_date == dt) if dt is not None else True)], #FIXME: It's ugly
'company': o.company.id(),
'tags': [ t.id() for t in o.tags ],
}
|
[
"dmitry.buraev@gmail.com"
] |
dmitry.buraev@gmail.com
|
0961a25523b4e8ceb6bd6a4961d73a1d16e7d023
|
5d8cc97885f3cb3240e19e50bf8762ce2d185433
|
/ADDONS/plugin.video.arabicvideos/arabicvideos/DOWNLOAD.py
|
d754608d6f981a5c646506bd46984229be50caeb
|
[] |
no_license
|
kwuw/KODI
|
ea357215af9001e515fb415215fc5a326d2e4177
|
3a1721372023cf7982fdfa151166d207bd84022f
|
refs/heads/master
| 2023-04-01T23:13:48.717823
| 2021-04-16T20:43:10
| 2021-04-16T20:43:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,180
|
py
|
# -*- coding: utf-8 -*-
#from __future__ import unicode_literals
from LIBRARY import *
script_name='DOWNLOAD'
def MAIN(mode,url,context):
#DIALOG_OK(url,context)
#LOG_MENU_LABEL(script_name,menu_label,mode,menu_path)
if context=='6_REMOVE': results = DELETE_FILE(url)
elif mode==330: results = LIST_FILES()
elif mode==331: results = PLAY(url)
elif mode==332: results = CHANGE_FOLDER()
elif mode==333: results = TO_ADD()
else: results = False
return results
def DELETE_FILE(filenamepath):
try: os.remove(filenamepath.decode('utf8'))
except: os.remove(filenamepath)
return
def PLAY(url):
result = PLAY_VIDEO(url,script_name,'video')
#xbmc.Player().play(url)
#DIALOG_OK(url,result)
return
def TO_ADD():
message = 'أذهب الى رابط الفيديو او الصوت في الموقع المطلوب ثم أضغط على زر القائمة اليمين ثم أختار "تحميل ملفات فيديو" ثم اختار دقة الصورة واختار نوع ملف الصورة وبعدها سوف يبدأ التحميل'
DIALOG_OK('طريقة تحميل الملفات',message)
return
def LIST_FILES():
addMenuItem('link','[COLOR FFC89008]طريقة تحميل ملفات الفيديو[/COLOR]','',333)
downloadpath = GET_DOWNLOAD_FOLDER()
mtime = os.stat(downloadpath).st_mtime
files = []
for filename in os.listdir(unicode(downloadpath,'utf8')):
if not filename.startswith('file_'): continue
filepath = os.path.join(downloadpath,filename)
mtime = os.path.getmtime(filepath)
#ctime = os.path.getctime(filepath)
#mtime = os.stat(filepath).ct_mtime
#filename = filename.decode('utf8').encode('utf8')
files.append([filename,mtime])
files = sorted(files,reverse=True,key=lambda key: key[1])
for filename,mtime in files:
#DIALOG_OK(filename,filename)
filename = filename.decode('utf8').encode('utf8')
filepath = os.path.join(downloadpath,filename)
addMenuItem('video',filename,filepath,331)
return
def GET_DOWNLOAD_FOLDER():
downloadpath = settings.getSetting('download.path')
if downloadpath!='': return downloadpath
settings.setSetting('download.path',addoncachefolder)
return addoncachefolder
def CHANGE_FOLDER():
downloadpath = GET_DOWNLOAD_FOLDER()
change = DIALOG_YESNO(downloadpath,'هذا هو مكان تخزين ملفات الفيديو التي تحملها انت باستخدام هذا البرنامج . هل تريد تغيير المكان ؟','','','كلا','نعم')
if change:
newpath = DIALOG_BROWSESINGLE(3,'مكان تحميل ملفات الفيديو','local','',False,True,downloadpath)
yes = DIALOG_YESNO(newpath,'هذا هو المكان الجديد لتخزين ملفات الفيديو التي تحملها انت باستخدام هذا البرنامج . هل تريد استخدامه بدلا من المكان القديم ؟','','','كلا','نعم')
if yes:
settings.setSetting('download.path',newpath)
DIALOG_OK('رسالة من المبرمج','تم تغيير مكان تخزين الملفات المحملة')
#if not change or not yes: DIALOG_OK('رسالة من المبرمج','تم الغاء العملية')
return
def DOWNLOAD_VIDEO(url,videofiletype):
DIALOG_NOTIFICATION('يرجى الانتظار','جاري فحص ملف التحميل')
LOG_THIS('NOTICE',LOGGING(script_name)+' Preparing to download the video file URL: [ '+url+' ]')
#DIALOG_OK(url,videofiletype)
if videofiletype=='':
if 'mp4' in url.lower(): videofiletype = '.mp4'
elif 'm3u8' in url.lower(): videofiletype = '.m3u8'
elif 'webm' in url.lower(): videofiletype = '.webm'
else: videofiletype = 'مجهول'
if videofiletype not in ['.ts','.mkv','.mp4','.mp3','.flv','.m3u8','.avi','.webm']:
DIALOG_OK('تنزيل ملف الفيديو','الملف من نوع '+videofiletype+' والبرنامج حاليا غير جاهز لتحميل هذا النوع من الملفات')
LOG_THIS('ERROR_LINES',LOGGING(script_name)+' Video type/extension is not supported URL: [ '+url+' ]')
return
#DIALOG_OK('free space',str(freediskspace_MB))
filename = menu_label.replace(' ',' ').replace(' ','_')
filename = 'file_'+str(int(now))[-4:]+'_'+filename+videofiletype
downloadpath = GET_DOWNLOAD_FOLDER()
windowsfilename = windows_filename(filename).decode('utf8')
windowsfilenamepath = os.path.join(downloadpath,windowsfilename)
filenamepath = os.path.join(downloadpath,filename)
#DIALOG_OK(downloadpath,filename)
url = url.replace('verifypeer=false','')
if 'User-Agent=' in url:
url2,useragent = url.rsplit('User-Agent=',1)
useragent = useragent.replace('|','').replace('&','')
else: url2,useragent = url,None
if 'Referer=' in url2: url2,referer = url2.rsplit('Referer=',1)
else: url2,referer = url2,''
url2 = url2.strip('|').strip('&').strip('|').strip('&')
referer = referer.replace('|','').replace('&','')
headers = {'User-Agent':useragent}
if referer!='': headers['Referer'] = referer
LOG_THIS('NOTICE',LOGGING(script_name)+' Downloading video file URL: [ '+url2+' ] Headers: [ '+str(headers)+' ] File: [ '+filenamepath+' ]')
#DIALOG_OK(url2,str(headers))
#DIALOG_OK(xbmc.getInfoLabel('System.UsedSpace'),xbmc.getInfoLabel('System.TotalSpace'))
#DIALOG_OK(xbmc.getInfoLabel('System.UsedSpacePercent'),xbmc.getInfoLabel('System.FreeSpacePercent'))
MegaByte = 1024*1024
freediskspace = xbmc.getInfoLabel('System.FreeSpace')
freediskspace_MB = int(re.findall('\d+',freediskspace)[0])
if freediskspace_MB==0:
try:
st = os.statvfs(downloadpath)
freediskspace_MB = st.f_frsize*st.f_bavail/MegaByte
#DIALOG_OK(osname,str(freediskspace_MB))
#string = str(st.f_bavail)+','+str(st.f_frsize)+','+str(st.f_blocks)
#string += ','+str(st.f_bfree)+','+str(st.f_bsize)+','+str(st.f_ffree)
#DIALOG_OK(str(freeuserspace),str(dir(st)))
except: pass
if freediskspace_MB==0:
DIALOG_TEXTVIEWER_FULLSCREEN('مساحة التخزين مجهولة','للأسف البرنامج غير قادر أن يحدد مقدار مساحة التخزين الفارغة في جهازك وعليه فان تحميل الفيديوهات لن يعمل عندك إلى أن يقوم مبرمجي برنامج كودي بحل هذه المشكلة لان تحميل الفيديوهات قد يسبب امتلاء جهازك بالملفات وهذا فيه خطورة على عمل جهازك بصورة صحيحة ولهذا السبب قام المبرمج مؤقتا بمنع البرنامج من تحميل الفيديوهات','big','right')
LOG_THIS('ERROR_LINES',LOGGING(script_name)+' Unable to determine the disk free space')
return
import requests
headers['Accept-Encoding'] = ''
if videofiletype=='.m3u8':
windowsfilenamepath = windowsfilenamepath.rsplit('.m3u8')[0]+'.mp4'
response = OPENURL_REQUESTS_CACHED(SHORT_CACHE,'GET',url2,'',headers,'','','DOWNLOAD-DOWNLOAD_VIDEO-1st')
m3u8 = response.content
linkLIST = []
links = re.findall('\#EXTINF:.*?[\n\r](.*?)[\n\r]',m3u8+'\n\r',re.DOTALL)
if not links:
LOG_THIS('ERROR_LINES',LOGGING(script_name)+' The m3u8 file did not have the required links URL: [ '+url2+' ]')
return
try: file = open(windowsfilenamepath,'wb')
except: file = open(windowsfilenamepath.encode('utf8'),'wb')
response = requests.get(links[0])
chunk = response.content
response.close()
file.write(chunk)
chunksize = len(chunk)
chunksCount = len(links)
filesize = chunksize*chunksCount
else:
chunksize = 1*MegaByte
response = requests.request('GET',url2,headers=headers,verify=False,stream=True)
try: filesize = int(response.headers['Content-Length'])
except: filesize = 0
chunksCount = int(filesize/chunksize)
if filesize>102400:
try: file = open(windowsfilenamepath,'wb')
except: file = open(windowsfilenamepath.encode('utf8'),'wb')
filesize_MB = int(1+filesize/MegaByte)
if filesize<=102400:
LOG_THIS('ERROR_LINES',LOGGING(script_name)+' Video file is too small and/or something wrong URL: [ '+url2+' ] Video file size: [ '+str(filesize_MB)+' MB ] Available size: [ '+str(freediskspace_MB)+' MB ] File: [ '+filenamepath+' ]')
DIALOG_OK('رسالة من المبرمج','فشل في معرفة حجم ملف الفيديو ولهذا لا يمكن للبرنامج تحميل هذا الملف')
if videofiletype=='.m3u8': file.close()
return
freeafterdownload_MB = freediskspace_MB-filesize_MB
if freeafterdownload_MB<500:
LOG_THIS('ERROR_LINES',LOGGING(script_name)+' Not enough disk space to download the video file URL: [ '+url2+' ] Video file size: [ '+str(filesize_MB)+' MB ] Available size: [ '+str(freediskspace_MB)+' MB ] File: [ '+filenamepath+' ]')
DIALOG_OK('لا يوجد مساحة كافية للتحميل','الملف المطلوب تحميله حجمه '+str(filesize_MB)+' ميغابايت وجهازك فيه مساحة فارغة '+str(freediskspace_MB)+' ميغابايت وللمحافظة على عمل جهازك بدون مشاكل يجب ابقاء 500 ميغابايت فارغة دائما وهذا معناه جهازك لا توجد فيه مساحة كافية لتحميل ملف الفيديو المطلوب')
file.close()
return
yes = DIALOG_YESNO('هل تريد تحميل الملف ؟','الملف المطلوب حجمه تقريبا '+str(filesize_MB)+' ميغابايت وجهازك فيه مساحة فارغة تقريبا '+str(freediskspace_MB)+' ميغابايت وهذا الملف قد يحتاج بعض الوقت للتحميل من الأنترنيت إلى جهازك . هل انت متأكد وتريد الاستمرار بتحميل ملف الفيديو ؟','','','كلا','نعم')
if not yes:
DIALOG_OK('','تم إلغاء عملية تحميل ملف الفيديو')
file.close()
LOG_THIS('NOTICE',LOGGING(script_name)+' User refused to start the download of the video file URL: [ '+url2+' ] File: [ '+filenamepath+' ]')
return
LOG_THIS('NOTICE',LOGGING(script_name)+' Download started successfully')
pDialog = DIALOG_PROGRESS()
pDialog.create(windowsfilenamepath,'السطر فوق هو مكان تخزين ملف الفيديو')
Finished = True
t1 = time.time()
if videofiletype=='.m3u8': # m3u8 and multi chunks video files
for i in range(1,chunksCount):
link = links[i]
if 'http' not in link: link = url2.rsplit('/',1)[0]+'/'+link
response = requests.get(link)
chunk = response.content
response.close()
file.write(chunk)
t2 = time.time()
timeElapsed = t2-t1
chunkTime = timeElapsed/i
timeTotal = chunkTime*(chunksCount+1)
timeRemaining = timeTotal-timeElapsed
pDialog.update(int(100*i/(chunksCount+1)),'السطر فوق هو مكان تخزين ملف الفيديو','جلب ملف الفيديو:- الجزء رقم',str(i*chunksize/MegaByte)+'/'+str(filesize_MB)+' MB وقت متبقي: '+time.strftime("%H:%M:%S",time.gmtime(timeRemaining))+' ـ')
if pDialog.iscanceled():
Finished = False
break
else: # mp4 and other single file videos
i = 0
for chunk in response.iter_content(chunk_size=chunksize):
file.write(chunk)
#file = file+chunk
i = i+1
t2 = time.time()
timeElapsed = t2-t1
chunkTime = timeElapsed/i
timeTotal = chunkTime*(chunksCount+1)
timeRemaining = timeTotal-timeElapsed
pDialog.update(int(100*i/(chunksCount+1)),'السطر فوق هو مكان تخزين ملف الفيديو','جلب ملف الفيديو:- الجزء رقم',str(i*chunksize/MegaByte)+'/'+str(filesize_MB)+' MB وقت متبقي: '+time.strftime("%H:%M:%S",time.gmtime(timeRemaining))+' ـ')
if pDialog.iscanceled():
Finished = False
break
response.close()
#with open(filename,'w') as f: f.write(file)
file.close()
pDialog.close()
if not Finished:
LOG_THIS('NOTICE',LOGGING(script_name)+' User cancelled/interrupted the download process URL: [ '+url2+' ] File: [ '+filenamepath+' ]')
DIALOG_OK('','تم إلغاء عملية تحميل ملف الفيديو')
return
else:
LOG_THIS('NOTICE',LOGGING(script_name)+' Video file downloaded successfully URL: [ '+url2+' ] File: [ '+filenamepath+' ]')
DIALOG_OK('','تم تحميل ملف الفيديو بنجاح')
return
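# Summary of the download flow above (a descriptive sketch of the observable behaviour only):
# 1. Guess the file extension from the URL when none is given; refuse unsupported types.
# 2. Build a filesystem-safe local filename under the configured download folder.
# 3. Split playback headers (User-Agent / Referer) out of the piped URL before requesting it.
# 4. Check free disk space via Kodi's System.FreeSpace info label, falling back to os.statvfs,
#    and refuse to download if less than 500 MB would remain free afterwards.
# 5. After user confirmation, stream the file in chunks - m3u8 playlists are fetched segment by
#    segment into an .mp4, other types use requests' iter_content - while updating a cancellable
#    progress dialog with an estimated time remaining.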
|
[
"emadmahdi@yahoo.com"
] |
emadmahdi@yahoo.com
|
370c6046f9cc332164d0d3cf3407fe10573ba20c
|
6775233e6819c4e90ac8acdecd912ae26c6158d8
|
/funny/dices.py
|
0c071a665afef39ff2e02b10872c2a9423235a24
|
[] |
no_license
|
gonzaponte/Python
|
ad8a3a364429ce53afe6b8cae2f993ffd0c1835c
|
ac80f79469d2f990608f23bcd1b9faf678fa59e6
|
refs/heads/master
| 2021-06-09T03:09:52.877156
| 2021-03-31T08:42:16
| 2021-03-31T08:42:16
| 23,758,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
from ROOT import TRandom3,TH1I
random = TRandom3()
random.SetSeed(10)
r = lambda: 0.5 + 6*random.Uniform()
N= 1000000
h = TH1I('a','a',7,0,7)
roll = lambda n: map( lambda x: int(round(r())), range(n) )
is6in4 = 0
is66in24 = 0
for i in range(N):
four = roll(4)
dice1 = roll(24)
dice2 = roll(24)
if 6 in four:
is6in4 += 1
for d1,d2 in zip(dice1,dice2):
if d1==d2 and d1==6:
is66in24 += 1
break
print 'Probabilidade de obter un 6 en 4 tiradas: ',is6in4,'/',N,' = ',float(is6in4)/N
print 'Probabilidade de obter dous 6s en 24 tiradas: ',is66in24,'/',N,' = ',float(is66in24)/N
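# Analytic check of the two classic de Mere probabilities estimated above:
# P(at least one 6 in 4 rolls) = 1 - (5/6)**4 ~ 0.5177
# P(at least one double six in 24 rolls of two dice) = 1 - (35/36)**24 ~ 0.4914
# so for large N the Monte Carlo ratios printed by this script should converge
# to roughly 0.518 and 0.491 respectively.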
|
[
"gonzaponte@gmail.com"
] |
gonzaponte@gmail.com
|
6ebdecce64d39a07496cecc992d2247df108558c
|
e7c576fadb4ee05e3a7dfa4594c0c7ef3e6e258e
|
/veryNewPrime.py
|
c759319af02e3dd6561d0a07585f1d0439f8c408
|
[] |
no_license
|
fhcwcsy/python_practice
|
9c3d559279c9d6d2fcc5f875110421137eead28d
|
a8b2e0bdf05cc372aa992058bc43949dbd99755d
|
refs/heads/master
| 2020-09-10T01:43:54.066192
| 2019-12-11T00:35:17
| 2019-12-11T00:35:17
| 221,618,373
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,049
|
py
|
def generating_prime( a, b ):
# 134 15183
# 21344 66666
# 5 1245122
# 3 310831
# 1 1
# prime_list = []
# not_prime_list = []
# if a % 2 :
# a += 1
# for num in range( a, b+1, 2 ) :
# if num in not_prime_list :
# continue
# not_prime = True
# for d in range( 3, int( a**0.5 ) + 1, 2 ) :
# if d in not_prime_list :
# continue
# if not num % d :
# not_prime = True
# break
# if not_prime:
# continue
prime_list = []
save = True
start = 0
next_num = 3
all_possibilities = set( range( a, b+1 ) )
if b < 2:
return []
while next_num <= b :
is_prime = True
for d in prime_list :
if d > next_num*0.5:
break
if next_num % d == 0 :
is_prime = False
break
if is_prime :
prime_list.append( next_num )
# print( next_num )
if save and prime_list[-1] >= a :
start = prime_list.index( next_num )
save = False
next_num += 2
if a <= 2:
prime_list.insert(0, 2 )
return prime_list[ (start) : ]
# if __name__ == "__main__":
# print( generating_prime( 5, 1240 ) )
# generating_prime( 5, 1245122 )
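# Usage sketch (the function trial-divides odd candidates by the primes found so far
# and returns the primes in [a, b]):
# generating_prime(10, 30) -> [11, 13, 17, 19, 23, 29]
# generating_prime(1, 10)  -> [2, 3, 5, 7]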
|
[
"fhcwcsy@gmail.com"
] |
fhcwcsy@gmail.com
|
fed7ba13a2cb534f67b93b5ade3810dc77297eb8
|
ec3b6cbbacc20e021b5819e6ae3fe405a75d2513
|
/Pong/squares.py
|
5c0800bc8584473321e19109e2df28401bf54c87
|
[] |
no_license
|
imschwartz89/pygame-learning
|
11505d4a78bc5bd79fbe4c1a5072a2647b30530c
|
33236e9ec0c465125258277c8998b26c32f2aa0b
|
refs/heads/main
| 2023-01-03T01:51:28.279959
| 2020-10-28T16:00:25
| 2020-10-28T16:00:25
| 308,057,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,587
|
py
|
import pygame
#width = 500
#height = 500
class Window():
def __init__(self, height, width, color):
self.height = height
self.width = width
self.color = color
self.win = pygame.display.set_mode((width, height))
self.win.fill(color)
self.clock = pygame.time.Clock()
pygame.display.set_caption("Pong Alpha")
self.score = [0,0]
def update(self):
pygame.display.update()
class Cube():
def __init__(self, color, posx, posy, dir, size):
self.rect = pygame.Rect(posx, posy, size, size)
self.color = color
self.direction = dir
self.size = size
#turn posx and posy into list?
def movingSquare(self, surface):
if self.rect.x + self.size >= surface.width or self.rect.x + self.direction[0] <= 0:
self.direction[0] *= -1
if self.rect.y + self.size >= surface.height or self.rect.y + self.direction[1] <= 0:
self.direction[1] *= -1
self.rect.move_ip(self.direction[0], self.direction[1])
def draw(self, surface): #add color, then can code to choose between background and actual color
pygame.draw.rect(surface.win, self.color, self.rect) #(0,255,0), self.rect)
def movingDraw(self, surface): #, dirx, diry):
pygame.draw.rect(surface.win, surface.color, self.rect)
self.movingSquare(surface)
self.draw(surface)
#pygame.draw.rect(surface, self.color, self.rect) # (0,255,0), self.rect)
def checkCollide(self, rect):
return self.rect.colliderect(rect)
class Ball():
def __init__(self, color, posx, posy, dir, size):
self.rect = pygame.Rect(posx, posy, size, size)
self.color = color
self.direction = dir
self.size = size
#turn posx and posy into list?
def movingSquare(self, surface, collided):
# need to factor in movement of paddle
# create function to check if paddle is moving then factor that in
if collided:
if self.direction[0] < 20 and self.direction[0] > -20:
if self.direction[0] < 0:
self.direction[0] -= 1
else:
self.direction[0] += 1
self.direction[0] *= -1
#self.direction[1] *=
#NEED TO CHANGE THIS TO DISPLAY SCORE AND RESET BALL
if self.rect.x + self.size >= surface.width: #or self.rect.x + self.direction[0] <= 0:
#self.direction[0] *= -1
#print("Score P1")
surface.score[0] += 1
self.direction[0] = 5
self.reset(surface)
elif self.rect.x + self.direction[0] <= 0:
#print("Score P2")
surface.score[1] += 1
self.direction[0] = -5
self.reset(surface)
if self.rect.y + self.size >= surface.height or self.rect.y + self.direction[1] <= 0:
self.direction[1] *= -1
self.rect.move_ip(self.direction[0], self.direction[1])
def draw(self, surface): #add color, then can code to choose between background and actual color
pygame.draw.rect(surface.win, self.color, self.rect) #(0,255,0), self.rect)
def movingDraw(self, surface, players): #, dirx, diry):
pygame.draw.rect(surface.win, surface.color, self.rect)
self.movingSquare(surface, self.checkCollideWithPlayers(players))
self.draw(surface)
#pygame.draw.rect(surface, self.color, self.rect) # (0,255,0), self.rect)
def checkCollide(self, rect):
return self.rect.colliderect(rect)
def checkCollideWithPlayers(self, players):
for i in range(len(players)):
if self.checkCollide(players[i].rect):
return True
return False
def reset(self, surface):
pygame.draw.rect(surface.win, surface.color, self.rect)
self.rect.x = 250
self.rect.y = 250
#self.draw(surface)
class Player():
def __init__(self, posx, posy, width, height, number):
#self.rect = Cube((255,255,255), 0, 250, 0, 50)
self.rect = pygame.Rect(posx, posy, width, height)
self.number = number
def draw(self, surface):
#self.rect.draw(surface) [if using cube]
pygame.draw.rect(surface.win, (255,255,255), self.rect)
def move(self, surface):
#for event in pygame.event.get():
#if event.type == pygame.QUIT:
# pygame.quit()
keys = pygame.key.get_pressed()
#for key in keys:
if self.number == 1:
if keys[pygame.K_w]:
pygame.draw.rect(surface.win, (0,0,0), self.rect)
self.rect.move_ip(0, -10)
pygame.draw.rect(surface.win, (255,255,255), self.rect)
elif keys[pygame.K_s]:
pygame.draw.rect(surface.win, (0,0,0), self.rect)
self.rect.move_ip(0, 10)
pygame.draw.rect(surface.win, (255,255,255), self.rect)
elif self.number == 2:
if keys[pygame.K_UP]:
pygame.draw.rect(surface.win, (0,0,0), self.rect)
self.rect.move_ip(0, -10)
pygame.draw.rect(surface.win, (255,255,255), self.rect)
elif keys[pygame.K_DOWN]:
pygame.draw.rect(surface.win, (0,0,0), self.rect)
self.rect.move_ip(0, 10)
pygame.draw.rect(surface.win, (255,255,255), self.rect)
#should make a redrawWindow() instead of drawing in each method
# should redraw each object again
def redrawWindow(win, players, ball):
drawScoreboard(win)
ball.draw(win)
for i in range(len(players)):
players[i].draw(win)
pygame.draw.line(win.win, (255,255,255), (250,0), (250,500), 5)
def drawScoreboard(win):
scoreStr = f"{win.score[0]} {win.score[1]}"
font = pygame.font.Font("freesansbold.ttf", 32)
text = font.render(scoreStr, True, (255,255,255), (0,0,0))
textRect = text.get_rect()
textRect.center = (250, 30)
win.win.blit(text, textRect)
def run():
pygame.init()
win = Window(500, 500, (0,0,0))
#win = pygame.display.set_mode((width, height))
#clock = pygame.time.Clock()
#win.fill((0,0,0))
#aRect = pygame.Rect(10, 10, 25, 50)
#aSquare = pygame.Rect(100, 100, 25, 25)
#movingSquare = pygame.Rect(100, 150, 25, 25)
#pygame.draw.rect(win.win, (255,255,255), aRect)
#pygame.draw.rect(win.win, (255,0,0), aSquare)
#pygame.draw.rect(win, (255,0,0), movingSquare)
#horiSquare = Cube((0,255,0), 100, 150, [5,0], 25)
#vertSquare = Cube((0, 0, 255), 200, 100, [0,5], 30)
#bothSquare = Cube((100, 100, 100), 300, 200, [5,5], 20)
#pygame.display.update()
ball = Ball((255,255,255), 250, 250, [5,5], 20)
playerOne = Player(0, 200, 25, 100, 1)
playerOne.draw(win)
playerTwo = Player(475, 200, 25, 100, 2)
playerTwo.draw(win)
playersList = [playerOne, playerTwo]
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit() # stops Segmentation fault from occurring
pygame.time.delay(20)
win.clock.tick(10)
#pygame.draw.rect(win, (0,0,0), movingSquare)
# pygame.draw.rect(win, (0,255,0), movingSquare)
#movingSquare.move_ip(5, 5) # if I wanted to use just the move(), I would need to reassign it to itself
#pygame.draw.rect(win, (0,255,0), movingSquare)
#movingSquare(win, movingSquare)
#movingSquare.movingSquare()
#movingSquare.draw(win)
# if horiSquare.rect.x + 25 >= win.width or horiSquare.rect.x - 5 <= 0:
# horiSquare.direction[0] *= -1
#horiSquare.movingDraw(win)
# if vertSquare.rect.y + 25 >= win.height or vertSquare.rect.y - 5 <= 0:
# vertSquare.direction[1] *= -1
#vertSquare.movingDraw(win)
# if bothSquare.rect.x + 25 >= win.width or bothSquare.rect.x - 5 <= 0:
# bothSquare.direction[0] *= -1
#bothSquare.movingDraw(win)
# if bothSquare.rect.y + 25 >= win.height or bothSquare.rect.y - 5 <= 0:
# bothSquare.direction[1] *= -1
#bothSquare.movingDraw(win)
#print("mSquare x:" + str(movingSquare.rect.x))
playerOne.move(win)
playerTwo.move(win)
ball.movingDraw(win, playersList)
#if bothSquare.checkCollide(playerOne.rect):
# print("Collision detected")
redrawWindow(win, playersList, ball)
#pygame.display.update()
win.update()
run()
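# Notes on the game loop above (descriptive only):
# - Player 1 moves with the W/S keys, player 2 with the Up/Down arrow keys (see Player.move).
# - Ball.movingSquare reverses, and slightly speeds up, the horizontal direction on a paddle
#   hit; when the ball leaves the left or right edge a point is scored and the ball is reset
#   to the centre of the 500x500 window.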
|
[
"noreply@github.com"
] |
imschwartz89.noreply@github.com
|
3be59a2d25d31bad91c4f8c6b14ef1bd72bcf038
|
b38fb62950582664158327a2abf29c84cc59178b
|
/0x08-python-more_classes/7-rectangle.py
|
cc2f3ff9130c143fa88c6aa6ed73583c9fb7233a
|
[] |
no_license
|
MiguelCF06/holbertonschool-higher_level_programming
|
a39129cf355abe15e2caeb41cdef385ace53cfda
|
0bc44343cb20c97221d3886bafda6db7235bc13a
|
refs/heads/master
| 2022-12-18T00:12:52.498624
| 2020-09-24T17:00:24
| 2020-09-24T17:00:24
| 259,323,305
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
#!/usr/bin/python3
""" Create a rectangle class """
class Rectangle:
""" The rectangle class """
number_of_instances = 0
print_symbol = "#"
def __init__(self, width=0, height=0):
self.width = width
self.height = height
Rectangle.number_of_instances += 1
@property
def width(self):
""" Getter of width """
return self.__width
@width.setter
def width(self, value):
""" Setter of width """
if not isinstance(value, int):
raise TypeError("width must be an integer")
else:
if value < 0:
raise ValueError("width must be >= 0")
self.__width = value
@property
def height(self):
""" Getter of height """
return self.__height
@height.setter
def height(self, value):
""" Setter of height """
if not isinstance(value, int):
raise TypeError("height must be an integer")
else:
if value < 0:
raise ValueError("height must be >= 0")
self.__height = value
def area(self):
""" Area of the rectangle """
return self.__width * self.__height
def perimeter(self):
""" Rectangle perimeter """
if self.__width == 0 or self.__height == 0:
return 0
else:
return (self.__width * 2) + (self.__height * 2)
def __str__(self):
""" String representation of a Rectangle instance """
string = ""
if self.__width != 0 and self.__height != 0:
string += "\n".join(str(self.print_symbol) * self.__width for j in range(self.__height))
return string
def __repr__(self):
""" String representation of the rectangle instance """
return "Rectangle({}, {})".format(self.__width, self.__height)
def __del__(self):
""" Destructor method for a rectangle instance """
print("Bye rectangle...")
Rectangle.number_of_instances -= 1
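# Usage sketch:
# r = Rectangle(2, 3)
# r.area() -> 6, r.perimeter() -> 10
# print(r) prints "##" on three lines (print_symbol repeated width times, one row per height unit)
# repr(r) -> "Rectangle(2, 3)"; deleting the last reference prints "Bye rectangle..."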
|
[
"miguel.cipamocha@gmail.com"
] |
miguel.cipamocha@gmail.com
|
e2795adaec03b424a011e257759570bdf7c6c93a
|
4494bf81f01d4cdc664d9b55a106db2adce43b5f
|
/no3a.py
|
d1bca79b8902c54add3a9cce9ea044c82f1a99e4
|
[] |
no_license
|
L200180022/Praktikum-ASD
|
ce706e2edf3c94a77a5103e6c9d19c1b5c6c2843
|
479cb9fa2a4f1be836fcba07e5f2df93bfe4210e
|
refs/heads/master
| 2021-02-10T22:10:31.406411
| 2020-04-05T19:35:33
| 2020-04-05T19:35:33
| 244,424,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
class tugasLink(object):
def __init__(self, nama, next = None):
self.data = nama
self.next = next
def cari(x, y):
if y == 1:
print (x.data)
elif y == 2:
print (x.next.data)
elif y == 3:
print (x.next.next.data)
elif y == 4:
print (x.next.next.next.data)
else:
print ("data tidak tersedia" )
a = tugasLink(10)
b = tugasLink(20)
c = tugasLink(30)
d = tugasLink(40)
a.next = b
b.next = c
c.next = d
print ("Headnya a, cari(a, (urutan data yg dicari))")
|
[
"noreply@github.com"
] |
L200180022.noreply@github.com
|
6e7e04a0bda7b74b42412ce110b99447221900cf
|
ba5216488e8c08085633ebe7022c9609571f35c5
|
/test_ws/build/drone_control/catkin_generated/pkg.develspace.context.pc.py
|
0756ab54c94ea52c836c609058289167839f1d7a
|
[] |
no_license
|
MasonDMitchell/drone_simulation
|
82fb9737ba869e05fac6740e86024fa50135080a
|
99951aa4447c5bb2429a6d3bbcf7c76c402c9e94
|
refs/heads/master
| 2020-09-08T06:50:56.761789
| 2020-01-17T19:39:26
| 2020-01-17T19:39:26
| 221,051,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "drone_control"
PROJECT_SPACE_DIR = "/home/mason/drone_simulation/test_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"mason.mitchellzone@gmail.com"
] |
mason.mitchellzone@gmail.com
|
a0254f535f2c1e312b00d3b0e539c566bcd54f3f
|
0c6b273899296883db1bc1e5938bc4ebd2a1760b
|
/backend/setup_code_tracker.py
|
2c9c2f0aae7035943516e098b2e89f93a9c0819b
|
[] |
no_license
|
kanishk1/comp9321-ass2
|
bace22396c6cbf8e60338f6b105a91ef05234671
|
0d280097e821e6164611670df9b8a6c165106518
|
refs/heads/master
| 2020-09-05T02:06:08.924724
| 2019-11-25T07:19:05
| 2019-11-25T07:19:05
| 219,952,527
| 0
| 4
| null | 2019-11-25T07:19:06
| 2019-11-06T08:53:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
# this code should only be run if you delete the entire database and all migrations
# create tracker if doesn't exist
from run import db
from database.models import Status_tracker
if not Status_tracker.query.all():
status_tracker = Status_tracker()
db.session.add(status_tracker)
db.session.commit()
|
[
"peeter._@hotmail.com"
] |
peeter._@hotmail.com
|
ecf8867b525dca0b2313c2c1338ccb471691fd86
|
5a744ad8e42abd26abc819ab05ce6ca5cb4146c7
|
/interviews/weather_uk_dimitrii/app/weather/routes.py
|
cf29419c13fe43de5ebda26137f1bfc96fb0a448
|
[] |
no_license
|
peternortonuk/quiz-questions
|
c2db6e62747c6e809aad4d0674d4e9980008d656
|
d33cb23bf9c85ff8e41cb6721ca761beecf5c796
|
refs/heads/main
| 2023-04-16T18:30:13.956245
| 2021-05-06T12:27:51
| 2021-05-06T12:27:51
| 325,616,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
from datetime import datetime
from app.weather import bp
from app.weather.db_work import get_weather_from_db
from app.weather.services import get_weather_string_from_api
from utils.error_handlers import InvalidUsage, APIUnavailableError
from flask import request, jsonify, make_response
@bp.route('/<string:city>', methods=['GET'])
def get_weather(city):
unit = request.args.get('unit', 'C')
if unit not in ('K', 'F', 'C'):
raise InvalidUsage('Wrong temperature unit character. Please use - F, C or K', status_code=400)
try:
response = get_weather_string_from_api(city, unit)
except APIUnavailableError:
response = get_weather_from_db(city, unit)
return jsonify({'weather': response})
@bp.route('/<string:city>/<string:date_str>', methods=['GET'])
def get_specific_datetime_weather(city, date_str):
try:
unit = request.args.get('unit', 'C')
if unit not in ('K', 'F', 'C'):
raise InvalidUsage('Wrong temperature unit character. Please use - F, C or K', status_code=400)
datetime_obj = datetime.strptime(date_str, '%Y-%m-%d')
weather = get_weather_from_db(city, unit, datetime_obj)
return jsonify({'weather': weather})
except ValueError:
raise InvalidUsage('Date format is not recognised. Please use the YYYY-MM-DD format', status_code=400)
@bp.errorhandler(404)
def not_found(error):  # Flask passes the error object to errorhandler callbacks
return make_response(jsonify({'error': 'Not found'}), 404)
@bp.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
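# Example requests against this blueprint (illustrative only - the URL prefix depends on how
# `bp` is registered in the application factory):
# GET <prefix>/London            -> current weather for London in Celsius (the default unit)
# GET <prefix>/London?unit=F     -> the same reading in Fahrenheit (only K, F and C are accepted)
# GET <prefix>/London/2021-05-01 -> weather stored for a specific date (must be YYYY-MM-DD);
# a malformed unit or date raises InvalidUsage, returned to the client with status 400.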
|
[
"peter.norton@gazprom-mt.com"
] |
peter.norton@gazprom-mt.com
|
6fd25c4f3840b07636abaf3db966d2c4e759195c
|
a823d9786b12c2e48b96216b9b58b531ab1ec354
|
/learn_python/basedemo/test12.py
|
3b337d8c585e4870580c0152bc463ee075544771
|
[] |
no_license
|
altraman00/mdl_python
|
3ded721ea714e74b4930d03355e4e159c638f04a
|
33fea9e097803f331e043b7e690fea4506a432bb
|
refs/heads/main
| 2023-07-03T23:58:05.479628
| 2021-08-12T05:44:49
| 2021-08-12T05:44:49
| 384,030,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
import time;
tick = time.time()
print(tick)
starttime = time.clock()
print('----',starttime)
localtime = time.localtime(time.time())
print(localtime)
print(time.localtime())
time2 = time.asctime(localtime)
print(time2)
time3 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print(time.localtime())
print(time3)
endtime = time.clock()
print('----',endtime)
print(endtime-starttime)
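# Note: time.clock() used above was deprecated in Python 3.3 and removed in Python 3.8;
# on current interpreters time.perf_counter() is the recommended replacement:
# starttime = time.perf_counter()
# ...
# print(time.perf_counter() - starttime)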
|
[
"xiekun@sunlands.com"
] |
xiekun@sunlands.com
|
d69cec81dceb7ff29ec3b6e59d4abfe5207083f7
|
a8dbab6f5d707d6b8e281eb2a7086bd1e648223e
|
/solidforml/03 Open Closed Principle/openclosed.py
|
e91871223641484d70e2e841d3c2c5703e734413
|
[
"MIT"
] |
permissive
|
kyirong6/solidforml
|
da1cec783a54c5d73361038d5bb6031ba77de2b3
|
96445274cfbf4ce77bcf3bbfb83b26ec19f5b392
|
refs/heads/main
| 2023-08-21T03:58:17.936024
| 2021-10-18T09:22:42
| 2021-10-18T09:22:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,131
|
py
|
from abc import abstractmethod, ABC
################# OCP violation #############################
# class Extractor:
#
# def extract_spectrogram(self, data):
# print("Extracted spectrogram")
#
# def extract_mfcc(self, data):
# print("Extracted MFCCs")
#
# def extract_mel_spectrogram(self, data):
# print("Extracted mel spectrogram")
#
#
# class DLPipeline:
#
# def __init__(self, extractor, feature_type):
# self.extractor = extractor
# self.feature_type = feature_type
#
# def run(self, data):
# print("Running DL pipeline")
# features = self._extract(data)
# # Implementation of DL steps go here
#
# def _extract(self, data):
# if self.feature_type == "spectrogram":
# self.extractor.extract_spectrogram(data)
# elif self.feature_type == "mfcc":
# self.extractor.extract_mfcc(data)
# elif self.feature_type == "melspectrogram":
# self.extractor.extract_melspectrogram(data)
#
#
# if __name__ == "__main__":
# data = [1, 2, 3]
# extractor = Extractor()
# dl_pipeline = DLPipeline(extractor, "spectrogram")
# dl_pipeline.run(data)
################# OCP violation #############################
class Extractor(ABC):
@abstractmethod
def extract(self, data):
pass
class SpectrogramExtractor(Extractor):
def extract(self, data):
print("Extracted spectrogram")
class MFCCExtractor(Extractor):
def extract(self, data):
print("Extracted MFCC")
class MelSpectrogramExtractor(Extractor):
def extract(self, data):
print("Extracted mel spectrogram")
class DLPipeline:
def __init__(self, extractor):
self.extractor = extractor
def run(self, data):
print("Running DL pipeline")
features = self._extract(data)
# Implementation of DL steps go here
def _extract(self, data):
self.extractor.extract(data)
if __name__ == "__main__":
data = [1, 2, 3]
extractor = SpectrogramExtractor()
dl_pipeline = DLPipeline(extractor)
dl_pipeline.run(data)
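# The point of the refactor: DLPipeline is closed for modification but open for extension,
# so supporting another feature type only requires passing a different Extractor subclass:
#
# dl_pipeline = DLPipeline(MFCCExtractor())
# dl_pipeline.run(data)  # prints "Extracted MFCC" instead of "Extracted spectrogram"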
|
[
"velardovalerio@gmail.com"
] |
velardovalerio@gmail.com
|
1a02d3c8f5643b42815c0a2184d1a488b1308724
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/intune/v20150114preview/android_mam_policy_by_name.py
|
a566fda17eaacffb12d4cc883b85ba9978375ea3
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 10,816
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['AndroidMAMPolicyByName']
class AndroidMAMPolicyByName(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_recheck_offline_timeout: Optional[pulumi.Input[str]] = None,
access_recheck_online_timeout: Optional[pulumi.Input[str]] = None,
app_sharing_from_level: Optional[pulumi.Input[str]] = None,
app_sharing_to_level: Optional[pulumi.Input[str]] = None,
authentication: Optional[pulumi.Input[str]] = None,
clipboard_sharing_level: Optional[pulumi.Input[str]] = None,
data_backup: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
device_compliance: Optional[pulumi.Input[str]] = None,
file_encryption: Optional[pulumi.Input[str]] = None,
file_sharing_save_as: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
managed_browser: Optional[pulumi.Input[str]] = None,
offline_wipe_timeout: Optional[pulumi.Input[str]] = None,
pin: Optional[pulumi.Input[str]] = None,
pin_num_retry: Optional[pulumi.Input[int]] = None,
policy_name: Optional[pulumi.Input[str]] = None,
screen_capture: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Android Policy entity for Intune MAM.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] host_name: Location hostName for the tenant
:param pulumi.Input[str] location: Resource Location
:param pulumi.Input[str] policy_name: Unique name for the policy
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource Tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['access_recheck_offline_timeout'] = access_recheck_offline_timeout
__props__['access_recheck_online_timeout'] = access_recheck_online_timeout
__props__['app_sharing_from_level'] = app_sharing_from_level
__props__['app_sharing_to_level'] = app_sharing_to_level
__props__['authentication'] = authentication
__props__['clipboard_sharing_level'] = clipboard_sharing_level
__props__['data_backup'] = data_backup
__props__['description'] = description
__props__['device_compliance'] = device_compliance
__props__['file_encryption'] = file_encryption
__props__['file_sharing_save_as'] = file_sharing_save_as
if friendly_name is None:
raise TypeError("Missing required property 'friendly_name'")
__props__['friendly_name'] = friendly_name
if host_name is None:
raise TypeError("Missing required property 'host_name'")
__props__['host_name'] = host_name
__props__['location'] = location
__props__['managed_browser'] = managed_browser
__props__['offline_wipe_timeout'] = offline_wipe_timeout
__props__['pin'] = pin
__props__['pin_num_retry'] = pin_num_retry
if policy_name is None:
raise TypeError("Missing required property 'policy_name'")
__props__['policy_name'] = policy_name
__props__['screen_capture'] = screen_capture
__props__['tags'] = tags
__props__['group_status'] = None
__props__['last_modified_time'] = None
__props__['name'] = None
__props__['num_of_apps'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:intune/v20150114privatepreview:AndroidMAMPolicyByName")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(AndroidMAMPolicyByName, __self__).__init__(
'azure-nextgen:intune/v20150114preview:AndroidMAMPolicyByName',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AndroidMAMPolicyByName':
"""
Get an existing AndroidMAMPolicyByName resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return AndroidMAMPolicyByName(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessRecheckOfflineTimeout")
def access_recheck_offline_timeout(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "access_recheck_offline_timeout")
@property
@pulumi.getter(name="accessRecheckOnlineTimeout")
def access_recheck_online_timeout(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "access_recheck_online_timeout")
@property
@pulumi.getter(name="appSharingFromLevel")
def app_sharing_from_level(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "app_sharing_from_level")
@property
@pulumi.getter(name="appSharingToLevel")
def app_sharing_to_level(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "app_sharing_to_level")
@property
@pulumi.getter
def authentication(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "authentication")
@property
@pulumi.getter(name="clipboardSharingLevel")
def clipboard_sharing_level(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "clipboard_sharing_level")
@property
@pulumi.getter(name="dataBackup")
def data_backup(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "data_backup")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="deviceCompliance")
def device_compliance(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "device_compliance")
@property
@pulumi.getter(name="fileEncryption")
def file_encryption(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "file_encryption")
@property
@pulumi.getter(name="fileSharingSaveAs")
def file_sharing_save_as(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "file_sharing_save_as")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="groupStatus")
def group_status(self) -> pulumi.Output[str]:
return pulumi.get(self, "group_status")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> pulumi.Output[str]:
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBrowser")
def managed_browser(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "managed_browser")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="numOfApps")
def num_of_apps(self) -> pulumi.Output[int]:
return pulumi.get(self, "num_of_apps")
@property
@pulumi.getter(name="offlineWipeTimeout")
def offline_wipe_timeout(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "offline_wipe_timeout")
@property
@pulumi.getter
def pin(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "pin")
@property
@pulumi.getter(name="pinNumRetry")
def pin_num_retry(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "pin_num_retry")
@property
@pulumi.getter(name="screenCapture")
def screen_capture(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "screen_capture")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource Tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
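# A minimal provisioning sketch (illustrative only; the values are placeholders, and
# friendly_name, host_name and policy_name are the properties the constructor requires):
#
# policy = AndroidMAMPolicyByName(
#     "example-policy",
#     friendly_name="Example Android MAM policy",
#     host_name="<tenant location hostName>",
#     policy_name="example-policy",
#     location="westus")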
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
11436b5d9f827decfd2c482545cf163c056ba445
|
451d827326096f27dd912cd8323847c5483b3a8c
|
/backend/aleyna_27209/urls.py
|
47f42708ea6700ef21b36aa866d79c9d898ce35a
|
[] |
no_license
|
crowdbotics-apps/aleyna-27209
|
d839812b39bba3b3a53fe9ad39d0c8119274ee57
|
7972f0d6cbd531bbba48efd387ad70a936b182bb
|
refs/heads/master
| 2023-04-20T12:33:16.667809
| 2021-05-21T16:04:47
| 2021-05-21T16:04:47
| 369,582,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,199
|
py
|
"""aleyna_27209 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Aleyna"
admin.site.site_title = "Aleyna Admin Portal"
admin.site.index_title = "Aleyna Admin"
# swagger
api_info = openapi.Info(
title="Aleyna API",
default_version="v1",
description="API documentation for Aleyna App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
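# Note on ordering: Django tries urlpatterns from top to bottom and uses the first match,
# so the catch-all re_path appended last must remain last - any pattern added after it
# would never be reached, while the API, admin and auth routes above it keep working.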
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
599d905038d248fb037ca50bc0780d275a941665
|
e68a40e90c782edae9d8f89b827038cdc69933c4
|
/res_bw/scripts/common/lib/lib2to3/fixes/fix_imports.py
|
93f3852497f4c5852f59a9a8367223073e4ff11c
|
[] |
no_license
|
webiumsk/WOT-0.9.16
|
2486f8b632206b992232b59d1a50c770c137ad7d
|
71813222818d33e73e414e66daa743bd7701492e
|
refs/heads/master
| 2021-01-10T23:12:33.539240
| 2016-10-11T21:00:57
| 2016-10-11T21:00:57
| 70,634,922
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 4,333
|
py
|
# 2016.10.11 22:20:59 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/lib2to3/fixes/fix_imports.py
"""Fix incompatible imports and module references."""
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {'StringIO': 'io',
'cStringIO': 'io',
'cPickle': 'pickle',
'__builtin__': 'builtins',
'copy_reg': 'copyreg',
'Queue': 'queue',
'SocketServer': 'socketserver',
'ConfigParser': 'configparser',
'repr': 'reprlib',
'FileDialog': 'tkinter.filedialog',
'tkFileDialog': 'tkinter.filedialog',
'SimpleDialog': 'tkinter.simpledialog',
'tkSimpleDialog': 'tkinter.simpledialog',
'tkColorChooser': 'tkinter.colorchooser',
'tkCommonDialog': 'tkinter.commondialog',
'Dialog': 'tkinter.dialog',
'Tkdnd': 'tkinter.dnd',
'tkFont': 'tkinter.font',
'tkMessageBox': 'tkinter.messagebox',
'ScrolledText': 'tkinter.scrolledtext',
'Tkconstants': 'tkinter.constants',
'Tix': 'tkinter.tix',
'ttk': 'tkinter.ttk',
'Tkinter': 'tkinter',
'markupbase': '_markupbase',
'_winreg': 'winreg',
'thread': '_thread',
'dummy_thread': '_dummy_thread',
'dbhash': 'dbm.bsd',
'dumbdbm': 'dbm.dumb',
'dbm': 'dbm.ndbm',
'gdbm': 'dbm.gnu',
'xmlrpclib': 'xmlrpc.client',
'DocXMLRPCServer': 'xmlrpc.server',
'SimpleXMLRPCServer': 'xmlrpc.server',
'httplib': 'http.client',
'htmlentitydefs': 'html.entities',
'HTMLParser': 'html.parser',
'Cookie': 'http.cookies',
'cookielib': 'http.cookiejar',
'BaseHTTPServer': 'http.server',
'SimpleHTTPServer': 'http.server',
'CGIHTTPServer': 'http.server',
'commands': 'subprocess',
'UserString': 'collections',
'UserList': 'collections',
'urlparse': 'urllib.parse',
'robotparser': 'urllib.robotparser'}
def alternates(members):
return '(' + '|'.join(map(repr, members)) + ')'
def build_pattern(mapping = MAPPING):
mod_list = ' | '.join([ "module_name='%s'" % key for key in mapping ])
bare_names = alternates(mapping.keys())
yield "name_import=import_name< 'import' ((%s) |\n multiple_imports=dotted_as_names< any* (%s) any* >) >\n " % (mod_list, mod_list)
yield "import_from< 'from' (%s) 'import' ['(']\n ( any | import_as_name< any 'as' any > |\n import_as_names< any* >) [')'] >\n " % mod_list
yield "import_name< 'import' (dotted_as_name< (%s) 'as' any > |\n multiple_imports=dotted_as_names<\n any* dotted_as_name< (%s) 'as' any > any* >) >\n " % (mod_list, mod_list)
yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
class FixImports(fixer_base.BaseFix):
BM_compatible = True
keep_line_order = True
mapping = MAPPING
run_order = 6
def build_pattern(self):
return '|'.join(build_pattern(self.mapping))
def compile_pattern(self):
self.PATTERN = self.build_pattern()
super(FixImports, self).compile_pattern()
def match(self, node):
match = super(FixImports, self).match
results = match(node)
if results:
if 'bare_with_attr' not in results and any((match(obj) for obj in attr_chain(node, 'parent'))):
return False
return results
return False
def start_tree(self, tree, filename):
super(FixImports, self).start_tree(tree, filename)
self.replace = {}
def transform(self, node, results):
import_mod = results.get('module_name')
if import_mod:
mod_name = import_mod.value
new_name = unicode(self.mapping[mod_name])
import_mod.replace(Name(new_name, prefix=import_mod.prefix))
if 'name_import' in results:
self.replace[mod_name] = new_name
if 'multiple_imports' in results:
results = self.match(node)
if results:
self.transform(node, results)
else:
bare_name = results['bare_with_attr'][0]
new_name = self.replace.get(bare_name.value)
if new_name:
bare_name.replace(Name(new_name, prefix=bare_name.prefix))
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\lib2to3\fixes\fix_imports.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.10.11 22:20:59 Střední Evropa (letní čas)
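# Example of what this fixer does when run under 2to3 (illustrative):
#   before: import StringIO; from urlparse import urljoin
#   after:  import io; from urllib.parse import urljoin
# Bare attribute references such as StringIO.StringIO() are likewise rewritten to
# io.StringIO() via the 'bare_with_attr' pattern and the same MAPPING table.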
|
[
"info@webium.sk"
] |
info@webium.sk
|
2bf817a6155e3a01d75d49527e93cdf1126b8f45
|
cd895ae074ecb67ef886af8b26256a62f3c023dc
|
/src/NATS/Client/sample/DEMO_Procedure_Display/PlotHelpers.py
|
d5bcf51770c866847a2ca77b656702f4914961c4
|
[] |
no_license
|
mh-swri/NASA_ULI_InfoFusion
|
1361c25f8f19d31dc81cea05bdc704896690cbf1
|
52aa60454941fd65180ac348594dfee0c19398ab
|
refs/heads/master
| 2020-03-25T00:16:30.087618
| 2019-11-19T18:55:09
| 2019-11-19T18:55:09
| 143,177,424
| 5
| 1
| null | 2019-12-09T22:50:12
| 2018-08-01T15:53:01
|
C
|
UTF-8
|
Python
| false
| false
| 71,944
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
NATIONAL AIRSPACE TRAJECTORY-PREDICTION SYSTEM (NATS)
Copyright 2018 by Optimal Synthesis Inc. All rights reserved
Author: Jun Yang
Date: 2018-01-19
#Revision Logs
03/21/2018: Added AirportLayout, Node, Link classes to handle airport layout display
and taxi route design.
03/26/2018: Added TaxiPlan class in order to handle plotting and analyzing taxi plans.
"""
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import numpy as np
import h5py
import re
import os
import time
class OutFileHandler:
"""
    This class is to aid parsing the trajectories from NATS output files.
    The supported file formats include: xml, h5, csv.
Each trajectory is read into an instance of ACTrajectory.
"""
#for plotting purpose
#lat_min=10.78;lat_max=51.28;lon_min=-150;lon_max=-45.25
def __init__(self):
"""
Initialize as a place holder.
        Reading an output file is done by calling read_xml_file, read_hdf5_file, or read_csv_file.
"""
self.callsign_list=[]
self.traj_list=[]
@staticmethod
def get_column_defintion():
"""
        This function returns the column definitions as a python dict variable.
        :return: column definitions for the numpy array inside an ACTrajectory instance (ACTrajectory.colmap)
"""
return ACTrajectory.colmap
def get_all_callsigns(self):
"""
functions to get all the callsigns included in the output file
:return: list of callsign(strings)
"""
return self.callsign_list
def get_callsigns_departing_from(self,org_arpt):
"""
function to get callsigns departing from the airport given as an airport argument
:param org_arpt: origin airport name
:return: list of callsigns departing from the given origin airport
"""
callsigns = []
for ii in range(len(self.callsign_list)):
if self.traj_list[ii].get_origin_airport() == org_arpt:
callsigns.append(self.callsign_list[ii])
return callsigns
def get_callsigns_arriving_at(self,dest_arpt):
"""
        function to get call signs arriving at the given airport
:param dest_arpt: destination airport (name string)
:return: list of call signs that arrive at the airport
"""
callsigns = []
for ii in range(len(self.callsign_list)):
if self.traj_list[ii].get_destination_airport() == dest_arpt:
callsigns.append(self.callsign_list[ii])
return callsigns
def get_all_trajectories(self):
"""
        get the list of ACTrajectory instances
        :return: list of ACTrajectory instances
"""
return self.traj_list
def get_trajectories_departing_from(self,org_arpt):
"""
function to get the list of ACTrajectory instances departing from the origin airport
:param org_arpt: origin airport name
:return: list of ACTrajectory instances
"""
cs_list=self.get_callsigns_departing_from(org_arpt)
return self.get_trajectories_from_callsign_list(cs_list)
def get_trajectories_arriving_at(self,dest_arpt):
"""
function to get the list of ACTrajectory instances arriving at the destination airport
:param dest_arpt: the name of the destination airport
:return: list of ACTrajectory instances
"""
cs_list = self.get_callsigns_arriving_at(dest_arpt)
return self.get_trajectories_from_callsign_list(cs_list)
def get_trajectories_from_callsign_list(self,callsign_list):
"""
Given list of call signs, returns the list of ACTrajectory instances
:param callsign_list: list of call sign strings
:return: list of ACTrajectory instances
"""
return [self.get_trajectory_of(callsign) for callsign in callsign_list]
def get_callsigns_from_to_airports(self,orig_arpt,dest_arpt):
"""
Given origin & destination airport pairs, return the list of callsigns that depart from the origin
and arrive at the destination
:param orig_arpt: the name of the origin airport
:param dest_arpt: the name of the destination airport
:return: list of call signs
"""
org_arpt_list = self.get_callsigns_departing_from(orig_arpt)
dest_arpt_list = self.get_callsigns_arriving_at(dest_arpt)
return [callsign for callsign in org_arpt_list if callsign in dest_arpt_list]
def get_trajectories_from_to_airpots(self,orig_arpt,dest_arpt):
"""
Given origin & destination airport pairs, return the list of ACTrajectory instances that depart from the origin and arrive at the destination
:param orig_arpt: the name of the origin airport
:param dest_arpt: the name of the destination airport
:return: list of ACTrajectories
"""
cs_list = self.get_callsigns_from_to_airports(orig_arpt,dest_arpt)
return self.get_trajectories_from_callsign_list(cs_list)
def get_trajectory_of(self,callsign):
"""
Given call sign, return the correponding ACTrajectory instance
:param callsign: the call sign (string)
:return: ACTrajectory instance
"""
rtn_traj=None
if callsign in self.callsign_list:
cs_idx=self.callsign_list.index(callsign)
rtn_traj=self.traj_list[cs_idx]
else:
raise ValueError("callsign:{} is not found in stored trajectories".format(callsign))
return rtn_traj
def plot_trajectories_from_callsigns(self,callsigns, plt,color=None,legend=False):
"""
Plot trajectories specified by callsigns. If the color is specified, all the trajectories are plotted using
the same color. Otherwise, default colors are used.
The legend is off in default, if it is True, the legend is displayed with labels as call signs.
:param callsigns: list of call signs
:param plt: matplotlib.pyplot instance (needed to call plot functions)
:param color: color specification ex)'b'
:param legend: True or False for displaying the legend
:return: None
"""
trajs=self.get_trajectories_from_callsign_list(callsigns)
num_traj=len(trajs)
for ii in range(num_traj):
latlon_data = trajs[ii].get_latlon_data()
if color:
plt.plot(latlon_data[:,1],latlon_data[:,0],color=color)
else:
plt.plot(latlon_data[:,1],latlon_data[:,0],label=callsigns[ii])
plt.xlabel('lon(deg)');plt.ylabel('lat(deg)')
if legend:
plt.legend()
def read_xml_file(self,xml_file):
"""
Given xml_file name, read AC trajectories into lists.
xml.etree.ElementTree is used to parse the xml_file.
Note:
            This must be called for the instance to be non-empty
:param xml_file: string name of xml file (output from NATS)
:return: None (ACTrajectory instances are stored as internal variables)
"""
fh=open(xml_file) #to close the file later
tree=ET.parse(fh)
root=tree.getroot()
t_start_utc=int(root.get('simulation_start_time'))
traj_list=root.findall('trajectory')
print("will load {} ac trajectories...".format(len(traj_list)))
#for the first aircraft
for trajx in traj_list:
#read the trajectory as an ATCTrajectory instance
trajo=ACTrajectory(t_start_utc,trajx,'XML')
#adding the trajectory
callsign=trajo.get_callsign()
# self.callsign_list.append(trajo.get_callsign())
# self.traj_list.append(trajo)
self._add_trajectory(trajo)
# print ("callsign:{} org_arpt:{}, dest_arpt:{}".format(
# callsign,trajx.get('origin_airport'),trajx.get('destination_airport')
# ))
print('done.')
fh.close()
def read_hdf5_file(self, h5_file):
"""
Given h5_file name, read AC trajectories into lists
Note:
This must be called for the instance to be non-empty
:param h5_file: string name of hdf5 file (output from NATS)
        :return: None (ACTrajectory instances are generated as internal variables)
"""
fh = h5py.File(h5_file) # File instance
#1.obtain dataset
datset=fh['/trajectories']
print("will load {} ac trajectories...".format(len(datset)))
# for the first aircraft
#later the h5 needs to include the following time.
#Right now, it does not have the value.
t_start_utc=1121238067
for ii in range(len(datset)):
# read the trajectory as an ATCTrajectory instance
trajo = ACTrajectory(t_start_utc,datset[ii],'HDF5' )
# adding the trajectory
callsign = trajo.get_callsign()
# self.callsign_list.append(trajo.get_callsign())
# self.traj_list.append(trajo)
self._add_trajectory(trajo)
# print ("callsign:{} org_arpt:{}, dest_arpt:{}".format(
# callsign,trajx.get('origin_airport'),trajx.get('destination_airport')
# ))
print('done.')
fh.close()
def read_csv_file(self, csv_file):
"""
Given csv_file name, read AC trajectories into lists
Note:
This must be called for the instance to be non-empty
:param csv_file: string name of the csv file (output from NATS)
:return:
"""
# 1. readout the header
fh = open(csv_file, 'r')
num_lines_discard = 7
for ii in range(num_lines_discard):
fh.readline()
# a.read the start utc time
line = fh.readline()
tokens=line.rstrip().split(',')
t_start_utc = int(tokens[0])
#b. next line is empty line
fh.readline()
#c. starts to read 'AC' line
ac_cursor=fh.tell()
line=fh.readline()
while line:
#a. put the cursor back so that 'AC' line can be read
fh.seek(ac_cursor)
#b. file handle keeps moving the cursor inside
trajo=ACTrajectory(t_start_utc,fh,type='CSV')
self._add_trajectory(trajo)
#c. read the next line
line=fh.readline()
#print("read line:{} len(line):{}".format(line,len(line)))
#In both cases: (a) eof is reached (b) read line between AC trajectories
if not line: #empty line (not '\n'), reached eof
break
else: #read the next line
line=fh.readline() #does this cause an error if the file handler
ac_cursor=fh.tell()
#print(line)
fh.close()
def _add_trajectory(self,new_traj):
"""
        Since lists are used to save trajectories, both the call sign of the aircraft and the
        trajectory itself are saved.
:param new_traj: ACTrajectory instance
:return: None
"""
self.callsign_list.append(new_traj.get_callsign())
self.traj_list.append(new_traj)
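# A typical read-and-plot flow with OutFileHandler (sketch; the file name and airport codes
# below are placeholders):
#
# handler = OutFileHandler()
# handler.read_xml_file('nats_output.xml')  # or read_hdf5_file(...) / read_csv_file(...)
# callsigns = handler.get_callsigns_from_to_airports('KSFO', 'KPHX')
# handler.plot_trajectories_from_callsigns(callsigns, plt, legend=True)
# plt.show()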
class Region:
"""
Both Center and Sector have the same format of data. So Region is introduced to handle the common
attributes of Center and Sector.
"""
def __init__(self,region_line):
"""
read region specification from a read line. Both center and sector share
the same format for their specification
:param region_line: lines start with SECTOR or CENTER
"""
tokens = region_line.rstrip().split(',')
self.region_index=int(tokens[1])
self.region_name=tokens[2]
self.num_wpts=int(tokens[3])
self.nwpts=None
self.region_type=tokens[0]
def get_retion_type(self):
"""
whether center or sector
:return: region type (string)
"""
return self.region_type
def get_region_name(self):
"""
returns the name of the region (sector name or center name)
:return: region_name (string)
"""
return self.region_name
def region_within_US(self):
"""
checks whether the region belongs to the region around USA.
This is to plot only those sectors and centers that are relevant US continent.
:return:True or False
"""
latlon_data=self.get_latlon_data()
lat_min=np.min(latlon_data[:,0])
lat_max=np.max(latlon_data[:,0])
lon_min=np.min(latlon_data[:,1])
lon_max=np.max(latlon_data[:,1])
#the following are checked manually by looking at
#google maps
LAT_MIN_THLD=24
LAT_MAX_THLD=68
LON_MIN_THLD=-171
LON_MAX_THLD=-54
rtn_value=(lat_min > LAT_MIN_THLD) and \
(lat_max < LAT_MAX_THLD) and \
(lon_min > LON_MIN_THLD) and \
(lon_max < LON_MAX_THLD)
return rtn_value
def get_index(self):
"""
returns the region index
:return: region_index (sector or center index)
"""
return self.region_index
def get_number_of_waypoints(self):
"""
tells the number of waypoints that compose the region (center or sector).
        The first and last waypoints are different, meaning that when plotting, the first point
        must be repeated after the last to make a closed polygon.
:return: number_of_waypoints (int)
"""
return self.num_wpts
def get_latlon_data(self):
"""
Returns 2d array of (lat,lon)
        :return: numpy array of size (number_of_waypoints,2)
"""
return self.nwpts
def read_waypoints(self,fh):
"""
This is a function that reads the given number of lines from a file
The data seems to make angles between -180 and 180. To make a close region,
when the longitude jumps by changing signs, subtract 360 degrees from them.
:param fh: file_handle for SectorData or CenterData
:return:
"""
wpts = []
num_lines = self.get_number_of_waypoints()
for ii in range(num_lines):
line = fh.readline()
tokens = line.rstrip().split(',')
lat = float(tokens[1])
lon = float(tokens[2])
if lon >-30:
lon-=360
wpts.append((lat, lon))
self.num_wpts=len(wpts)
self.nwpts=np.array(wpts) #numpy 2d array
def plot_region(self,plt,color='w',fig_handle=None,centerId=False):
"""
plot region (either sector or center)
:return: None
"""
#1.add the first point on the last again so that it can be closed
latlon_data=self.get_latlon_data()
lat_mean=np.mean(latlon_data[:,0])
lon_mean=np.mean(latlon_data[:,1])
first_latlon = latlon_data[0, :].reshape(1, 2)
latlon_data = np.vstack((latlon_data, first_latlon))
if len(latlon_data) > 1:
plt.plot(latlon_data[:, 1], latlon_data[:, 0], '--', color=color, linewidth=.3)
if centerId and fig_handle:
#print("will add region id")
ax0=fig_handle.get_axes()[0]
cent_str='{}'.format(self.get_index())
ax0.text(lon_mean,lat_mean,cent_str,color='green')
plt.xlabel('lon(deg)');
plt.ylabel('lat(deg)')
plt.grid(linestyle=':', color='w', linewidth=0.3)
class Center(Region):
"""
As of 01/12/2018, Region and Center are identical
in terms of functions. They are only used for plotting.
"""
def __init__(self,cent_line):
Region.__init__(self,cent_line)
class Sector(Region):
"""
Sector has specifications such as min and max altitudes.
It inherits Region.
"""
def __init__(self,sec_line):
"""
read Sector specification from a read line
:param sec_line: lines start with SECTOR
"""
#print("sec_line:{}".format(sec_line))
tokens = sec_line.rstrip().split(',')
Region.__init__(self,sec_line)
#besides, Sector also further specifies the following
self.altmin_ft=float(tokens[4])
self.altmax_ft=float(tokens[5])
def get_sector_index(self):
"""
return sector index
:return: sector_index(integer)
"""
return self.get_index()
def get_max_altitude_ft(self):
"""
:return: maximum altitude (ft)
"""
return self.altmax_ft
def get_min_altitude_tf(self):
"""
:return: minimum altitude (ft)
"""
return self.altmin_ft
class RegionHandler:
"""
RegionHandler reads Sector or Center data files and store them as internal variables.
The function 'plot_regions' displays the center or sector data.
"""
def __init__(self):
"""
do nothing
"""
self.regions=None
self.num_regions=None
def getRegion(self,region_index):
assert(len(self.regions)>0)
return self.regions[region_index]
def plot_regions(self,plt,color='m',fig_handle=None,regionId=False):
"""
        Plot stored regions
:param plt: matplotlib.pyplot instance
:param color: specifies color of the line used for drawing regions
:param fig_handle:
:param regionId:
:return:
"""
for region in self.regions:
region_name=region.get_region_name()
# In case of center, there are high-centers and low centers with different boundaries.
# Ignore high centers.
if region_name.endswith('HIGH'):
#print("region_name:{} will be discarded".format(region_name))
continue
if region.region_within_US():
#print("displaying: {}".format(region_name))
region.plot_region(plt,color,fig_handle,regionId)
def read_region_file(self,region_file,region_type):
"""
        This function reads a center or sector data file
:param region_file: name of the data file
:param region_type: string (used for differentiating sector and center)
:return:
"""
fh = open(region_file, 'r')
# read 4 lines (heading)
for ii in range(4):
fh.readline()
# starting from here reads sector data
line = fh.readline()
self.regions=[]
        while line: # loop until readline() returns an empty string (end of file)
if line.rstrip(): # if not empty line
aregion=None
if region_type=='SECTOR':
aregion=Sector(line)
aregion.read_waypoints(fh)
elif region_type=='CENTER':
aregion=Center(line)
aregion.read_waypoints(fh)
else:
raise ValueError("Right now only center and sector are considered")
self.regions.append(aregion)
line = fh.readline() # read the next line
self.num_regions = len(self.regions)
print("total {} {}s are read".format(self.num_regions,region_type))
fh.close()
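# Illustrative usage sketch (not part of the original module): loading sector boundaries with
# RegionHandler and querying one sector. The SectorData path is taken from the __main__
# example below, and the list index 0 is an arbitrary choice.
def _example_read_sectors():
    import matplotlib.pyplot as plt
    handler = RegionHandler()
    handler.read_region_file('./data/SectorData', 'SECTOR')
    sector = handler.getRegion(0)
    print("sector {} has {} waypoints, max altitude {} ft".format(
        sector.get_sector_index(), sector.get_number_of_waypoints(), sector.get_max_altitude_ft()))
    handler.plot_regions(plt, color='m')
    plt.show()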
class ACTrajectory:
"""
ACTrajectory is introduced to handle numeric data read from NATS output.
The following units are default:
time:UTC miliseconds
latitude:deg
longitude:deg
altitude: ft (altitude)
altitude rate:fps (rate of climb or descent)
speed:knots (True AirSpeed)
heading:deg
fpa:deg (flight path angle)
sector index: int
For computational efficiency, numpy array is used to denote all the dynamic variables
except the static information regarding aircraft. The following maps define the numeric columns.
    colmap: provides the column number for each variable name, e.g. 't': time.
    This defines the columns returned when get_columns() is called.
    fm_map: maps each flight_mode string to its integer code.
    n2imap: name-to-index map used when reading HDF5 output.
    csv_header: column layout of the CSV output file.
"""
colmap={'t':0,'lat':1,'lon':2,'alt':3,'speed':4,'heading':5,
'altrate':6,'fpa':7,'sector_index':8,'flight_mode':9}
fm_map={'DEPARTING':0,'CLIMB':1,'CRUISE':2,'DESCENT':3,'LANDING':4}
n2imap = {'flight_index': 0, 'callsign': 1, 'actype': 2,
'origin_airport': 3, 'destination_airport': 4,
'start_time': 5, 'interval': 6,
'latitude': 7, 'longitude': 8, 'altitude': 9,
'hdot': 10, 'tas': 11, 'heading': 12,
'fpa': 13, 'sector_index': 14, 'sector_name': 15,
'mode': 16}
csv_header={
't':0, 'lat':1, 'lon':2,'alt':3,'hdot':4,'tas':5,'heading':6,
'fpa':7,'sector_index':8,'sector_name':9,'flight_mode':10
}
def __init__(self,t0_utc,et_node_or_hdf5, type='HDF5'):
"""
        :param t0_utc: simulation start time in UTC milliseconds
        :param et_node_or_hdf5: an XML element-tree node, an HDF5 trajectory record,
                                or a CSV file handle, depending on the 'type' argument
        :param type: one of 'XML', 'HDF5', or 'CSV'
"""
self.t0_utc=t0_utc
self.flight_index = None
self.callsign = None
self.actype = None
self.org_arpt = None
self.dest_arpt = None
self.start_time = None
self.time_interval_milisec = None #for airborne
self.nfields = None
        self.time_interval_milisec_ground=None #for ground
if type=='XML':
self.__initialize_from_xml(t0_utc,et_node_or_hdf5)
elif type=='HDF5':
self.__initialize_from_hdf5(t0_utc,et_node_or_hdf5)
elif type=='CSV':
file_handle=et_node_or_hdf5 #for CSV, it is a file handle
self.__initialize_from_csv(t0_utc,file_handle)
else:
raise NameError("type must be either xml or hdf5")
def __initialize_from_csv(self,t0_utc,fh):
"""
        :param t0_utc: starting time in UTC milliseconds
        :param fh: file handle positioned at the 'AC' line
:return:
"""
#1. read AC line
line=fh.readline()
#print('read line:{}'.format(line))
#2. read AC specification
tokens=line.rstrip('\n').split(',')
self.flight_index=int(tokens[1])
self.callsign=tokens[2]
self.actype=tokens[3]
self.org_arpt=tokens[4]
self.dest_arpt=tokens[5]
t_start_sec=float(tokens[6])
self.start_time = round(float(t_start_sec) * 1000) + t0_utc # make miliseconds
self.time_interval_milisec_ground=round(float(tokens[7])*1000)
self.time_interval_milisec=round(float(tokens[8])*1000)
num_rows=int(tokens[13])
num_cols = 10
self.nfields = np.zeros((num_rows, num_cols))
for ii in range(num_rows):
line=fh.readline()
tokens=line.rstrip().split(',')
#print("ii:{} line:{}".format(ii,line))
delta_sec=float(tokens[self.csv_header['t']])
self.nfields[ii, 0] = self.t0_utc + delta_sec * 1000
self.nfields[ii,1]=float(tokens[self.csv_header['lat']]) #lat
self.nfields[ii,2]=float(tokens[self.csv_header['lon']]) #lon
self.nfields[ii,3]=float(tokens[self.csv_header['alt']]) #alt
self.nfields[ii,4]=float(tokens[self.csv_header['tas']])
self.nfields[ii,5] =float(tokens[self.csv_header['heading']])
self.nfields[ii,6] =float(tokens[self.csv_header['hdot']])
self.nfields[ii,7] =float(tokens[self.csv_header['fpa']])
self.nfields[ii,8] =float(tokens[self.csv_header['sector_index']])
            # obtain the flight mode as a number
flight_mode_str =tokens[self.csv_header['flight_mode']]
flight_mode_num = ACTrajectory.fm_map[flight_mode_str]
self.nfields[ii,9]=flight_mode_num
def __initialize_from_hdf5(self, t0_utc, h5_traj):
self.flight_index = h5_traj[self.n2imap['flight_index']]
#print("flight index:{}".format(self.flight_index))
self.callsign = h5_traj[self.n2imap['callsign']]
self.actype = h5_traj[self.n2imap['actype']]
self.org_arpt = h5_traj[self.n2imap['origin_airport']]
self.dest_arpt = h5_traj[self.n2imap['destination_airport']]
t_start_sec=h5_traj[self.n2imap['start_time']]
self.start_time = round(float(t_start_sec) * 1000) + t0_utc # make miliseconds
self.time_interval_milisec = round(float(h5_traj[self.n2imap['interval']]) * 1000)
#print("type:{},time_interval:{} miliseconds".format(type(self.time_interval_milisec),self.time_interval_milisec))
#the number of rows
num_rows=h5_traj[self.n2imap['latitude']].shape[0]
        num_cols = 10
self.nfields = np.zeros((num_rows,num_cols))
#a. fill up the time
for ii in range(num_rows):
self.nfields[ii,0]=self.start_time+ii*self.time_interval_milisec
self.nfields[:,1]=h5_traj[self.n2imap['latitude']]
self.nfields[:,2] = h5_traj[self.n2imap['longitude']]
self.nfields[:,3] = h5_traj[self.n2imap['altitude']]
self.nfields[:,4] = h5_traj[self.n2imap['tas']]
self.nfields[:,5] = h5_traj[self.n2imap['heading']]
self.nfields[:,6] = h5_traj[self.n2imap['hdot']]
self.nfields[:,7] = h5_traj[self.n2imap['fpa']]
self.nfields[:,8] = h5_traj[self.n2imap['sector_index']]
self.nfields[:,9] = h5_traj[self.n2imap['mode']]
def __initialize_from_xml(self,t0_utc,et_node):
self.flight_index=int(et_node.get('flight_index'))
self.callsign=et_node.get('callsign')
self.actype=et_node.get('actype')
self.org_arpt=et_node.get('origin_airport') #three chars
self.dest_arpt=et_node.get('destination_airport')
self.start_time=round(float(et_node.get('start_time'))*1000)+t0_utc #make miliseconds
self.time_interval_milisec=round(float(et_node.get('interval'))*1000)
#print("type:{},time_interval:{} miliseconds".format(type(self.time_interval_milisec),self.time_interval_milisec))
self.nfields=[]
traj_pts=et_node.findall('trajectory_point')
for traj_pt in traj_pts:
time=float(traj_pt.get('timestamp'))
t_utc=ACTrajectory.compute_UTC_milis_time(self.start_time,time)
lat=float(traj_pt.find('latitude_deg').text)
lon=float(traj_pt.find('longitude_deg').text)
alt=float(traj_pt.find('altitude_ft').text)
alt_rate = float(traj_pt.find('rocd_fps').text)
speed = float(traj_pt.find('tas_knots').text)
heading = float(traj_pt.find('heading_deg').text)
fpa = float(traj_pt.find('fpa_deg').text)
            sector_index = float(traj_pt.find('sector_index').text)
            flight_mode_str=(traj_pt.find('flight_mode').text)
            flight_mode_num=ACTrajectory.fm_map[flight_mode_str]
            #The following should match with the order of colmap
            self.nfields.append([t_utc,lat,lon,alt,speed,heading,alt_rate,fpa,sector_index,flight_mode_num])
#make it numpy array for ease of handling later
self.nfields=np.array(self.nfields)
def get_callsign(self):
"""
        :return: aircraft id / callsign (string)
"""
return self.callsign
def get_origin_airport(self):
"""
:return: origin airport (string)
"""
return self.org_arpt
def get_destination_airport(self):
"""
        :return: destination airport (string)
"""
return self.dest_arpt
def get_all_times(self):
"""
        returns the time column in UTC milliseconds
:return: numpy array of shape: (length,)
"""
return self.nfields[:, self.colmap['t']]
def get_initial_time(self):
"""
return the very first time stamp of the aircraft trajectory
        :return: integer in UTC milliseconds
"""
return self.nfields[0,self.colmap['t']]
def get_final_time(self):
"""
returns the last time stamp of the aircraft trajectory
        :return: integer (UTC milliseconds)
"""
return self.nfields[-1,self.colmap['t']]
def get_time_step_in_miliseconds(self):
return self.time_interval_milisec
def get_columns(self):
"""
        returns all the numeric data as a numpy array. The definition of the columns
        is provided by the function call: get_column_definition()
        :return: ndarray (variables defined in the column definitions)
"""
return self.nfields
def get_latlon_data(self):
"""
returns lat_lon degree data
:return: numpy.array((num_time_stamps,2))
"""
lat_idx=self.colmap['lat']
lon_idx=self.colmap['lon']
lat=self.nfields[:,lat_idx:(lat_idx+1)] #make it 2d array
lon=self.nfields[:,lon_idx:(lon_idx+1)]
return np.hstack((lat,lon))
def get_timed_latlon_data(self):
"""
returns numpy array consisting of time_stamp,lat_deg,lon_deg
:return: numpy array of numpy.array((num_time_stamp,3))
"""
time_idx=self.colmap['t']
lat_idx=self.colmap['lat']
lon_idx=self.colmap['lon']
time=self.nfields[:,time_idx:(time_idx+1)]
lat=self.nfields[:,lat_idx:(lat_idx+1)] #make it 2d array
lon=self.nfields[:,lon_idx:(lon_idx+1)]
return np.hstack((time,np.hstack((lat,lon))))
def get_latlon_at_given_time(self,time_msec):
"""
        Given a time (in UTC milliseconds), return the [lat,lon] at that time instance
        :param time_msec: time stamp specified in UTC milliseconds
:return: list of [lat,lon]
"""
rtn_data=None
all_times=self.get_all_times()
time_indices=np.where(all_times==time_msec) #find indices for the given time
        if time_indices[0].size > 0: #if a matching time stamp was found
            found_idx=time_indices[0][0] #time_indices[0] is an array of matching indices
rtn_data=self._obtain_latlon_data_at_given_time_index(found_idx)
return rtn_data
def _obtain_latlon_data_at_given_time_index(self,data_index):
"""
Given data_index, return [lat,lon]
:param data_index: index for self.nfields
:return: list: [lat,lon] at the given index
"""
lat_idx = self.colmap['lat']
lon_idx = self.colmap['lon']
        lat = self.nfields[data_index, lat_idx]
lon = self.nfields[data_index, lon_idx]
return [lat,lon]
def plot_lat_lon(self,plt):
"""
plot (lat,lon) in x-y plot
:param plt: matplotlib.pyplot handle for plotting
:return:
"""
latlon_data=self.get_latlon_data()
plt.plot(latlon_data[:,0],latlon_data[:,1])
plt.xlabel('lat (deg)')
plt.ylabel('lon(deg)')
@staticmethod
    def get_column_definition():
return ACTrajectory.colmap
@staticmethod
def compute_UTC_milis_time(t0_utc,delt_sec):
"""
        Compute the UTC time in milliseconds
        :param t0_utc: initial time in milliseconds (integer)
        :param delt_sec: incremental time given in seconds
        :return: UTC time in milliseconds
"""
#comp_time=t0_utc+round(delt_sec*1000)
#print("type(comp_time:{}".format(type(comp_time)))
return t0_utc+round(delt_sec*1000) #int
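# Illustrative usage sketch (not part of the original module): extracting the altitude profile
# from an ACTrajectory via its colmap column definition. The 'traj' argument is assumed to be
# an ACTrajectory instance produced elsewhere (e.g. by the output-file reader in this module).
def _example_altitude_profile(traj):
    import matplotlib.pyplot as plt
    cols = traj.get_columns()                      # 2d numpy array; columns follow ACTrajectory.colmap
    t_utc_ms = cols[:, ACTrajectory.colmap['t']]   # UTC milliseconds
    alt_ft = cols[:, ACTrajectory.colmap['alt']]   # altitude (ft)
    t_rel_sec = (t_utc_ms - traj.get_initial_time()) / 1000.0
    plt.plot(t_rel_sec, alt_ft)
    plt.xlabel('time since start (s)')
    plt.ylabel('altitude (ft)')
    plt.title(traj.get_callsign())
    plt.show()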
class Node:
"""
    Node and Link are used to describe the surface layout of an airport.
    Node numbers are introduced to ease data access in a numerical form.
"""
def __init__(self, node_num,node_name,lat_deg,lon_deg):
self.node_num = node_num
self.node_name=node_name
self.lat_deg=lat_deg
self.lon_deg=lon_deg
self.rwy_entry=False
def get_node_name(self):
"""
:return: node name (string)
"""
return self.node_name
def set_node_name(self,new_name):
"""
set new_name as the node name.
:param new_name:
:return: None
"""
self.node_name=new_name
def get_node_num(self):
"""
:return: node_number (int)
"""
return self.node_num
def set_node_num(self,node_num):
"""
set node number.
:param node_num: integer
:return: None
"""
self.node_num=node_num
def get_lat_deg(self):
"""
:return: latitude (degree)
"""
return self.lat_deg
def get_lon_deg(self):
"""
:return: longitude (degree)
"""
return self.lon_deg
def get_latlon_deg(self):
"""
:return: tuple of (lat_deg,lon_deg)
"""
return (self.lat_deg,self.lon_deg)
def is_runway_entry(self):
"""
returns True if the node is a runway entry
:return: True or False
"""
return self.rwy_entry
def set_runway_entry_flag(self,ibool):
"""
        set True or False on whether the node is a runway entry
:param ibool: True or False
:return: None
"""
self.rwy_entry=ibool
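# Illustrative usage sketch (not part of the original module): constructing a Node directly
# and querying it. The node number, name, and coordinates are made-up values.
def _example_node_usage():
    gate = Node(0, 'Gate_1', 33.4343, -112.0116)
    lat, lon = gate.get_latlon_deg()
    print("{} (#{}) at lat={} lon={}".format(gate.get_node_name(), gate.get_node_num(), lat, lon))
    gate.set_runway_entry_flag(False)  # a gate is not a runway entry
    print("runway entry? {}".format(gate.is_runway_entry()))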
class Link:
"""
Link denotes a road segment that connects two nodes
"""
def __init__(self, node1_num,node2_num):
self.name = None
        self.linkid=(node1_num,node2_num) #tuple as an id
self.start_node_num = node1_num
self.end_node_num = node2_num
        self.length_ft = None # initially None (set later by an additional process)
def get_start_node_num(self):
"""
:return: the node number for the start node
"""
return self.start_node_num
def get_end_node_num(self):
"""
:return: the node number for the end node
"""
return self.end_node_num
def get_link_name(self):
"""
link name convention: start_node_name-end_node_name
:return: the link name
"""
if self.name:
return self.name
else:
raise ValueError("Link Name is not assigned yet.")
def set_link_name(self,name):
"""
set the given name as the link name
:param name:
:return: None
"""
self.name=name
def get_length_ft(self):
"""
        As of 04/02/2018, the length of a link is not computed by default.
        If the length has not been set, calling this function raises an error.
:return: link length (float)
"""
        if self.length_ft is not None:
return self.length_ft
else:
raise ValueError("Link Length is not set.")
# setters
def set_length(self,new_length_ft):
"""
set the length of the link
:param new_length_ft:
:return: None
"""
self.length_ft = new_length_ft
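# Illustrative usage sketch (not part of the original module): building a Link between two
# node numbers, naming it, and setting its length. The numbers, name, and length are made up.
def _example_link_usage():
    link = Link(0, 1)
    link.set_link_name('Gate_1-Taxi_A')  # convention: start_node_name-end_node_name
    link.set_length(350.0)               # length in feet (assumed value)
    print("{}: {} -> {}, {} ft".format(link.get_link_name(),
                                       link.get_start_node_num(),
                                       link.get_end_node_num(),
                                       link.get_length_ft()))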
class TaxiPlan:
"""
For departure:[Gate, ...., Rwy,V2point]
For arrival :[Touchdown,....,RwyExit,Gate]
"""
def get_node_names(self):
return [node.get_node_name() for node in self.wpts]
def __init__(self,surf_layout):
#empty
self.arpt_layout=surf_layout
self.wpts=[] #list of Node instance
self.type=None
#self.__read_type_str(type_str)
def is_departure_plan(self):
"""
if the taxiplan is for departure, it returns True.
Else, it returns False.
:return: True or False
"""
return self.type=='dep'
def get_gate_node_name(self):
"""
        For a departure plan, the first point is viewed as the gate.
        For an arrival plan, the last point is viewed as the gate.
        :return: gate node name (string), or None if there are no waypoints
"""
#default is departure
name=None
if self.wpts:
if self.type=='dep':
name=self.wpts[0].get_node_name()
if self.type=='arv':
name=self.wpts[-1].get_node_name()
return name
def get_departure_runway_node_name(self):
"""
        The second-to-last node is viewed as the departure runway.
:return: node_name that corresponds to the departure runway
"""
if self.type=='arv':
raise EnvironmentError(" this functio cannot be called for arrival taxi plan")
else: #departure
assert(len(self.wpts)>=3)
return self.wpts[-2].get_node_name()
def get_V2point_node_name(self):
"""
For departure plan, the last one is the V2 point.
:return:
"""
if self.type=='arv':
raise EnvironmentError(" this function cannot be called for arrival taxi plan")
else:
assert(len(self.wpts)>=3)
return self.wpts[-1].get_node_name()
def get_touchdown_node_name(self):
"""
For arrival plan, the first one is the touchdown point.
:return:
"""
if self.type!='arv':
raise EnvironmentError(" this function should be called for arrival taxi plan")
else:
assert(len(self.wpts)>=3)
return self.wpts[0].get_node_name()
def get_runway_exit_node_name(self):
"""
        For an arrival plan, the second node is the runway exit node.
:return:
"""
if self.type!='arv':
raise EnvironmentError(" this function should be called for arrival taxi plan")
else:
assert(len(self.wpts)>=3)
return self.wpts[1].get_node_name()
def read_csv_route(self,csv_file):
"""
        When the user saves a designed route, it is written to a csv file
        in a specific format. This function reads that csv file back in.
:param csv_file: csv_filename for designed taxi route
:return:
"""
fr=open(csv_file,'r')
        #1. read the number of nodes
line=fr.readline()
tokens=line.rstrip('\n').split(',')
num_points=int(tokens[1])
#2.the next line is column description
#node_number, node_name, lat_deg,lon_deg,alt_ft
fr.readline()
#3.Now read numeric data
for ii in range(num_points):
line=fr.readline()
tokens = line.rstrip('\n').split(',')
node_num = int(tokens[0])
node_name = tokens[1]
lat_deg = float(tokens[2])
lon_deg = float(tokens[3])
alt_ft=float(tokens[4])
node_instance = Node(node_num, node_name, lat_deg, lon_deg)
self.wpts.append(node_instance)
        #4. Final step: determine departure or arrival
self.__set_taxiplan_type()
def read_route_from_NATS_nodes(self, node_names):
"""
        NATS returns a list of node names (strings). This reads those names
        into a sequence of Node instances.
:param node_names: list of node names
:return: None
"""
print("Taxi Route from NATS")
for node_name in node_names:
node_instance=self.arpt_layout.get_node_from_name(node_name)
self.wpts.append(node_instance)
self.__set_taxiplan_type()
def read_route_from_design_strings(self, display_strs):
"""
        In route design, the display string sometimes starts with 'RWxxxx' for runway
        entry points, so the node number is extracted from the display string to get the
        corresponding Node instance.
        Logic: use the node_number to reconstruct the route.
        :param display_strs: list of strings of the form "node_name(node_num)"
        :return:
"""
print("Taxi route from design strings")
for d_str in display_strs:
#name_only=re.findall("([A-Za-z0-9_]+)\(",d_str)[0]
node_num=re.findall("\(([0-9]+)\)",d_str)[0]
node_instance=self.arpt_layout.get_node_from_node_number(int(node_num))
#print("name_only:{}".format(name_only))
self.wpts.append(node_instance)
#determine taxiplan type
self.__set_taxiplan_type()
def plot_route(self,plt,label, linecolor=None):
"""
plot the taxi route
:param plt: matplotlib.pyplot
        :param label: label used for the legend
:param linecolor: if the user wants to specify the line color
:return: None
"""
#1.node
lonlats=[(node.get_lon_deg(),node.get_lat_deg()) for node in self.wpts]
lonlats=np.array(lonlats)
if linecolor:
plt.scatter(lonlats[:, 0], lonlats[:, 1], marker='o', c=linecolor, s=7)
plt.plot(lonlats[:, 0], lonlats[:, 1], color=linecolor, label=label)
elif label:
plt.scatter(lonlats[:, 0], lonlats[:, 1], marker='o', c='y', s=7)
plt.plot(lonlats[:, 0], lonlats[:, 1], '-y', label=label)
def __set_taxiplan_type(self):
"""
        determine whether the plan is departure or arrival depending on
        the characteristics of the node-name sequence.
        This must be called after the wpts are read.
:return: None
"""
assert(len(self.wpts)>2) #at least three points
start_node_name=self.wpts[0].get_node_name().upper()
last_node_name=self.wpts[-1].get_node_name().upper()
#if the first node is runway, it must be arrival
if start_node_name.startswith('RW') or start_node_name.startswith('RWY'):
self.type='arv'
elif start_node_name.startswith('GATE'):
self.type='dep'
elif last_node_name.startswith('GATE'):
self.type='arv'
elif last_node_name.startswith('RW') or last_node_name.startswith('RWY'):
self.type='dep'
else:
print("start_node:{}, end_node:{} are not sufficeint to determine type".format(\
start_node_name,last_node_name))
self.type='unknown'
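# Illustrative usage sketch (not part of the original module): loading a previously saved taxi
# route back into a TaxiPlan and inspecting it. 'layout' is assumed to be an AirportLayout
# instance (defined below); the csv filename is a placeholder for a file written earlier by
# AirportLayout.write_taxiRoute_to_csv().
def _example_load_taxi_route(layout, csv_file='designed_route.csv'):
    plan = TaxiPlan(layout)
    plan.read_csv_route(csv_file)
    print("route nodes: {}".format(plan.get_node_names()))
    if plan.is_departure_plan():
        print("gate: {}".format(plan.get_gate_node_name()))
        print("departure runway: {}".format(plan.get_departure_runway_node_name()))
    else:
        print("touchdown: {}".format(plan.get_touchdown_node_name()))
        print("gate: {}".format(plan.get_gate_node_name()))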
class AirportLayout:
"""
Helper to process Airport Layout
"""
def __init__(self,arpt_name):
"""
        Holds the node/link layout for a single airport. The layout itself is filled in
        later, e.g. by initialize_from_NATS_airport_layout().
        :param arpt_name: ICAO airport name, for example KPHX
"""
self.arpt_name=arpt_name
self.nodes={} # dict: key:node_num value:Node instance
self.node_map={} # (name to number map)
self.links={} # dict, key as a tuple (node1_num,node2_num)
self.node_rwy_map={} # key:node_num, value: runway_name
        #The following storage is used for designing a route
self.routes=[]
self.route_completed=False #made true when the user double clicks
self.curr_node_str=None
#Line Drawing for user-selected points
self.xs=[]
self.ys=[]
self.line=None
def initialize_from_NATS_airport_layout(self,nodemap,nodedata,linkdata,rwydata):
"""
from NATS airport layout, build airport surface layout
:param nodemap: obtained by calling airportInterface.getLayout_node_map(XXXX)
:param nodedata: obtained by calling airportInterface.getLayout_node_data(XXXX)
:param linkdata: obtained by calling airportInterface.getLayout_links(XXXX)
:param rwydata : list of mapping between runway (CIFP) ident and node_name
:return: None
"""
num_nodes=len(nodemap)
#self.nodes=[None for ii in range(num_nodes)]
for ii in range(num_nodes):
node_name=nodemap[ii][0]
node_num=nodemap[ii][1]
assert(node_num==nodedata[ii][0])
lat_deg=nodedata[ii][1]
lon_deg=nodedata[ii][2]
#if node_num==0 or node_num==1:
#print("ii:{},node_name:{},node_num:{},lat:{},lon:{}".format(ii,node_name,node_num,lat_deg,lon_deg))
node_instance = Node(node_num, node_name, lat_deg, lon_deg)
self.nodes[node_num]=node_instance
self.node_map[node_name]=node_num
#print("check")
#print(self.nodes[node_num].get_node_name())
#read runway-node map
for each_rwy in rwydata:
rwy_name=each_rwy[0]
node_name=each_rwy[1]
node_num=self.node_map[node_name]
            # mark the node as a runway entry
            self.nodes[node_num].set_runway_entry_flag(True)
            # store the runway name mapping for later use
self.node_rwy_map[node_num]=rwy_name
# for key,value in self.node_rwy_map.items():
# print("node:{} {}".format(key,value))
num_links = len(linkdata)
for ii in range(num_links):
node1_num = linkdata[ii][0]
node2_num = linkdata[ii][1]
# print("node1_num:{},node2_num:{}".format(node1_num,node2_num))
# 1.create a link instance
link_instance = Link(node1_num, node2_num)
# 2.make a name
node1_name = self.nodes[node1_num].get_node_name()
node2_name = self.nodes[node2_num].get_node_name()
link_name = node1_name + '-' + node2_name
link_instance.set_link_name(link_name)
# 3. add to the dictionary
self.links[(node1_num, node2_num)] = link_instance
def get_airport_name(self):
"""
ICAO airport name, for example: KPHX
:return: ICAO airport name (string)
"""
return self.arpt_name
def get_designed_route(self):
"""
        If a route has been designed (by mouse clicks), the sequence of node names is returned.
        The names are the display strings the user clicked on.
:return: list of node names (or CIFP runway names, for example, RW01L)
"""
if self.routes:
return self.routes
else:
print("No route is designed yet")
return None
def get_node_from_node_number(self,node_num):
"""
obtain Node instance using the node number
:param node_num: (integer)
:return:
"""
if node_num in self.nodes.keys():
return self.nodes[node_num]
else:
raise ValueError("Node number:{} not in keys".format(node_num))
def get_node_from_name(self,node_name):
"""
obtain Node instance with the given node name
:param node_name: node name (string)
:return:
"""
if node_name in self.node_map.keys():
return self.nodes[self.node_map[node_name]]
else:
raise NameError("Node_name:{} not recognized".format(node_name))
def get_nodes(self):
"""
returns all the nodes in the airport layout
:return: nodes (dictionary with node numbers as keys)
"""
return self.nodes
def get_links(self):
"""
obtain all the links
:return: dict with keys as the tuple (node1_num,node2_num) corresponding to the start and end node numbers
"""
return self.links
def get_route_completion_flag(self):
"""
tells whether the route design is finished.
The route_completion_flag is set to True when the user double clicks the last node.
:return: True or False whether the route design is finished
"""
return self.route_completed
def set_route_completion_flag(self,ibool):
"""
        marks whether the route design is completed
:return: None
"""
self.route_completed=ibool
def have_a_designed_route(self):
"""
tells True whether the airport layout has a non-empty node-name sequence and
the route design is completed. In other words, the airport surface has a designed route
only if the route is non-empty and the user double-clicked the last node.
:return: True or False
"""
#If the routes are not None and they are completed
if self.routes and self.get_route_completion_flag():
return True
else:
return False
def plot_airport_layout(self,plt):
"""
        Plot nodes and links in (lon,lat) coordinates.
        In this plot, even if the user designs a taxi route, it is not compared with
        the shortest-path route.
:param plt: handle for matplotlib.pyplot
:return: None
"""
self.plot_nodes_lonlat(plt)
self.plot_links_lonlat(plt)
def plot_airport_for_taxiroute_design(self,plt,airportInterface,ac_instance):
"""
Plot nodes and links in (lon,lat) coordinates
As of 04/02/2018, Aircraft Instance is mainly used for obtaining aircraft id (call sign).
:param plt: matplotlib.pyplot handle
:param airportInterface: NATS airportInterface to call shortest-path-route function
:param ac_instance: NATS Aircraft Instance for obtaining the information about the aircraft
:return:
"""
self.plot_nodes_lonlat(plt,airportInterface,ac_instance)
self.plot_links_lonlat(plt)
    def __obtain_default_departure_node_name_sequence(self,design_plan_obj,airportInterface,ac_instance):
"""
        This must be called with a TaxiPlan instance that has been confirmed to be a departure plan.
:param design_plan_obj:
:param airportInterface:
:param ac_instance:
:return:
"""
gate_name = design_plan_obj.get_gate_node_name()
        #The following only works for a departure plan
rwy_node_name = design_plan_obj.get_departure_runway_node_name()
V2_node_name = design_plan_obj.get_V2point_node_name()
print("Gate:{} rwy:{} V2:{}".format(gate_name,rwy_node_name,V2_node_name))
# Shortest Path design from the designed route
ac_id=ac_instance.getAcid()
arpt_name = airportInterface.getDepartureAirport(ac_id)
# This will be replaced later if NATS is ready.
#sp_plans_jStr = airportInterface.generate_taxi_route_from_A_To_B(ac_instance.getAcid(), arpt_name, gate_name, \
# rwy_node_name)
airportInterface.generate_surface_taxi_plan(ac_id,arpt_name,gate_name,rwy_node_name,V2_node_name,None)
java_plan_strs = airportInterface.getSurface_taxi_plan(ac_id, arpt_name)
default_plan_strs=[]
for ii in range(len(java_plan_strs)):
default_plan_strs.append(java_plan_strs[ii])
# sp_plans = []
# for ii in range(len(sp_plans_jStr)):
# sp_plans.append(str(sp_plans_jStr[ii]))
#
# sp_plans.append(V2_node_name)
print("default_departure_plan:{}".format(default_plan_strs))
return default_plan_strs
def __obtain_default_arrival_node_name_sequence(self, design_plan_obj, airportInterface, ac_instance):
"""
        This must be called with a TaxiPlan instance that has been confirmed to be an arrival plan.
:param design_plan_obj:
:param airportInterface:
:param ac_instance:
:return:
"""
gate_name = design_plan_obj.get_gate_node_name()
        # The following only works for an arrival plan
rwy_exit_name = design_plan_obj.get_runway_exit_node_name()
td_node_name = design_plan_obj.get_touchdown_node_name()
print("Gate:{} rwy_exit:{} touchdown:{}".format(gate_name, rwy_exit_name, td_node_name))
# Shortest Path design from the designed route
ac_id = ac_instance.getAcid()
arpt_name = airportInterface.getArrivalAirport(ac_id)
# (ac_id,ariprot_code,startNode,endNode,V2node_latlon,touchdown node_latlon)
#td_node_obj = self.get_node_from_name(td_node_name)
#td_lat_lon = [td_node_obj.get_lat_deg(), td_node_obj.get_lon_deg()]
airportInterface.generate_surface_taxi_plan(ac_id, arpt_name, rwy_exit_name, gate_name, None, td_node_name)
java_plan_strs = airportInterface.getSurface_taxi_plan(ac_id, arpt_name)
# sp_plans = []
# for ii in range(len(sp_plans_jStr)):
# sp_plans.append(str(sp_plans_jStr[ii]))
#
# sp_plans.append(V2_node_name)
default_plan_strs = []
for ii in range(len(java_plan_strs)):
default_plan_strs.append(java_plan_strs[ii])
#default_plan_strs.insert(0,td_node_name)
print("default_arrival_plan:{}".format(default_plan_strs))
return default_plan_strs
def plot_default_taxiplan(self,plt,airportInterface,ac_instance):
"""
Plot the default taxiplan corresponding to the designed plan
When the user designs a taxi plan, a default taxi plan, designed using the shortest path algorithm,
is displayed together. The default taxi plan is obtained using (gate_name,dep_rwy,V2 point) in departure
and (touchdown point, runway exit, gate name) in arrival.
:param plt: matplotlib.pyplot handle
:param airportInterface: NATS airportInterface instance
:param ac_instance: NATS Aircraft Instance
:return:
"""
if self.have_a_designed_route():
designed_taxi_route = self.get_designed_route()
print("designed_plan:{}".format(designed_taxi_route))
design_plan = TaxiPlan(self)
design_plan.read_route_from_design_strings(designed_taxi_route)
#depending on arrival or departure, designed_taxi_route is used differently
if design_plan.is_departure_plan():
                default_plan_str=self.__obtain_default_departure_node_name_sequence(design_plan,\
airportInterface,\
ac_instance)
else: #arrival plan
default_plan_str = self.__obtain_default_arrival_node_name_sequence(design_plan, \
airportInterface, \
ac_instance)
#TaxiPlan instance for default route
default_plan = TaxiPlan(self)
default_plan.read_route_from_NATS_nodes(default_plan_str)
#plot
design_plan.plot_route(plt, label='designed', linecolor='y')
default_plan.plot_route(plt, label='shortest_algorithm', linecolor='r')
plt.legend()
def plot_nodes_lonlat(self, plt,airportInterface=None,ac_instance=None,line_style='om'):
"""
        plot nodes in (lon_deg,lat_deg) format and set up the interactive hover/click handlers
        :param plt: matplotlib.pyplot
        :param airportInterface: optional NATS airportInterface used to draw the default taxi plan
        :param ac_instance: optional NATS Aircraft instance (used with airportInterface)
        :param line_style:
:return:
"""
lonlats=np.zeros((len(self.nodes),2))
for ii in range(len(self.nodes)):
lonlats[ii,0]=self.nodes[ii].get_lon_deg()
lonlats[ii,1]=self.nodes[ii].get_lat_deg()
        #1. Nodes are displayed using a scatter plot.
# c: color s: size (in points)
sc=plt.scatter(lonlats[:,0], lonlats[:,1],marker='o',c='m',s=7)
#2. the scatter plot will be annotated with the airport name
        lon_avg=np.mean(lonlats[:,0])
        lat_max=np.max(lonlats[:,1])
plt.annotate(self.get_airport_name(),xy=(lon_avg,lat_max),color='m')
        #3. The following annotation displays the node name (number) when the mouse is over the node
annot = plt.annotate("", xy=(0, 0), xytext=(10, 10), textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(facecolor='r',arrowstyle="->"))
annot.set_visible(False)
#4.getting fig,ax for later use
fig=plt.gcf()
ax=plt.gca()
        #5. curr_node_str is recorded so it can be used to update the annotation and the route design.
        # Mouse clicks append nodes to the taxi route; once the mouse is double-clicked,
        # further clicks are no longer accepted.
self.curr_node_str=None
self.mouse_click_cid=None
def update_annot(attr_dict):
"""
            The position of the annotation is taken from the mouse-motion event attributes.
            The node name is obtained from the node number (the index of the scatter dot),
            and the text is formed as node_name(node_number).
            :param attr_dict: attributes of the mouse motion event
            :return: annotation text, which is displayed at a specified offset from
            the annotation position
"""
node_num=attr_dict["ind"][0]
pos = sc.get_offsets()[node_num]
annot.xy = pos
#print("attr[ind]:{}".format(attr_dict["ind"]))
node_name=self.nodes[node_num].get_node_name()
text = "{}({})".format(node_name,str(node_num))
if self.nodes[node_num].is_runway_entry(): #if runway
text="{}({})".format(self.node_rwy_map[node_num],str(node_num))
#whenever the mouse is on the node, update the current node
annot.set_text(text)
annot.get_bbox_patch().set_facecolor('y')
annot.get_bbox_patch().set_alpha(0.6)
return text
def hover(event):
"""
When mouse event is contained in the scatter plot, the following happens.
            a. the node string is updated by calling update_annot to retrieve
the node name and number information that the mouse is over
b.the annotation is made visible.
:param event: (mouseevent)
            :return: None (the figure is redrawn)
"""
vis = annot.get_visible()
if event.inaxes == plt.gca():
cont, attr_dict = sc.contains(event)
if cont:
self.curr_node_str=update_annot(attr_dict)
#print("curr_node_str:{}".format(self.curr_node_str))
annot.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
def on_press(event):
"""
This function is called when the mouse button is pressed.
            1. When the mouse is pressed on a node, the node name is appended to
            the list self.routes. The line data (self.xs,self.ys) are updated with the position of the
            mouse and drawn. For the route itself, the node currently on display is collected.
            2. When the mouse is double-clicked, the route is finalized and further clicks
            are no longer accepted.
:param event:
:return:
"""
if event.inaxes!=ax: return
#a. only handles the mouse is clicked on the scattered nodes
vis=annot.get_visible()
if vis: # and the node name is being displayed
if event.dblclick:
#print("event.dblclick:{}".format(event.dblclick))
print("{} will be added to the route.".format(self.curr_node_str))
print("the mouse click will not be accepted anymore")
fig.canvas.mpl_disconnect(self.mouse_click_cid)
print("TaxiRoute:{}".format(self.routes))
self.set_route_completion_flag(True)
#when the user double clicks, a default taxi-route based on the shortest path
#algorithm is also displayed.
if airportInterface: #if there is an airportInstance, then draw default plot
time.sleep(2)
self.plot_default_taxiplan(plt,airportInterface,ac_instance)
else: #single click
print("{} will be added to the route.".format(self.curr_node_str))
#1. save the current node string
self.routes.append(self.curr_node_str)
#2. update the route lines
self.xs.append(event.xdata);self.ys.append(event.ydata)
self.line.set_data(self.xs,self.ys)
self.line.figure.canvas.draw()
        #6. The following sets up the line object and connects the canvas to the mouse events
self.line, = ax.plot([], [], '-y') # empty line
fig.canvas.mpl_connect("motion_notify_event", hover)
self.mouse_click_cid=fig.canvas.mpl_connect("button_press_event",on_press)
def plot_links_lonlat(self, plt, line_style='-m'):
"""
plot links in (lon_deg,lat_deg) format
:param plt: matplotlib.pyplot
:param line_style:
:return:
"""
for lkey, link in self.links.items():
node1_num = lkey[0]
node2_num = lkey[1]
lat1, lon1 = self.nodes[node1_num].get_latlon_deg()
lat2, lon2 = self.nodes[node2_num].get_latlon_deg()
lons = [lon1, lon2]
lats = [lat1, lat2]
node1_name=self.nodes[node1_num].get_node_name()
node2_name=self.nodes[node2_num].get_node_name()
#check if it is a runway segment
line_width=0.5
#if the link connects the nodes of which names start 'Rwy', it is
#treated as a part of runway and colored differently from the other links
if node1_name.startswith('Rwy') and node2_name.startswith('Rwy'):
plt.plot(lons,lats,'-c',linewidth=line_width)
else:
plt.plot(lons, lats, line_style,linewidth=0.5) # plot(lon,lat)
def write_taxiRoute_to_csv(self,out_filename):
"""
        write the designed taxi route to a csv file
        :param out_filename: output csv filename
:return: None
"""
#1. obtain the designed route
d_routes=self.get_designed_route()
#2. out_filename
if d_routes:
fw=open(out_filename,'w')
fw.write("Number of points,{}\n".format(len(d_routes)))
fw.write("node_number, node_name, lat_deg,lon_deg, alt_ft\n")
for node_str in d_routes:
node_name=re.findall("([A-Za-z0-9_]+)",node_str)[0]
node_num_str=re.findall("\(([0-9]+)\)",node_str)[0]
node_num=int(node_num_str)
lat=self.nodes[node_num].get_lat_deg()
lon=self.nodes[node_num].get_lon_deg()
fw.write("{},{},{},{},{}\n".format(node_num,node_name,lat,lon,0))
fw.close()
print("{} has been written.".format(out_filename))
def write_airport_layout_to_kml(self,outdir=None):
"""
        Write a kml file describing the airport layout. The resulting kml file can
        be loaded into Google My Maps for display.
        :param outdir: optional output directory for the kml file
        :return: None. An output file named "AirportLayout_xxxx.kml" is generated, with xxxx denoting the airport id.
"""
#1. header specifying markers for nodes and links
header = self.__get_kml_style()
#2.output filename
kml_outfile="AirportLayout_"+self.get_airport_name()+'.kml'
if outdir:
kml_outfile=os.path.join(outdir,kml_outfile)
fw = open(kml_outfile, 'w')
fw.write(header)
#3. Folder contains specific data for airport layout
fw.write("\n<Folder>\n")
fw.write("<name>" + self.get_airport_name()+ "</name>\n")
fw.write("<open>1</open>\n")
# a.write node first
for node_name,node in self.nodes.items():
pl_str = self.__obtain_node_placemark_str(node)
fw.write(pl_str)
# b.write link
for link_key, link in self.links.items():
pl_str = self.__obtain_link_placemark_str(link)
fw.write(pl_str)
#4.match the closing tags
fw.write("</Folder>\n" + "</Document>\n" + "</kml>")
fw.close()
print("{} has been written.".format(kml_outfile))
def __obtain_node_placemark_str(self,node):
"""
Given node,builds and returns a placemark string for a node
:param node: Node instance
:return: a string of <Placemark>...</Placemark>
"""
pl_str = "<Placemark>\n" + \
"<name>" + node.get_node_name() + "(" + str(node.get_node_num()) + ")" + "</name>\n" + \
"<description>null</description>\n" + \
"<styleUrl>#msn_placemark_circle</styleUrl>\n" + \
"<Point>\n" + \
"<coordinates>" + str(node.get_lon_deg()) + "," + str(node.get_lat_deg()) + ",0 </coordinates>\n" + \
"</Point>\n" + \
"</Placemark>\n"
return pl_str
def __obtain_link_placemark_str(self,link):
"""
Given a link instance, returns a placemark string
:param link: Link
:return: a string of <Placemark>...</Placemark> for link
"""
        node1_num = link.get_start_node_num()
        node2_num = link.get_end_node_num()
        lat1, lon1 = self.nodes[node1_num].get_latlon_deg()
        lat2, lon2 = self.nodes[node2_num].get_latlon_deg()
        link_lat = (lat1 + lat2) / 2.0
        link_lon = (lon1 + lon2) / 2.0
pl_str = "<Placemark>\n" + \
"<name>" + link.get_link_name() + "</name>\n" + \
"<description>null</description>\n" + \
"<styleUrl>#msn_placemark_arrow</styleUrl>\n" + \
"<MultiGeometry>\n" + \
"<Point>\n" + \
"<coordinates>" + str(link_lon) + "," + str(link_lat) + ",0 </coordinates>\n" + \
"</Point>\n" + \
"<LineString>\n" + \
"<tessellate>0</tessellate>\n" + \
"<coordinates>" + str(lon1) + "," + str(lat1) + ",0" + str(lon2) + "," + str(
lat2) + ",0" + "</coordinates>\n" + \
"</LineString>\n" + \
"</MultiGeometry>\n" + \
"</Placemark>\n"
return pl_str
def __get_kml_style(self):
"""
        Returns the kml header string used when writing the airport layout for Google My Maps display.
        To change markers and colors, edit the style properties here.
        :return: header of the kml file that specifies the marker styles
"""
name_str = '<name>GoogleEarth2_' + self.get_airport_name() + "</name>\n"
#The following specifies how nodes and links are displayed in My Maps.
header = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + \
"<kml xmlns=\"http://earth.google.com/kml/2.2\">\n" + \
"<Document xmlns = \"\" >\n" + \
name_str + \
"<StyleMap id = \"msn_placemark_circle\" >\n" + \
"<Pair>\n" + \
"<key> normal </key>\n" + \
"<styleUrl>#sn_placemark_circle</styleUrl>\n" + \
"</Pair>\n" + \
"<Pair>\n" + \
"<key>highlight</key>\n" + \
"<styleUrl>#sh_placemark_circle_highlight</styleUrl>\n" + \
"</Pair>\n" + \
"</StyleMap>\n" + \
"<Style id=\"sn_placemark_circle\">\n" + \
"<IconStyle>\n" + \
"<color>e61bf5ff</color>\n" + \
"<scale>1</scale>\n" + \
"<Icon>\n" + \
"<href>http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png</href>\n" + \
"</Icon>\n" + \
"</IconStyle>\n" + \
"<LineStyle/>\n" + \
"</Style>\n" + \
"<Style id = \"sh_placemark_circle_highlight\" >\n" + \
"<IconStyle>\n" + \
"<color>e61bf5ff</color>\n" + \
"<scale>1.5</scale>\n" + \
"<Icon>\n" + \
"<href>http://maps.google.com/mapfiles/kml/shapes/placemark_circle_highlight.png</href>\n" + \
"</Icon>\n" + \
"</IconStyle>\n" + \
"<LineStyle/>\n" + \
"</Style>\n" + \
"<StyleMap id =\"msn_placemark_arrow\">\n" + \
"<Pair>\n" + \
"<key>normal</key>\n" + \
"<styleUrl>#sn_placemark_arrow</styleUrl>\n" + \
"</Pair>\n" + \
"<Pair>\n" + \
"<key>highlight</key>\n" + \
"<styleUrl>#sh_placemark_arrow_highlight</styleUrl>\n" + \
"</Pair>\n" + \
"</StyleMap>\n" + \
"<Style id =\"sn_placemark_arrow\"> \n" + \
"<IconStyle>\n" + \
"<color> 80ffffff </color>\n" + \
"<scale> 0.3 </scale>\n" + \
"<Icon>\n" + \
"<href>http://maps.google.com/mapfiles/kml/shapes/arrow.png </href >\n" + \
"</Icon>\n" + \
"</IconStyle>\n" + \
"<LineStyle>\n" + \
"<color>e61bf5ff</color>\n" + \
"<width>2</width>\n" + \
"</LineStyle>\n" + \
"</Style>\n" + \
"<Style id =\"sh_placemark_arrow_highlight\">\n" + \
"<IconStyle>\n" + \
"<color>80ffffff</color>\n" + \
"<scale>0.5</scale>\n" + \
"<Icon>\n" + \
"<href>http://maps.google.com/mapfiles/kml/shapes/arrow.png</href>\n" + \
"</Icon>\n" + \
"</IconStyle >\n" + \
"<LineStyle>\n" + \
"<color>e61bf5ff</color>\n" + \
"<width>2</width>\n" + \
"</LineStyle>\n" + \
"</Style>"
return header
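# Illustrative usage sketch (not part of the original module): building an AirportLayout from
# NATS layout data and exporting it to kml. The airportInterface calls follow the docstring of
# initialize_from_NATS_airport_layout(); 'rwydata' (runway-ident to node-name pairs) is assumed
# to be obtained separately, and 'KPHX' is only an example airport code.
def _example_build_airport_layout(airportInterface, rwydata, arpt_code='KPHX'):
    layout = AirportLayout(arpt_code)
    nodemap = airportInterface.getLayout_node_map(arpt_code)
    nodedata = airportInterface.getLayout_node_data(arpt_code)
    linkdata = airportInterface.getLayout_links(arpt_code)
    layout.initialize_from_NATS_airport_layout(nodemap, nodedata, linkdata, rwydata)
    layout.write_airport_layout_to_kml()  # writes AirportLayout_KPHX.kml to the current directory
    return layout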
if __name__=='__main__':
#1. plot backgrounds, US map, center, sector
centerfile = './data/Centers_CONUS'
    # by default, include center boundaries
center_handle = RegionHandler()
center_handle.read_region_file(centerfile,'CENTER')
include_sector=False
if include_sector:
sectorfile = './data/SectorData' # large file. takes time in loading and plotting
sector_handle = RegionHandler()
sector_handle.read_region_file(sectorfile,'SECTOR')
#2.output xmlfile
fname='output_trajectory_ex.xml'
#3.Read Output File
oh=OutFileHandler() #empty (for ease of background plot testing)
if fname.endswith('xml'):
oh.read_xml_file(fname) #read xml file
elif fname.endswith('h5'):
oh.read_hdf5_file(fname)
elif fname.endswith('csv'):
oh.read_csv_file(fname)
else:
raise NameError('The file type of {} is not supported'.format(fname))
    #4. collect callsigns of interest (the commented lines below show airport-specific selections)
#callsigns_of_interest=oh.get_callsigns_departing_from('SFO')
#callsigns_of_interest=oh.get_callsigns_departing_from('PHX')
# no PHX arriving trajectories in the above example
#no SFO arriving trajectories
#callsigns_of_interest = oh.get_trajectories_arriving_at('SFO')
callsigns_of_interest=oh.get_all_callsigns()
    #5. plot with or without background
fig=plt.figure(0)
fig.set_size_inches(9 * 12 / 7, 9)
ax0 = fig.add_axes([0.05, 0.05, 0.9, 0.9])
ax0.set_facecolor('black')
#a.center plot and/or sector plot
center_handle.plot_regions(plt,color='w')
    if include_sector: # takes a long time to plot
sector_handle.plot_regions(plt,color='w')
#b. plot trajectories of interest
oh.plot_trajectories_from_callsigns(callsigns_of_interest,plt)
plt.show()
[file: /getpass.py, repo: TestardR/Python-Tips_Tricks]
from getpass import getpass
# wrong way
# username = input('Username: ')
# password = input('Password: ')
# print('Logging In...')
# with getpass
username = input('Username: ')
password = getpass('Password: ')
print('Logging In...')
[file: /fb_post/views/reply_to_comment/request_response_mocks.py, repo: raviteja1766/fb_post_learning]
REQUEST_BODY_JSON = """
{
"content": "string"
}
"""
RESPONSE_200_JSON = """
{
"reply_id": 1
}
"""
[file: /config.py, repo: crazydev71/cryptokitties-utils-python]
# Set RPC provider
RPC_PROVIDER = 'http://localhost:8545'
contract_address = "0x06012c8cf97BEaD5deAe237070F9587f8E7A266d"
# Cryto Kitties contract ABI
contract_abi = """[{"constant":true,"inputs":[{"name":"_interfaceID","type":"bytes4"}],"name":"supportsInterface","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"cfoAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_tokenId","type":"uint256"},{"name":"_preferredTransport","type":"string"}],"name":"tokenMetadata","outputs":[{"name":"infoUrl","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"promoCreatedCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_tokenId","type":"uint256"}],"name":"approve","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"ceoAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"GEN0_STARTING_PRICE","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_address","type":"address"}],"name":"setSiringAuctionAddress","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"pregnantKitties","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_kittyId","type":"uint256"}],"name":"isPregnant","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"GEN0_AUCTION_DURATION","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"siringAuction","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"},{"name":"_tokenId","type":"uint256"}],"name":"transferFrom","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_address","type":"address"}],"name":"setGeneScienceAddress","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_newCEO","type":"address"}],"name":"setCEO","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_newCOO","type":"address"}],"name":"setCOO","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_kittyId","type":"uint256"},{"name":"_startingPrice","type":"uint256"},{"name":"_endingPrice","type":"uint256"},{"name":"_duration","type":"uint256"}],"name":"createSaleAuction","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"unpause","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"
inputs":[{"name":"","type":"uint256"}],"name":"sireAllowedToAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_matronId","type":"uint256"},{"name":"_sireId","type":"uint256"}],"name":"canBreedWith","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"kittyIndexToApproved","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_kittyId","type":"uint256"},{"name":"_startingPrice","type":"uint256"},{"name":"_endingPrice","type":"uint256"},{"name":"_duration","type":"uint256"}],"name":"createSiringAuction","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"val","type":"uint256"}],"name":"setAutoBirthFee","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_addr","type":"address"},{"name":"_sireId","type":"uint256"}],"name":"approveSiring","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_newCFO","type":"address"}],"name":"setCFO","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_genes","type":"uint256"},{"name":"_owner","type":"address"}],"name":"createPromoKitty","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"secs","type":"uint256"}],"name":"setSecondsPerBlock","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"paused","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"withdrawBalance","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"_tokenId","type":"uint256"}],"name":"ownerOf","outputs":[{"name":"owner","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"GEN0_CREATION_LIMIT","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"newContractAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_address","type":"address"}],"name":"setSaleAuctionAddress","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"balanceOf","outputs":[{"name":"count","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_v2Address","type":"address"}],"name":"setNewAddress","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"secondsPerBlock","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"pause","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"tokensOfOwner","outputs":[{"name":"ownerTokens","type":"uint256[]"}],"payable":false,"stateMutability":"view","typ
e":"function"},{"constant":false,"inputs":[{"name":"_matronId","type":"uint256"}],"name":"giveBirth","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"withdrawAuctionBalances","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"cooldowns","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"kittyIndexToOwner","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_tokenId","type":"uint256"}],"name":"transfer","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"cooAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"autoBirthFee","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"erc721Metadata","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_genes","type":"uint256"}],"name":"createGen0Auction","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"_kittyId","type":"uint256"}],"name":"isReadyToBreed","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PROMO_CREATION_LIMIT","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_contractAddress","type":"address"}],"name":"setMetadataAddress","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"saleAuction","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_id","type":"uint256"}],"name":"getKitty","outputs":[{"name":"isGestating","type":"bool"},{"name":"isReady","type":"bool"},{"name":"cooldownIndex","type":"uint256"},{"name":"nextActionAt","type":"uint256"},{"name":"siringWithId","type":"uint256"},{"name":"birthTime","type":"uint256"},{"name":"matronId","type":"uint256"},{"name":"sireId","type":"uint256"},{"name":"generation","type":"uint256"},{"name":"genes","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_sireId","type":"uint256"},{"name":"_matronId","type":"uint256"}],"name":"bidOnSiringAuction","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":true,"inputs":[],"name":"gen0CreatedCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"geneScience","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_matronId","type":"uint256"},{"name":"_sireId","type":"uint256"}],"name":"breedWithAuto","outputs":[],"payable":true,"stat
eMutability":"payable","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"payable":true,"stateMutability":"payable","type":"fallback"},{"anonymous":false,"inputs":[{"indexed":false,"name":"owner","type":"address"},{"indexed":false,"name":"matronId","type":"uint256"},{"indexed":false,"name":"sireId","type":"uint256"},{"indexed":false,"name":"cooldownEndBlock","type":"uint256"}],"name":"Pregnant","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"from","type":"address"},{"indexed":false,"name":"to","type":"address"},{"indexed":false,"name":"tokenId","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"owner","type":"address"},{"indexed":false,"name":"approved","type":"address"},{"indexed":false,"name":"tokenId","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"owner","type":"address"},{"indexed":false,"name":"kittyId","type":"uint256"},{"indexed":false,"name":"matronId","type":"uint256"},{"indexed":false,"name":"sireId","type":"uint256"},{"indexed":false,"name":"genes","type":"uint256"}],"name":"Birth","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"newContract","type":"address"}],"name":"ContractUpgrade","type":"event"}]"""
[file: /core/migrations/0008_auto_20160523_1427.py, repo: KrishJoshi/TutrWebsite]
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-23 14:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20160523_1112'),
]
operations = [
migrations.AddField(
model_name='curatedstory',
name='dummy_content',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='featuredstory',
name='dummy_content',
field=models.BooleanField(default=False),
),
]
|
[
"me@krishj.com"
] |
me@krishj.com
|
c186767b80617b71aec5141e4d429a6041b9c319
|
3e4e8a8001539f53a745b73003d945cc48d80117
|
/catalog/views.py
|
b5c8829f5e36fbae16f6b99f6676a511dc807bd7
|
[] |
no_license
|
amithah/WeddingWire
|
d61a8727d0cbaf7bec8deb984d26ca847e064be2
|
c1f07dadc0c6c0b6d210398f7ec2b1e4cd87b1af
|
refs/heads/main
| 2022-12-30T19:02:51.273509
| 2020-10-19T12:48:59
| 2020-10-19T12:48:59
| 304,603,328
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
from django.shortcuts import render, redirect, reverse
from .forms import CreateWebsiteForm
from accounts.models import CustomUser
from django.contrib.auth import login
from page.models import Page,Image
import random
import string
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def home(request):
return render(request, 'catalog/home.html', {})
def create_website(request):
# validate form and create a new user
# create a page object and connect user id to that page object
# check the design opted by user
# redirect user to the opted design html , with edit=True
if request.method == 'POST':
form = CreateWebsiteForm(request.POST)
if form.is_valid():
if not CustomUser.objects.filter(email=form.cleaned_data['email']).exists():
user = CustomUser.objects.create(username=get_random_string(6), email=form.cleaned_data['email'])
login(request, user)
page = Page.objects.create(
user=user,
email=form.cleaned_data['email'],
groom_name=form.cleaned_data['groom_name'],
bride_name=form.cleaned_data['bride_name'],
design=form.cleaned_data['design'],
city=form.cleaned_data['city'],
date=form.cleaned_data['date'],
slug=get_random_string(4),
)
return redirect(reverse('design1', args=['edit']))
else:
# redirect user to previously created website
return redirect(reverse('design1', args=['edit']))
else:
if request.user.is_anonymous:
form = CreateWebsiteForm()
return render(request, 'catalog/page_creation_form.html', {'form': form})
else:
return redirect(reverse('design1', args=['edit']))
|
[
"amithah.nithin@gmail.com"
] |
amithah.nithin@gmail.com
|
0b8b0d5be2a7cb8d345900bd945f838a7921e3c1
|
8feeb65c41214fe05d88ab6263a56cf6464ef2c1
|
/Documents/cs124/pa2_triage_starter/NaiveBayes.py
|
510ecf3cf34c175ef58d86969c089434439683a1
|
[] |
no_license
|
cmguyman11/triage
|
6d84ac20ad37e4e8a49b64e66bfb969d0e33ec57
|
52d9b2bddde8fe80dc0ebc48551e71f18d974f71
|
refs/heads/master
| 2020-04-18T01:19:58.629287
| 2019-01-25T07:57:30
| 2019-01-25T07:57:30
| 167,115,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,720
|
py
|
import sys
import getopt
import os
import math
import operator
from timeit import default_timer as timer
class NaiveBayes:
class TrainSplit:
"""Represents a set of training/testing data. self.train is a list of Examples, as is self.dev and self.test.
"""
def __init__(self):
self.train = []
self.dev = []
self.test = []
class Example:
"""Represents a document with a label. klass is 'aid' or 'not' by convention.
words is a list of strings.
"""
def __init__(self):
self.klass = ''
self.words = []
def __init__(self):
"""NaiveBayes initialization"""
self.FILTER_STOP_WORDS = False
self.USE_BIGRAMS = False
self.BEST_MODEL = False
self.stopList = set(self.readFile('data/english.stop'))
self.aid = [] #will want size of this
self.notaid = [] #will want size of this
self.vocab = set() #for the total vocabulary
self.num_docs = 0
self.num_aid_docs = 0
self.num_notaid_docs = 0
self.timesRan = 0
self.logprior_aid = 0
self.logprior_not = 0
self.count_aid = {}
self.count_not = {}
#TODO: add other data structures needed in classify() and/or addExample() below
#############################################################################
# TODO TODO TODO TODO TODO
# Implement the Multinomial Naive Bayes classifier with add-1 smoothing
# If the FILTER_STOP_WORDS flag is true, you must remove stop words
# If the USE_BIGRAMS flag is true, your methods must use bigram features instead of the usual
# bag-of-words (unigrams)
# If either of the FILTER_STOP_WORDS or USE_BIGRAMS flags is on, the other is meant to be off.
# Hint: Use filterStopWords(words) defined below
# Hint: Remember to add start and end tokens in the bigram implementation
# Hint: When doing add-1 smoothing with bigrams, V = # unique bigrams in data.
def classify(self, words):
""" TODO
'words' is a list of words to classify. Return 'aid' or 'not' classification.
"""
if self.FILTER_STOP_WORDS:
words = self.filterStopWords(words)
if not self.USE_BIGRAMS:
# first time, find all counts and add them to a dictionary
if self.timesRan == 0:
self.logprior_aid = math.log(self.num_aid_docs/self.num_docs)
self.logprior_not = math.log(self.num_notaid_docs/self.num_docs)
# for v in self.vocab:
# numAid = self.aid.count(v)
# numNot = self.notaid.count(v)
# self.count_aid[v] = numAid
# self.count_not[v] = numNot
self.timesRan +=1
p_aid = self.logprior_aid
p_not = self.logprior_not
if self.USE_BIGRAMS:
i = 0
words = ['<s>'] + words + ['</s>']
for w in words:
if i != 0:
bigram = (words[i-1], words[i])
numAid = 0
if bigram in self.count_aid:
numAid = self.count_aid[bigram]
numNot = 0
if bigram in self.count_not:
numNot = self.count_not[bigram]
logliklihood_aid = math.log((numAid + 1)/(len(self.aid) + len(self.vocab)))
logliklihood_not = math.log((numNot + 1)/(len(self.notaid) + len(self.vocab)))
p_aid += logliklihood_aid
p_not += logliklihood_not
i+=1
else:
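            # add-1 (Laplace) smoothing below: (count(w, class) + 1) / (tokens in class + |V|)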
for w in words:
numAid = 0
if w in self.count_aid:
numAid = self.count_aid[w]
numNot = 0
if w in self.count_not:
numNot = self.count_not[w]
logliklihood_aid = math.log((numAid + 1)/(len(self.aid) + len(self.vocab)))
logliklihood_not = math.log((numNot + 1)/(len(self.notaid) + len(self.vocab)))
p_aid += logliklihood_aid
p_not += logliklihood_not
if p_aid > p_not:
return 'aid'
else:
return 'not'
# round 1: Train Accuracy: 0.82946878266654
# Dev Accuracy: 0.731441896618733
def addExample(self, klass, words):
self.timesRan = 0
if self.FILTER_STOP_WORDS:
words = self.filterStopWords(words)
self.num_docs+=1
if klass == 'aid':
self.num_aid_docs+=1
else:
self.num_notaid_docs+=1
if self.USE_BIGRAMS:
i = 0
words = ['<s>'] + words + ['</s>']
for w in words:
if i != 0:
bigram = (words[i-1], words[i])
if klass == 'aid':
self.aid.append(bigram)
if bigram in self.count_aid:
self.count_aid[bigram] = self.count_aid[bigram] + 1
else:
self.count_aid[bigram] = 1
elif klass == 'not':
self.notaid.append(bigram)
if bigram in self.count_not:
self.count_not[bigram] = self.count_not[bigram] + 1
else:
self.count_not[bigram] = 1
self.vocab.add(bigram)
i+=1
else:
for w in words:
if klass == 'aid':
self.aid.append(w)
if w in self.count_aid:
self.count_aid[w] = self.count_aid[w] + 1
else:
self.count_aid[w] = 1
elif klass == 'not':
self.notaid.append(w)
if w in self.count_not:
self.count_not[w] = self.count_not[w] + 1
else:
self.count_not[w] = 1
self.vocab.add(w)
"""
* TODO
* Train your model on an example document with label klass ('aid' or 'not') and
* words, a list of strings.
* You should store whatever data structures you use for your classifier
* in the NaiveBayes class.
* Returns nothing
"""
pass
# END TODO (Modify code beyond here with caution)
#############################################################################
def readFile(self, fileName):
"""
* Code for reading a file. you probably don't want to modify anything here,
* unless you don't like the way we segment files.
"""
contents = []
f = open(fileName,encoding="utf8")
for line in f:
contents.append(line)
f.close()
result = self.segmentWords('\n'.join(contents))
return result
def segmentWords(self, s):
"""
* Splits lines on whitespace for file reading
"""
return s.split()
def buildSplit(self,include_test=True):
split = self.TrainSplit()
datasets = ['train','dev']
if include_test:
datasets.append('test')
for dataset in datasets:
for klass in ['aid','not']:
dataFile = os.path.join('data',dataset,klass + '.txt')
with open(dataFile,'r', encoding="utf8") as f:
docs = [line.rstrip('\n') for line in f]
for doc in docs:
example = self.Example()
example.words = doc.split()
example.klass = klass
if dataset == 'train':
split.train.append(example)
elif dataset == 'dev':
split.dev.append(example)
else:
split.test.append(example)
return split
def filterStopWords(self, words):
"""Filters stop words."""
filtered = []
for word in words:
if not word in self.stopList and word.strip() != '':
filtered.append(word)
return filtered
def evaluate(FILTER_STOP_WORDS,USE_BIGRAMS):
classifier = NaiveBayes()
classifier.FILTER_STOP_WORDS = FILTER_STOP_WORDS
classifier.USE_BIGRAMS = USE_BIGRAMS
split = classifier.buildSplit(include_test=False)
for example in split.train:
classifier.addExample(example.klass,example.words)
train_accuracy = calculate_accuracy(split.train,classifier)
dev_accuracy = calculate_accuracy(split.dev,classifier)
print('Train Accuracy: {}'.format(train_accuracy))
print('Dev Accuracy: {}'.format(dev_accuracy))
def calculate_accuracy(dataset,classifier):
acc = 0.0
if len(dataset) == 0:
return 0.0
else:
for example in dataset:
guess = classifier.classify(example.words)
if example.klass == guess:
acc += 1.0
return acc / len(dataset)
def main():
start = timer()
FILTER_STOP_WORDS = False
USE_BIGRAMS = False
(options, args) = getopt.getopt(sys.argv[1:], 'fb')
if ('-f','') in options:
FILTER_STOP_WORDS = True
elif ('-b','') in options:
USE_BIGRAMS = True
evaluate(FILTER_STOP_WORDS,USE_BIGRAMS)
elapsed_time = timer() - start # in seconds
print(elapsed_time)
if __name__ == "__main__":
main()
|
[
"cmguyman11@gmail.com"
] |
cmguyman11@gmail.com
|
d0dd949d535fb7bec5ffcc505d115d5861155aea
|
987f85cf4ba5444de2956f35ebca364eba5811b1
|
/06-django/djangotest5/booktest/search_indexes.py
|
2c1ba615b745c4a30c9dfa9d2828266818df0c91
|
[] |
no_license
|
Acowboyz/road-to-python
|
cfb5623a15a4cae404990389169905cfd42e5a46
|
8009ff1836401898b6c5fc6e38238a8e37f1cb53
|
refs/heads/master
| 2022-12-10T21:33:35.115284
| 2019-07-22T09:01:59
| 2019-07-22T09:01:59
| 125,883,636
| 0
| 0
| null | 2022-05-25T00:40:59
| 2018-03-19T15:55:59
|
Python
|
UTF-8
|
Python
| false
| false
| 321
|
py
|
from haystack import indexes
from .models import TinyInfo
class TinyInfoIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return TinyInfo
def index_queryset(self, using=None):
return self.get_model().objects.all()
|
[
"g4332572@gmail.com"
] |
g4332572@gmail.com
|
388a0f8efd8689e7a5d065d1c190db6766eeb17e
|
8a9a0800d0b55cb17956a37ae0d60b807a007916
|
/main.py
|
02aeba6ac9d960f81dccd09b3dc6335d0a47baad
|
[] |
no_license
|
SimonrLofgren/superAI
|
039f23384a5c8ece5ae1881249c889c633e52727
|
16cd83d261ea3d1676dfe8e00d7732bdffb42b91
|
refs/heads/master
| 2023-03-31T22:02:22.260561
| 2021-04-13T10:53:00
| 2021-04-13T10:53:00
| 357,493,266
| 0
| 0
| null | 2021-04-13T10:53:01
| 2021-04-13T09:17:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,638
|
py
|
import retro
import random
import pickle
from toolbox import printsaved, randomActionFeeder, printButtons
def runStraight():
env = retro.make(game='SuperMarioWorld-Snes')
obs = env.reset()
saved_actions = []
running = True
while running:
action = random.choice([[0, 1, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 0, 0, 0, 1]])
saved_actions.append(action)
#obs, rew, done, info = env.step(env.action_space.sample())
obs, rew, done, info = env.step(action)
#print(info)
#print(rew)
env.render()
if info['death']== 0:
print(info['death'])
done = True
if done:
with open('mysaved/saved_actions.pkl', 'wb') as f:
pickle.dump(saved_actions, f)
running = False
obs = env.reset()
env.close()
def run_saved():
env = retro.make(game='SuperMarioWorld-Snes')
obs = env.reset()
with open('mysaved/saved_actions.pkl', 'rb') as f:
saved_actions = pickle.load(f)
i = 0
running = True
while running:
# obs, rew, done, info = env.step(env.action_space.sample())
obs, rew, done, info = env.step(saved_actions[i])
print(rew)
i += 1
env.render()
if info['death']== 0:
print(info['death'])
done = True
if done:
running = False
obs = env.reset()
env.close()
def runRandom():
env = retro.make(game='SuperMarioWorld-Snes')
obs = env.reset()
saved_actions = randomActionFeeder(700)
i = 0
running = True
while running:
# obs, rew, done, info = env.step(env.action_space.sample())
obs, rew, done, info = env.step(saved_actions[i])
printButtons()
print(saved_actions[i])
print(i)
print(rew)
print(info['x'])
i += 1
env.render()
if info['death'] == 0:
print(info['death'])
done = True
if done:
running = False
obs = env.reset()
env.close()
def runToLearn():
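    # Replays the previously saved action sequence (or a random one if none
    # exists), then continues with fresh random actions; if the run reaches a
    # larger x with a higher reward than the stored best, the random segment
    # is appended to the saved actions and pickled back to disk.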
learning = True
while learning:
done = False
saved_random_actions = randomActionFeeder(700)
env = retro.make(game='SuperMarioWorld-Snes')
obs = env.reset()
try:
with open('mysaved/saved_actions.pkl', 'rb') as f:
saved_actions = pickle.load(f)
except:
saved_actions = randomActionFeeder(300)
i = 0
running = True
while not done:
# obs, rew, done, info = env.step(env.action_space.sample())
obs, rew, done, info = env.step(saved_actions[i])
print(rew)
i += 1
env.render()
if i == len(saved_actions)-2:
i = 0
while not done:
obs, rew, done, info = env.step(saved_random_actions[i])
print(saved_random_actions[i])
print(i)
print(rew)
print(info['x'])
i += 1
env.render()
if i >= len(saved_random_actions):
done = True
if info['death'] == 0:
print(info['death'])
saved_random_actions = saved_random_actions[0: i-10]
done = True
if done:
try:
saved_actions[-2] = float(saved_actions[-2])
except:
saved_actions.append(rew)
saved_actions.append(info['x'])
if info['x'] > saved_actions[-1]:
if rew > saved_actions[-2]:
saved_actions.append(saved_random_actions)
saved_actions.append(rew)
saved_actions.append(info['x'])
with open('mysaved/saved_actions.pkl', 'wb') as f:
pickle.dump(saved_actions, f)
running = False
obs = env.reset()
obs = env.close()
'''if info['death'] == 0:
print(info['death'])
done = True
if done:
running = False
obs = env.reset()'''
#env.close()
def main():
print(retro.data.list_games())
#printsaved()
#runRandom()
#run_saved()
#run_random()
runToLearn()
if __name__ == "__main__":
main()
|
[
"simonrlofgren@hotmail.com"
] |
simonrlofgren@hotmail.com
|
a10cd58f8abbe8d6c04c7e5da74147437b61afb7
|
69fb386daaedbd741ca381b90867e0960035e491
|
/eleicoes/wsgi.py
|
dd917a89cd848c7fc8ece0759e28b407f6f7b13f
|
[] |
no_license
|
Maethorin/eleicoes
|
981ad80c3d3f4835cac0f7fe229c4c779e49b465
|
bcc167a72847701a128c81df04dfc5fdf90af2ea
|
refs/heads/master
| 2016-09-06T15:09:09.084024
| 2014-10-23T08:20:09
| 2014-10-23T08:20:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
"""
WSGI config for eleicoes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eleicoes.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
|
[
"maethorin@gmail.com"
] |
maethorin@gmail.com
|
39c1115e6af8e673b376de250e94787fb20087a3
|
4d4e7f42c5d749f74d52b1d66e581364604fce50
|
/bd-srv-v10.py
|
7d9b24b44054a284c1675c1f3f5e3a642a777bc1
|
[] |
no_license
|
Hack-Things/Py-IvzDoggz
|
799cead3d86bf56168855a7c5599cca32cebae33
|
c519b11a1837085746ad291c90cd132aed63ad51
|
refs/heads/master
| 2021-06-03T00:21:25.389420
| 2016-09-26T09:07:47
| 2016-09-26T09:07:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import socket
import sys
import argparse
parser=argparse.ArgumentParser()
parser.add_argument('-p','--port',required=True,help='Port to listen on',dest='port')
args=parser.parse_args()  # read the listening port from -p/--port
port=int(args.port)
print "Welcome to your bot controller! \nUse the command 'qu1t' to end your session and 'k1ll' to kill the backdoor\n"
mysocket=socket.socket()
mysocket.bind(("",port))
mysocket.listen(1)
connection,fromaddr = mysocket.accept()
while True:
request = connection.recv(10240)
print "from bot: \n" + str(request.decode("rot13"))
command = raw_input("enter next command: \n").encode("rot13")
connection.send(command)
|
[
"noreply@github.com"
] |
Hack-Things.noreply@github.com
|
4f9ad6e13ac23bce940fe991444a311e21bcbf9d
|
3356a44c2a8a76ed88d8a55730177382bcd94b6e
|
/website/docroot/files/test.data.py
|
919188ced61d8162243d9450d24de6b8f588e9bc
|
[] |
no_license
|
spe-sa/purly
|
ad5d19ffec8af3471dd5f0b7fbd2f5142faf43f5
|
2df29d6d1bac215e9997418bdba8ffd29308f8b0
|
refs/heads/master
| 2021-01-19T06:27:46.340562
| 2017-08-03T15:47:32
| 2017-08-03T15:47:32
| 87,463,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
# from mainsite.models import Web_Region
context = {'title': 'my static title',
'description': 'my static description',
'data': 'my static data',
}
def get_context(request):
# region_list = Web_Region.objects.values_list('region_name', flat=True)
context.update({'data': 'my dynamic data'})
return context
|
[
"bfountain@spe.org"
] |
bfountain@spe.org
|
36eb117d722a41a6556f88b6a05cc7b24ff2a421
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayMarketingToolXuanyitestCreateModel.py
|
6d3088b2f5601c4d6005d68b68843af5d4549abe
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,507
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TransferResultInfo import TransferResultInfo
class AlipayMarketingToolXuanyitestCreateModel(object):
def __init__(self):
self._test_12 = None
@property
def test_12(self):
return self._test_12
@test_12.setter
def test_12(self, value):
if isinstance(value, list):
self._test_12 = list()
for i in value:
if isinstance(i, TransferResultInfo):
self._test_12.append(i)
else:
self._test_12.append(TransferResultInfo.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.test_12:
if isinstance(self.test_12, list):
for i in range(0, len(self.test_12)):
element = self.test_12[i]
if hasattr(element, 'to_alipay_dict'):
self.test_12[i] = element.to_alipay_dict()
if hasattr(self.test_12, 'to_alipay_dict'):
params['test_12'] = self.test_12.to_alipay_dict()
else:
params['test_12'] = self.test_12
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingToolXuanyitestCreateModel()
if 'test_12' in d:
o.test_12 = d['test_12']
return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
62d9ae7118dc620e594d52da3a75907162e5e269
|
6e4e0d430303255b1792961af1da5e20db2236f1
|
/tests/ext/test_profile_providers.py
|
945634c7bb8ca4a9ae37a8153ca904acccc3f1cd
|
[
"BSD-2-Clause"
] |
permissive
|
suminb/finance
|
b051690c859633444080fdaec3f69bb09b452ddf
|
ab298b647b1b47fec2780415c8d2d1104d1244e4
|
refs/heads/develop
| 2023-08-19T19:05:48.795066
| 2023-07-23T09:41:40
| 2023-07-23T09:41:40
| 27,952,702
| 148
| 13
|
BSD-4-Clause
| 2023-07-23T09:41:41
| 2014-12-13T07:05:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
from math import isnan
import os
import pytest
from finance.ext.profile import fetch_profile
from finance.ext.profile.base import BaseProfile
from finance.ext.profile.naver_finance import NaverProfile
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
def test_invalid_provider():
with pytest.raises(ValueError):
fetch_profile("unknown", "063170")
def test_naver_profile():
path = os.path.join(BASE_PATH, "063170.html")
with open(path) as fin:
raw_sample = fin.read()
profile = NaverProfile("063170")
profile.parse(raw_sample)
assert profile.name == "서울옥션"
assert profile.current_price == 4470
assert profile.outstanding_shares == 16917500
assert profile.market_cap == 4470 * 16917500
assert profile.eps == -395
assert isnan(profile.per)
assert profile.bps == 4344
assert profile.pbr == pytest.approx(1.03, 0.01)
def test_fetch_naver_profile():
profile = fetch_profile("naver", "005430")
assert profile.name == "한국공항"
# NOTE: Testing with live data. Some information is unknown at the time of
# writing code.
assert profile.current_price > 0
assert profile.outstanding_shares > 0
assert profile.eps != 0
assert profile.bps > 0
|
[
"suminb@gmail.com"
] |
suminb@gmail.com
|
2f4f454e71165a375de76916c65d7782ccd66f01
|
1068e00d3d54ce780ec1b691fb6e3fc9591eebaf
|
/Tests.py
|
220453200f4254ba36e2e1fbe8b8a1d112b68161
|
[] |
no_license
|
freQuensy23-coder/wikiDownloader
|
c384460cb77028f00fab2b4cc22d2cf5a1cd4288
|
c31686ca886d7ac1e059b3b4db896020cae748b6
|
refs/heads/main
| 2023-04-04T06:36:26.669143
| 2021-04-14T07:16:08
| 2021-04-14T07:16:08
| 358,676,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
from unittest import TestCase
from ThreadingManager import tasks_divider
class Tester(TestCase):
def setUp(self) -> None:
pass
def test_tasks_divider_normal_list(self):
tasks1 = [1, 2, 3, 4, 5, 6]
self.assertEqual(tasks_divider(tasks1, 3), [[1, 2, 3], [4, 5, 6]])
def test_tasks_divider_short_list(self):
tasks2 = [1]
self.assertEqual(tasks_divider(tasks2, 5), [[1]])
def test_tasks_divider_empty_list(self):
tasks3 = []
self.assertEqual(tasks_divider(tasks3, 6), [[]])
def test_tasks_divider_very_long_list(self):
tasks4 = [1] * 99
self.assertEqual(len(tasks_divider(tasks4, 17)), 99 // 17 + 1)
|
[
"you@example.com"
] |
you@example.com
|
4cc71ba466fee307d4536c4be3dab26e2c7b0941
|
9e97c2902601e794d093e874e1e9688e6ab0aa7c
|
/users/migrations/0002_profile.py
|
e50fa34f7bcc6b71485ce7b5f462d848882ea816
|
[] |
no_license
|
FaisalWant/jobPortal
|
b1a66f502f41543d1234665bce858fc2be308f5a
|
97fb14fd7543e00b594be80fb3fe0db110d02b61
|
refs/heads/master
| 2023-04-17T12:44:43.292346
| 2021-04-27T17:36:16
| 2021-04-27T17:36:16
| 351,038,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
# Generated by Django 3.0 on 2021-03-30 18:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='static-media/users')),
('birth_day', models.DateField(blank=True, default=None, null=True)),
('location', models.CharField(blank=True, max_length=100)),
('resume', models.TextField(blank=True)),
('company', models.CharField(blank=True, max_length=250)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"heliconrngr@gmail.com"
] |
heliconrngr@gmail.com
|
183263a4e827ad7235a4830e749647fcc891ee88
|
08b5df2a27033516b4f0529f739604e69596cb2c
|
/apps/sports/models/metric.py
|
47df05a268cc7fd277216b2c6759ff0e05fb9a67
|
[] |
no_license
|
otimtony/b1gplay
|
24c5d51641033b0444edb6ecabee753648041b28
|
c1131666c9191bd93880f7ac9f800567013cf5eb
|
refs/heads/master
| 2020-05-04T18:03:05.117692
| 2019-03-29T08:47:46
| 2019-03-29T08:47:46
| 179,337,221
| 0
| 0
| null | 2019-04-03T17:22:52
| 2019-04-03T17:22:52
| null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
from django.db import models
from django_extensions.db.models import TimeStampedModel
import uuid
from apps.sports.models import Sport
class Metric(TimeStampedModel):
"""
Represents a way of measuring a players attributes on a given sport
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
sport = models.ForeignKey(Sport, on_delete=models.CASCADE)
metric = models.CharField(max_length=50)
description = models.TextField()
class Meta:
verbose_name = "Metric"
verbose_name_plural = "Metrics"
db_table = 'metric'
def __str__(self):
return self.description
|
[
"nbaleeta@gmail.com"
] |
nbaleeta@gmail.com
|
a00ce178b3bb25786b6273b5e3ea07db818002ca
|
0879d849ab998fde1866e98c2fa9dcdc7a1e0fc2
|
/leads/serializers.py
|
8a869b319a20c53ab5e4a2acc509501bfbfd0a95
|
[] |
no_license
|
conor87/django_react_tutorial
|
f9362c2361b823f19256dbc74fd052e5bd0b03fc
|
a8f381fc7d0d0e4e7f6e0e4e861ab6ff5ecdacff
|
refs/heads/master
| 2022-05-30T19:56:11.059764
| 2020-05-03T16:46:29
| 2020-05-03T16:46:29
| 259,084,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
from rest_framework import serializers
from leads.models import Lead
# Lead serializer
class LeadSerializer(serializers.ModelSerializer):
class Meta:
model = Lead
fields = '__all__'
|
[
"karoldabko@gmail.com"
] |
karoldabko@gmail.com
|
aaa69538a454bb572eba9f6cc4bde5b507d5fd11
|
3f82db7dbcd11d56f15a547ce19946cab3632e17
|
/blog/venv/bin/pip3.8
|
48fa967e0380d03a271661ffd40fb366f591ec1e
|
[] |
no_license
|
daparducci/first_pytest
|
8894faaa7bb4634672999f50550a741223c11a63
|
d3db53f2cb14967dc51e3544b5776c6e3db67925
|
refs/heads/master
| 2020-12-26T11:47:33.979756
| 2020-01-31T19:09:11
| 2020-01-31T19:09:11
| 237,498,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
8
|
#!/Users/dominic/code/testingPythonApps/blog/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"dominic@dominicWell.local"
] |
dominic@dominicWell.local
|
b45e9fe9e31d59042ec3b2d73c44e05dea4f80b1
|
1d2bbeda56f8fede69cd9ebde6f5f2b8a50d4a41
|
/medium/python/c0108_223_rectangle-area/00_leetcode_0108.py
|
cab8907baa5a2f6249fc7693afbb753f480aa572
|
[] |
no_license
|
drunkwater/leetcode
|
38b8e477eade68250d0bc8b2317542aa62431e03
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
refs/heads/master
| 2020-04-06T07:09:43.798498
| 2018-06-20T02:06:40
| 2018-06-20T02:06:40
| 127,843,545
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#223. Rectangle Area
#Find the total area covered by two rectilinear rectangles in a 2D plane.
#Each rectangle is defined by its bottom left corner and top right corner as shown in the figure.
#Assume that the total area is never beyond the maximum possible value of int.
#Credits:
#Special thanks to @mithmatt for adding this problem, creating the above image and all test cases.
#class Solution(object):
# def computeArea(self, A, B, C, D, E, F, G, H):
# """
# :type A: int
# :type B: int
# :type C: int
# :type D: int
# :type E: int
# :type F: int
# :type G: int
# :type H: int
# :rtype: int
# """
# Time Is Money
|
[
"Church.Zhong@audiocodes.com"
] |
Church.Zhong@audiocodes.com
|
e538d5ed698c57999aea523512035efc0f95c9ae
|
8a212396ac639720c3a9bacb03d400ed81db0d67
|
/faceseg/__init__.py
|
c0bd75cfe30c6deff048dde0fc64c8ba8692dc27
|
[
"MIT"
] |
permissive
|
yushuinanrong/enhanced-UGATIT
|
74963c380cbdcec08aed96be9715ed2361283ded
|
5294a6ffd2d6f41f4b83ae901c6edb98f02cadc6
|
refs/heads/main
| 2023-07-13T06:10:06.418732
| 2021-08-23T10:03:12
| 2021-08-23T10:03:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58
|
py
|
# ref: https://github.com/zllrunning/face-parsing.PyTorch
|
[
"zheng.yuwei@foxmail.com"
] |
zheng.yuwei@foxmail.com
|
350f5a424ba769f69e84d2993fa720750894d9c6
|
6e93c79d9df2a3b17ff797803f3cc4b09072fd42
|
/day_4/3_whatsyourname.py
|
f43798ddeafe5dd167b6e45a4822c773d989adf7
|
[] |
no_license
|
Koorimikiran369/Innomatics_Internship
|
d01192a6aed87a2bbf1f4f530225a60365bfac99
|
45d4a3c04bded5dc29e0b156742e6fdb77c1f544
|
refs/heads/main
| 2023-04-11T08:13:49.917768
| 2021-04-21T06:32:46
| 2021-04-21T06:32:46
| 367,670,015
| 1
| 0
| null | 2021-05-15T15:43:39
| 2021-05-15T15:43:39
| null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
def print_full_name(a, b):
print("Hello {} {}! You just delved into python.".format(a,b))
if __name__ == '__main__':
first_name = input()
last_name = input()
print_full_name(first_name, last_name)
|
[
"noreply@github.com"
] |
Koorimikiran369.noreply@github.com
|
5dffb89b2d119c6930deaf7e3ecb6ff1ccd16682
|
a796865c5ff4dcb7c6a0c848364bd6f7cb3d7a29
|
/chazutsu/datasets/imdb.py
|
f5b42733d4f49347b084c84ef9c8704ca7ffb391
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC-BY-SA-4.0"
] |
permissive
|
yk/chazutsu
|
d625f6f7f682d713910ce59953841e507ad27262
|
ecc42c9ff0f8d47632ba4b4c7385a5fdf4386c10
|
refs/heads/master
| 2020-03-18T19:43:40.016170
| 2018-05-28T14:52:00
| 2018-05-28T14:52:00
| 135,173,517
| 0
| 0
|
Apache-2.0
| 2018-05-28T14:41:16
| 2018-05-28T14:41:16
| null |
UTF-8
|
Python
| false
| false
| 3,375
|
py
|
import os
from chazutsu.datasets.framework.dataset import Dataset
from chazutsu.datasets.framework.resource import Resource
class IMDB(Dataset):
def __init__(self):
super().__init__(
name="Large Movie Review Dataset(IMDB)",
site_url="http://ai.stanford.edu/~amaas/data/sentiment/",
download_url="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", # noqa
description="Movie review data is constructed by 25,000 reviews " \
"that have positive/negative annotation"
)
def download(self,
directory="", shuffle=True, test_size=0, sample_count=0,
force=False):
if test_size != 0:
raise Exception("The dataset is already splitted to train & test.")
return super().download(directory, shuffle, 0, sample_count, force)
def prepare(self, dataset_root, extracted_path):
extracted_dir = os.path.join(extracted_path, "aclImdb")
data_dirs = ["train", "test"]
pathes = []
for d in data_dirs:
target_dir = os.path.join(extracted_dir, d)
file_path = os.path.join(dataset_root, "imdb_" + d + ".txt")
self.label_by_dir(
file_path, target_dir, {"pos": 1, "neg": 0}, task_size=1000)
pathes.append(file_path)
if d == "train":
unlabeled = os.path.join(dataset_root, "imdb_unlabeled.txt")
self.label_by_dir(
unlabeled, target_dir, {"unsup": None}, task_size=1000)
pathes.append(unlabeled)
return pathes[0]
def make_resource(self, data_root):
return IMDBResource(data_root)
@classmethod
def _parallel_parser(cls, label, path):
features = cls._file_to_features(path)
if label is not None:
line = "\t".join([str(label)] + features) + "\n"
else:
line = "\t".join(features) + "\n" # unlabeled
return line
@classmethod
def _file_to_features(cls, path):
        # override this method if you want to implement a custom process
file_name = os.path.basename(path)
f, ext = os.path.splitext(file_name)
els = f.split("_")
rating = 0
if len(els) == 2:
rating = els[-1]
review = ""
with open(path, encoding="utf-8") as f:
lines = f.readlines()
lines = [ln.replace("\t", " ").strip() for ln in lines]
review = " ".join(lines)
if rating != "0":
return [rating, review]
else:
return [review]
class IMDBResource(Resource):
def __init__(self,
root,
columns=None, target="",
separator="\t", pattern=()):
super().__init__(
root,
["polarity", "rating", "review"],
"polarity",
separator,
{
"train": "_train",
"test": "_test",
"valid": "_valid",
"unlabeled": "_unlabeled",
"sample": "_samples"
})
@property
def unlabeled_file_path(self):
return self._get_prop("unlabeled")
def unlabeled_data(self, split_target=False):
return self._get_data("unlabeled", split_target)
|
[
"icoxfog417@yahoo.co.jp"
] |
icoxfog417@yahoo.co.jp
|
d8bb5a8e7e912929f97f2725224f9fe07f6645ea
|
08021cfc795dc9663f5f8c595d810ef42f416269
|
/gui using wxPython/FrameInstant.py
|
e40a9f094b738e744fda8ab836e226cfebdcc429
|
[] |
no_license
|
ykim879/python
|
4891e4ed4a2f9073d93f5989e45ada6b752ae2ab
|
58283b807b675d9a580dbed74026bc09788ea3e4
|
refs/heads/master
| 2022-07-05T14:29:28.991726
| 2020-05-12T22:37:21
| 2020-05-12T22:37:21
| 263,126,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, parent = None, title = 'check the user whether frame wanna be closed')
self.Bind(wx.EVT_CLOSE, self.OnClose)
def OnClose(self, event):
if wx.MessageBox("Wanna close the window?",
"please select the button",
wx.YES_NO) != wx.YES:
event.Skip(False) #do not close the window
else:
            self.Destroy() # destroy the frame and close the window
if __name__ == "__main__":
print("This is main method")
app = wx.App()
frame = MyFrame()
frame.Show(True)
app.MainLoop()
|
[
"59812671+ykim879@users.noreply.github.com"
] |
59812671+ykim879@users.noreply.github.com
|
d2ce96320d527c518bebd6228f86f7c1f5543780
|
e41b33f08527164a2d4ed0c4146942a5018f8d4c
|
/lpthw-exercises/ex01.py
|
ed29b6e4dc1ed56988cf67df117cc7ce63cdff7a
|
[] |
no_license
|
clair3st/python-prework
|
42300d0801bc37a2dfa82f296c9c9d0b3931e0dd
|
f535bca5b7744b867cbb4454d832b31578c52aba
|
refs/heads/master
| 2020-09-20T07:29:44.102851
| 2016-12-11T07:15:18
| 2016-12-11T07:15:18
| 67,270,154
| 0
| 0
| null | 2016-12-11T07:11:53
| 2016-09-03T03:57:54
|
Python
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
#exercise 1: a good first program
#source: Learn Python the Hard Way ed3
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print 'Yay! Printing. '
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
|
[
"clairejgatenby@gmail.com"
] |
clairejgatenby@gmail.com
|
01605737b22095e5d725dc4845f7a7f4c3cb4956
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/7/rq7.py
|
30baec3bde8d82ec49d254be76caa965c2074d15
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'rQ7':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
e1df67e10d3f856d1df676736e3ba774a668c2e9
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part004784.py
|
de1ef04b6fa59401f563bc1e1ad58ea88a73f13e
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher144968(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher144968._instance is None:
CommutativeMatcher144968._instance = CommutativeMatcher144968()
return CommutativeMatcher144968._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 144967
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
1c25094fd14ac35a1a078dcd42fd767e0060d645
|
934d61923a73d6aaee0ffeb61ad9410abe17a1d0
|
/plugins/video/onet.TV/resources/lib/biznes.py
|
87e35fc78a9fc307d45eda2c63719fbde458634d
|
[] |
no_license
|
quickbreach/xbmc-addons
|
151cdec2a946e63f7fc2d715156bedf9c308492c
|
7b7d3518506fbf4f590ad69ee1027a485169c03d
|
refs/heads/master
| 2021-05-29T20:31:38.134618
| 2012-05-25T23:40:08
| 2012-05-25T23:40:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
import os
import sys
import xbmc
import xbmcgui
import xbmcplugin
import urllib
HOME_DIR = os.getcwd()
names = xbmc.Language( HOME_DIR ).getLocalizedString
rai = (names (30015))
alL = (names (33333))
class Main:
def __init__( self ) :
self.getNames()
def getNames(self):
gl=[
("TVN",'http://www.onet.tv/feed/getMoviesCategoryOrTagsDate,15,1,desc,movies.xml?category=11&tags=%28TVN%29&rss=1'),
("TVN24",'http://www.onet.tv/feed/getMoviesCategoryOrTagsDate,15,1,desc,movies.xml?category=11&tags=%28TVN24%29&rss=1'),
("TVN CNBC",'http://www.onet.tv/feed/getMoviesCategoryOrTagsDate,15,1,desc,movies.xml?category=11&tags=%28TVN_CNBC%29&rss=1'),
("TVN WARSZAWA",'http://www.onet.tv/feed/getMoviesCategoryOrTagsDate,15,1,desc,movies.xml?category=11&tags=%28TVN_Warszawa%29&rss=1'),
("CNN",'http://www.onet.tv/feed/getMoviesCategoryOrTagsDate,15,1,desc,movies.xml?category=11&tags=%28CNN%29&rss=1'),
("Reuters",'http://www.onet.tv/feed/getMoviesCategoryOrTagsDate,15,1,desc,movies.xml?category=11&tags=%28Reuters%29&rss=1'),
# ("BIZNES ONET.PL",''),
(rai,'http://www.onet.tv/feed/getMoviesCategoryOrTagsDate,15,1,desc,movies.xml?category=11&tags=%28Kolej_TV%29&rss=1'),
(alL,'http://www.onet.tv/feed/getMoviesCategoryOrTagsDate,15,1,desc,movies.xml?category=11&rss=1')
]
for name, url in gl:
li=xbmcgui.ListItem(name)
u=sys.argv[0]+"?RSS&po_co="+"&url="+urllib.quote_plus(url)
xbmcplugin.addDirectoryItem(int(sys.argv[1]),u,li,True)
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
|
[
"pajret@753fbab9-553a-0410-a42e-33068982fbc4"
] |
pajret@753fbab9-553a-0410-a42e-33068982fbc4
|
3a6b7b2df9594b26c15c025bd2c462daf298ac5c
|
22419cd2b08235318400d4514077100dd95fabd2
|
/blog_app/blog_app/settings.py
|
1977d76a2a4baea2ae82eb67386e7d543ee016d3
|
[] |
no_license
|
shindesud/Blog_Application
|
2bc2023a44a8b981c5aeef78b67fdd54f963f98c
|
060c31a724b94561424a772a8647e67812798b54
|
refs/heads/master
| 2023-01-18T19:55:24.948769
| 2020-11-25T19:13:21
| 2020-11-25T19:13:21
| 316,034,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,337
|
py
|
"""
Django settings for blog_app project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n2db)e8%3$qj+9p5*zokc0*i&-rg0z0=$*hcfpu3@m8uz8#c^l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'article',
'crispy_forms'
]
CRISPY_TEMPLATE_PACK= 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
import os
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR,"staticfiles")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"shindesudarshan1997@gmail.com"
] |
shindesudarshan1997@gmail.com
|
16641e79a0456f31fd2389f72f184f4534eda8a1
|
1439d2fc587c964e3e040f46cf6c81bed22cbae1
|
/Bing_STT_API/Source/main.py
|
2ec1402ac2ca0770925563c62b9f9e1e9ab43da2
|
[] |
no_license
|
gilberto233/Show-me-the-code-project-used
|
eac5eea0cdee71d3d003cfe836e2bd2fe459b07c
|
45df803dff6240d4ba22510fae3fd3e647a76d53
|
refs/heads/master
| 2021-01-01T19:14:39.987350
| 2017-10-28T03:14:55
| 2017-10-28T03:14:55
| 98,550,523
| 0
| 0
| null | 2017-07-28T18:20:02
| 2017-07-27T15:10:27
|
Python
|
UTF-8
|
Python
| false
| false
| 332
|
py
|
"""
The main entry of STT project.
"""
#!/usr/bin/env python
import Record, STT_API
class function_entry():
def __init__(self):
self.__start__()
def __start__(self):
instance = Record.Record()
STT_API.STT_API( instance.get_file_name() )
if __name__ == '__main__':
instance = function_entry()
|
[
"noreply@github.com"
] |
gilberto233.noreply@github.com
|
04d67e916e482526c220890afd7b718aaabe554e
|
606b62a51ad48c12d8734029c917c0e7bbb172a3
|
/day7.py
|
ca92b745f1c7158291cdffd6f43848428fbdd37d
|
[] |
no_license
|
darrenvong/AdventOfCode
|
11df5e2e0829d031e5b0d3e0dab615b2a0056284
|
4be9a5426a07c9ec5cc221e88f1af82fbcab1fde
|
refs/heads/master
| 2021-09-02T06:42:15.895328
| 2017-12-31T04:17:42
| 2017-12-31T04:28:13
| 113,380,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,131
|
py
|
"""
Solution for Day 7 of Advent of Code 2017.
Problem: Recursive Circus
Part 1: What's the name of the bottom program?
Part 2: Given that there's only one program with the wrong weight and causes
the program stack to be imbalanced, what should the correct weight of that
faulty program be?
"""
from collections import Counter
def build_nodes(input_file):
nodes = {}
with open(input_file) as f:
for line in f:
row = line.split()
try:
arrow_idx = row.index("->")
children = [c.strip(",") for c in row[arrow_idx + 1:]]
nodes[row[0]] = int(row[1].strip("()")), children
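            # row.index("->") raises ValueError when the program has no children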
except ValueError:
nodes[row[0]] = int(row[1].strip("()")), []
return nodes
def get_name_of_bottom_program(nodes):
prog_list = set(nodes.keys())
prog_has_parent = set() # keeps track of whether a program has parent
for _, (_, children) in nodes.items():
for child in children:
prog_has_parent.add(child)
diff = prog_list - prog_has_parent
return diff.pop()
def get_branch_weights(root, nodes):
_, children = nodes[root]
branch_weights = {c: calculate_node_weight(nodes, c) for c in children}
return branch_weights
def get_imbalanced_branch(branch_weights):
"""Gets the base node (program) which has an imbalanced branch."""
weight_counts = Counter(branch_weights.values())
for prog, weight in branch_weights.items():
if weight_counts[weight] == 1:
return prog
# if it hits this point, all branches must be balanced
return None
def find_faulty_prog(root, nodes):
branch_weights = get_branch_weights(root, nodes)
imb_root_node = get_imbalanced_branch(branch_weights)
if imb_root_node is None:
# all branches balanced, so current root must be the culprit
return root
else:
faulty = find_faulty_prog(imb_root_node, nodes)
return faulty
def find_correct_weight(faulty_prog, root_prog, nodes):
branch_weights = get_branch_weights(root_prog, nodes)
weight_counts = Counter(branch_weights.values())
ws = map(lambda wc: wc[0], sorted(weight_counts.items(), key=lambda wc: wc[1]))
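    # sorting by count puts the odd-one-out branch total first and the common
    # total second, so err_margin is how far the faulty branch is off balance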
err_margin = (lambda x, y: x - y)(*ws)
return nodes[faulty_prog][0] - err_margin
def calculate_node_weight(nodes, cur_node):
"""Variant of depth first search"""
weight, children = nodes[cur_node]
# base case - no children
if not children:
return weight
else:
total = 0
for c in children:
total += calculate_node_weight(nodes, c)
return total + weight
if __name__ == '__main__':
nodes = build_nodes("day7_input.txt")
root_prog = get_name_of_bottom_program(nodes)
print("=" * 15, "Part 1", "=" * 15)
print(f"The name of the bottom program is {root_prog}")
print()
print("=" * 15, "Part 2", "=" * 15)
faulty_prog = find_faulty_prog(root_prog, nodes)
print(f"The faulty program: {faulty_prog}. "
"The correct weight should be "
f"{find_correct_weight(faulty_prog, root_prog, nodes)}")
|
[
"d.vongy@gmail.com"
] |
d.vongy@gmail.com
|
3b9c562603bd95678e9dbcc110f8d7626d356c8b
|
6a774d9347a0a224b396afd5b4828ad7a751035c
|
/chat/app.py
|
364d22c22c2af840127525cba3a10808f27ecd84
|
[] |
no_license
|
oscarlaf03/stock_chat
|
09272f06d26d0725b8b3cc770e9d582c40d3077e
|
8d1b5027bd834725b51c7eb336749c21c7dd730c
|
refs/heads/master
| 2022-12-05T00:09:48.271535
| 2020-08-19T13:18:03
| 2020-08-19T13:18:03
| 288,295,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,666
|
py
|
from flask import Flask, redirect, render_template, request, url_for
from datetime import datetime
from flask_login import (LoginManager, current_user, login_required,
login_user, logout_user)
from flask_socketio import SocketIO, join_room, leave_room
from pymongo.errors import DuplicateKeyError
from .db import (add_room_members, get_room, get_room_members,
get_rooms_for_user, get_user, is_room_admin, is_room_member,
remove_room_members, save_room, save_user, update_room, save_message, get_messages)
from .models.user import User
from .broker.publisher import Publisher
app = Flask(__name__)
app.secret_key = 'my_secret_key'
socketio = SocketIO(app, message_queue='redis://localhost:6379//')
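# the Redis message_queue allows Socket.IO events to be emitted from outside the web process as well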
login_manager = LoginManager()
login_manager.init_app(app)
@app.route('/')
def home():
rooms = []
if current_user.is_authenticated:
rooms = get_rooms_for_user(current_user.username)
return render_template('index.html', rooms=rooms,rooms_size= len(rooms))
@app.route('/login/', methods=['GET', 'POST'])
def login():
message = ''
if request.method == 'POST':
username = request.form.get('username')
password_input = request.form.get('password')
user = get_user(username)
if user and user.check_password(password_input):
login_user(user)
return redirect(url_for('home'))
else:
message = 'Bad login request'
return render_template('login.html', message=message)
@app.route('/logout/')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/create-room/', methods=['GET', 'POST'])
@login_required
def create_room():
message = ''
if request.method == 'POST':
room_name = request.form.get('room_name')
usernames = [username.strip()
for username in request.form.get('members').split(',')]
if len(room_name) and len(usernames):
room_id = save_room(room_name, current_user.username)
if current_user.username in usernames:
usernames.remove(current_user.username)
add_room_members(room_id, room_name, usernames,
current_user.username)
return redirect(url_for('home'))
else:
message = 'Failed to create room'
return render_template('create_room.html', message=message)
@app.route('/rooms/<room_id>/edit/', methods=['GET','POST'])
@login_required
def edit_room(room_id):
room = get_room(room_id)
if room and is_room_admin(room_id, current_user.username):
room_members = [ member['_id']['username'] for member in get_room_members(room_id) ]
room_members_str = ",".join(room_members)
message =''
if request.method == 'POST':
room_name = request.form.get('room_name')
room['name'] = room_name
update_room(room_id, room_name)
request_members = [ username.strip() for username in request.form.get('members').split(',') ]
members_to_add = list(set(request_members) - set(room_members))
members_to_remove = list(set(room_members) - set(request_members))
if len(members_to_add):
add_room_members(room_id,room_name, members_to_add, current_user.username)
if len(members_to_remove):
remove_room_members(room_id, members_to_remove)
room_members_str = ",".join(request_members)
message = 'Room edited succesfully'
return render_template('edit_room.html', room=room, room_members_str=room_members_str, message=message)
else:
return "Room not found", 404
@app.route('/rooms/<room_id>/')
def view_room(room_id):
username = current_user.username
room = get_room(room_id)
if room and is_room_member(room_id, current_user.username):
room_members = get_room_members(room_id)
messages = get_messages(room_id)
return render_template('view_room.html', username=username, room=room, room_members=room_members, messages=messages)
else:
return "Room not found", 404
@socketio.on('send_message')
def handle_send_message(data):
Publisher(data).publish()
app.logger.info(
f'{data["username"]} has sent a message to the room: {data["room"]} : {data["message"]}')
if not data['message'].startswith('/'):
save_message(data['room'],data['message'],data['username'])
data['created_at'] = datetime.now().strftime('%d %b, %H:%M:%S')
socketio.emit('received_message', data, room=data['room'])
@socketio.on('join_room')
def handle_join_room_event(data):
app.logger.info(f'{data["username"]} has joined the {data["room"]} room')
join_room(data['room'])
socketio.emit('join_room_notice', data)
@socketio.on('leave_room')
def handle_left_room_event(data):
app.logger.info(f'{data["username"]} has left the {data["room"]} room')
leave_room(data['room'])
socketio.emit('left_room_notice', data)
@login_manager.user_loader
def load_user(username):
return get_user(username)
@app.route('/signup/', methods=['GET', 'POST'])
def signup():
message = ''
if request.method == 'POST':
username = request.form.get('username')
email = request.form.get('email')
password = request.form.get('password')
try:
save_user(username, email, password)
return redirect(url_for('login'))
except DuplicateKeyError:
message = 'Sorry, that user name is already taken'
return render_template('signup.html', message=message)
|
[
"ortizg.oscar@gmail.com"
] |
ortizg.oscar@gmail.com
|
5727ee0a340bc59409cbca3dab04c40c96cf0f16
|
69d71011cb93ca0e2fbdb41fd65728a1f9592811
|
/GCJ2008PP/D_ShoppingPlan_DP.py
|
1846a8b643098cc5b3fdf321f1bc2e546e1a466d
|
[] |
no_license
|
supachawal/JavaProgramming
|
571c614ad6897549e4f3f3f6f3aa7a24c42ab835
|
535a954b2b27a594e2ef7aaf5bea80ae360e8d02
|
refs/heads/master
| 2020-03-08T23:31:38.221494
| 2019-04-12T07:28:56
| 2019-04-12T07:28:56
| 128,466,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,073
|
py
|
"""
Created on Fri May 22 01:07 2015
@author: supachawal
Problem D. Shopping Plan
You have a list of items you need to buy today, and you know the locations (represented as points on a cartesian grid)
of a few stores in the area. You also know which of these stores are selling each item on your list, and at what price
each store sells it. Given the price of gas, what is the minimum amount you need to spend in order to buy all the items
on your shopping list and then drive back home? You start and end the journey at your house, which is located at (0,0).
To make matters interesting, some of the items on your list may be perishable. Whenever you make a purchase that includes
one or more perishable items, you cannot drive to another store without first stopping back at your house. Every item on
your shopping list is guaranteed to be sold by at least one store, so the trip will always be possible.
Input
The first line of input gives the number of cases, N. N test cases follow. Each case starts with a line formatted as
num_items num_stores price_of_gas
The next line contains the num_items items on your shopping list. The items will be space separated, and each item will
consist of only lowercase letters. If an item is perishable, its name will be followed by a single exclamation point.
There will be no duplicate items on your list. The next num_stores lines will each be formatted as
x_pos y_pos item1:price1 item2:price2 ...
Each of these lines gives the location of one store, along with the items available at that store and their corresponding
prices. Only items which are on your shopping list will appear in these lists. Perishable items will not end with exclamation
points on these lists. No item will be repeated in a store's list. Each store will offer at least one item for sale.
No two stores will be at the same location, and no store will be located at (0,0).
Output
For each test case, output one line containing "Case #x: " followed by the minimum possible cost of the trip,
rounded to seven decimal places. Don't forget about price_of_gas, which is the amount of money you must spend
per unit distance that you drive.
Limits
1 ≤ N ≤ 100,
0 ≤ price_of_gas ≤ 1000,
-1000 ≤ x_pos ≤ 1000,
-1000 ≤ y_pos ≤ 1000,
1 ≤ price of each item ≤ 1000.
Small dataset
1 ≤ num_items ≤ 5,
1 ≤ num_stores ≤ 10.
Large dataset
1 ≤ num_items ≤ 15,
1 ≤ num_stores ≤ 50.
Sample
Input
2
1 2 10
cookies
0 2 cookies:400
4 0 cookies:320
3 3 5
cookies milk! cereal
0 2 cookies:360 cereal:110
4 0 cereal:90 milk:150
-3 -3 milk:200 cookies:200
Output
Case #1: 400.0000000
Case #2: 519.2920690
"""
import itertools
import os
from psutil import virtual_memory
import re
import sys
import time
import math
inline_compute_distance = lambda a, b: ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5
INFINITY = 1e+16
class ItemInfo:
def __init__(self, itemId, initName):
if initName[-1] == '!':
self.item_id = itemId
self.name = initName[0:-1]
self.is_perishable = True
else:
self.name = initName
self.is_perishable = False
self.store_list = []
def __hash__(self):
return hash(self.name)
def __lt__(self, another):
return self.name < another.name
def __eq__(self, another):
return self.name == another.name
class StoreInfo:
def __init__(self, storeId, initPositionXY):
self.store_id = storeId
self.position_xy = initPositionXY
self.__items = {}
def __hash__(self):
return hash(self.store_id)
def __lt__(self, another):
return self.store_id < another.store_id
def __eq__(self, another):
return self.store_id == another.store_id
def compute_transportation_cost_to_origin(self, gasPrice):
return gasPrice * (self.position_xy[0]**2 + self.position_xy[1]**2)**0.5
def add_item(self, itemInfo, price):
self.__items[itemInfo] = price
def get_items(self):
return self.__items
def minimum_shopping_cost(stores, items, gasPrice):
# dynamic programming algorithm with worst case running time of O(m^2 * n)
m = len(stores)
n = len(items) + 1
R = [[INFINITY] * n for i in range(m)] # accumulated item amount (element i = 0 means home, j = 0 means nothing on hand)
F = [[-1] * n for i in range(m)] #buy from F
#Case #1: #items=3, #stores=2, gas_price=1, answer=5454.0824588
# gk cr! qq!
# -883 -292 cr:953 gk:967
# 526 -734 qq:794 gk:41
for i in range(m):
R[i][0] = stores[i].compute_transportation_cost_to_origin(gasPrice)
for j in range(1, n):
item = items[j - 1]
# minimize cost of the new item (wherever we buy it from, we update as if we go to every stores[i])
for i in range(m):
minCost = R[i][j]
bestStore = i
for store, price in item.store_list:
k = store.store_id
if R[k][j] == INFINITY:
R[k][j] = R[k][j - 1] + price # initial price without moving
cost = R[k][j]
                if gasPrice > 0.0 and k != i:  # moving from a store k other than i that has not been visited yet on this path
visited = False
for v in range(j):
if k == F[i][v]:
visited = True
break
if not visited:
if item.is_perishable:
cost += stores[k].compute_transportation_cost_to_origin(gasPrice) \
+ stores[i].compute_transportation_cost_to_origin(gasPrice)
else:
cost += gasPrice * inline_compute_distance(stores[k].position_xy
, stores[i].position_xy)
if cost < minCost:
minCost = cost
bestStore = k
R[i][j] = minCost
F[i][j] = bestStore
return R[0][n - 1]
def main(argv):
mem = virtual_memory()
    starttime = time.perf_counter()  # time.clock() was removed in Python 3.8
print('=============== start program (FREEMEM=%.0fm)===============' % (mem.total/1024/1024))
# inputFileName = '-sample1.in'
# inputFileName = '-sample2.in'
# inputFileName = '-small-practice.in'
inputFileName = '-large-practice.in'
inputFileName = os.path.basename(__file__)[0] + inputFileName
if len(argv) > 1:
inputFileName = argv[1]
outputFileName = inputFileName.split('.in', 1)[0] + '.out'
print('%s --> %s' % (inputFileName, outputFileName))
inputFile = open(inputFileName, 'r')
outputFile = open(outputFileName, 'w')
textLine = inputFile.readline().rstrip()
testCaseCount = int(textLine)
testCaseNumber = 1
textLine = inputFile.readline().rstrip()
while testCaseNumber <= testCaseCount:
splitted = re.split('\\s+', textLine)
num_items = int(splitted[0])
num_stores = int(splitted[1])
price_of_gas = float(splitted[2])
textLine = inputFile.readline().rstrip()
splitted = re.split('\\s+', textLine)
items = []
itemId = 0
for item_name in splitted:
items.append(ItemInfo(itemId, item_name))
itemId += 1
assert(num_items == len(items))
nPerishable = len([1 for item in items if item.is_perishable])
itemMapIndex = {items[i].name: i for i in range(num_items)}
stores = []
stores.append(StoreInfo(0, (0, 0)))
for i in range(num_stores):
textLine = inputFile.readline().rstrip()
splitted = re.split('\\s+', textLine)
storePos = (float(splitted[0]), float(splitted[1]))
store = StoreInfo(i + 1, storePos)
for item_with_price in splitted[2:]:
subsplitted = re.split(':', item_with_price)
item = items[itemMapIndex[subsplitted[0]]]
price = float(subsplitted[1])
store.add_item(item, price)
item.store_list.append((store, price))
stores.append(store)
print('Case #%d: #npitems=%d, #pItems=%d, #stores=%d, gas_price=%g,' % (testCaseNumber, num_items - nPerishable, nPerishable, num_stores, price_of_gas), end=' ', flush=True)
answer = minimum_shopping_cost(stores, items, price_of_gas)
print('answer=%.7f' % (answer), sep='', flush=True)
print('Case #%d: %.7f' % (testCaseNumber, answer), file=outputFile, flush=True)
testCaseNumber += 1
textLine = inputFile.readline().rstrip()
    print('=============== end program (FREEMEM=%.0fm ELAPSED=%f)===============' % (mem.total/1024/1024, time.perf_counter() - starttime))
if __name__ == '__main__':
main(sys.argv)
|
[
"supachawal@gmail.com"
] |
supachawal@gmail.com
|
24b569fd3622422f261b5f68111b8abd6e551daf
|
ec14635ef4e0dffb100df7317910888756e50339
|
/website/website/urls.py
|
6f78c4d1a0bc1f288c2c31bfec6c4fba745ad0fa
|
[] |
no_license
|
gymikechen/Exchange
|
8e351dd5c9377ae830a41d9bb4d8fde684e18ca6
|
67ef96d534d0a1a87a0bb69510bd45b947655ae0
|
refs/heads/master
| 2021-01-18T03:34:44.697360
| 2015-04-25T16:31:18
| 2015-04-25T16:31:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^mainpage/', include('mainpage.urls', namespace='mainpage'))
)
|
[
"wasiseal@gmail.com"
] |
wasiseal@gmail.com
|
5e6c55022ca74ff0691569b67e09db50a2549950
|
3ae7afb05534962ea12f22fe9d9e347a7a45c8f5
|
/old/experiment/naive.py
|
561c319ff1cb9cb60bce847ee42f50935b2d86f3
|
[] |
no_license
|
driquet/dsbench
|
ec2681ddb907d09400c3b744edf480ee558c94e9
|
31d29004223cc02250b5c7c8d40da97350665a8c
|
refs/heads/master
| 2021-01-01T05:30:35.558205
| 2012-02-02T09:34:53
| 2012-02-02T09:34:53
| 3,217,806
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,147
|
py
|
#!/usr/bin/python
'''
File: naive.py
Author: Damien Riquet
Description: Naive Algorithm
While an attacker is not detected, he scans targets
When an attacker is detected, the algorithm takes the next to do the same
'''
import algorithm
import core.common as common
import core.constant as constant
import random
import time
import sys
class Algorithm(algorithm.Algorithm):
def run(self):
""" Run the experiment """
common.log("Main")
fw_proxy = self.proxy_firewalls[self.firewalls[0]]
# Starting alert timer at the firewall
time.sleep(1.5)
fw_proxy.run("/etc/init.d/snort restart")
time.sleep(1.5)
fw_proxy.run(constant.fw_cmd)
common.log(" >> [Firewall : %s:%d] %s" % (self.firewalls[0][0], self.firewalls[0][1], constant.fw_cmd))
for timing in self.timings:
common.log("Naive Algorithm")
common.log(" %d attackers" % len(self.attackers))
common.log(" %d victims" % len(self.victims))
common.log("")
common.log(">> Timing %s" % timing)
for option, type in self.types :
common.log("")
common.log("")
common.log(" >> Type %s" % type)
# Building command(s)
subparts = self.generate(self.victims, constant.mostusedports)
random.shuffle(subparts)
common.log(" >> Generating %d subparts" % len(subparts))
remotes = list(self.attackers)
nb_detected = 0
i_subpart = 0
i_remote = 0
for remote in remotes:
# Connecting to the remote host
remote_proxy = self.proxy_attackers[remote]
undetected = True
while undetected:
if i_subpart == len(subparts):
break
subpart = subparts[i_subpart]
i_subpart += 1
# Creating the command
cmd = common.convert(subpart, option, timing)
common.log(" >> [Remote : %s:%d][Subpart : %s - %d/%d][Remote %d/%d]"
% (remote[0], remote[1], cmd, i_subpart, len(subparts), i_remote, len(remotes)))
remote_proxy.run(cmd)
sys.stdout.write("[%s] << Waiting : " % (time.ctime()))
sys.stdout.flush()
while True:
status = remote_proxy.poll()
if not status:
status = fw_proxy.pollfw()
if status != None and status != False:
undetected = False
else:
undetected = True
break
status = fw_proxy.pollfw()
if status != None and status != False:
undetected = False
remote_proxy.kill()
break
sys.stdout.write('.')
sys.stdout.flush()
common.timing_sleep(timing)
str = "Undetected"
if not undetected:
str = "Detected"
fw_proxy.kill()
fw_proxy.run(constant.fw_cmd)
time.sleep(0.5)
sys.stdout.write(" >> %s\n" % str)
sys.stdout.flush()
if not undetected:
nb_detected += 1
i_remote +=1
# Log time
if i_remote in self.log_nb_attackers:
percentage = float(i_subpart)/len(subparts) * 100
message = "%s - [Remotes detected : %d/%d][Scan accomplished : %.2f%%]" % (str, i_remote, len(remotes), percentage)
common.log(message)
common.logtype(type, timing, message, percentage, 'naive', self.logdir, '../log/', i_remote)
if i_subpart == len(subparts):
# Scan has finished
percentage = 100
message = "%s - [Remotes detected : %d/%d][Scan accomplished : %.2f%%]" % (str, i_remote, len(remotes), percentage)
for i in self.log_nb_attackers:
if i > i_remote:
common.log(message)
common.logtype(type, timing, message, percentage, 'naive', self.logdir, '../log/', i)
break
common.log(" << Type %s" % type)
common.log("<< Timing %s" % timing)
|
[
"d.riquet@gmail.com"
] |
d.riquet@gmail.com
|
27f44c21550f126bc6a2e8d6b8aa62130c306ec6
|
c602d28de32d559316e3d60d48f1b107fd57905d
|
/solution/_141_linked_list_cycle.py
|
9ed12d48fed91244259fe6f172771b0630236b4b
|
[] |
no_license
|
zhangda7/leetcode
|
73b90ccf987b5cd1e7a6bda1cc7546e5d9a8533a
|
7fea71d92a12659e186b999ae06c06be39a19290
|
refs/heads/master
| 2021-01-17T22:29:12.480403
| 2015-08-24T09:36:35
| 2015-08-24T09:36:35
| 38,037,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
# -*- coding:utf-8 -*-
'''
Created on 2015/8/5
@author: dazhang
Given a linked list, determine if it has a cycle in it.
Follow up:
Can you solve it without using extra space?
'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return a boolean
def hasCycle(self, head):
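        # Floyd's tortoise-and-hare: p1 advances one node per step and p2 two;
        # the pointers can only meet again if the list contains a cycle.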
if head == None:
return False
p1 = head
p2 = head
while(p1!= None and p2!= None):
#print p1.val, p2.val
p1 = p1.next
if p2.next != None:
p2 = p2.next.next
else:
return p1 == p2
if p1 == p2:
return True
return False
if __name__ == '__main__':
s1 = ListNode(1)
s2 = ListNode(2)
s3 = ListNode(3)
s4 = ListNode(4)
s5 = ListNode(5)
s6 = ListNode(6)
s1.next = None
s2.next = s3
s3.next = s4
s4.next = s5
s5.next = s6
s6.next = None
s = Solution()
print(s.hasCycle(s1))
pass
|
[
"dazhang@lenovo.com"
] |
dazhang@lenovo.com
|
ef5f90848736e935b74c19e84546e440ed9df1cf
|
7afbbad7e7813faa10fb0883a5eec39ea26cfa36
|
/churn_modeling/main.py
|
08b44e72c3cd6017ebc11e21b03ceea3d7b3f56d
|
[
"MIT"
] |
permissive
|
maybemanolo/deeps
|
7a7934548e6e332706b2404cd2d8313bd00ccd1c
|
5e073da05305f275b1f5930ea2a2746141cb9d1e
|
refs/heads/master
| 2021-08-26T07:08:38.180459
| 2017-11-22T04:17:03
| 2017-11-22T04:17:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,717
|
py
|
import pandas as pd
import numpy as np
from train import train_model
from save_model import save_model
from load_model import load_model
from preprocessing import preprocessing
from one_sample import one_sample
from pathlib import Path
from sklearn.metrics import classification_report
dataset = pd.read_csv("Churn_Modelling.csv")
X_train,y_train,X_test,y_test,scaler = preprocessing(dataset)
load = int(input("Load Model (0/1): "))
if load == 1:
file = input("File (no extensions): ")
if not Path(file + '.json').is_file():
print("File not found")
load = 0
else:
model = load_model(file)
if load == 0:
print("Creating neural network...")
file = input("Name: ")
epochs = int(input("Epcohs: "))
batch = int(input("Batch Size: "))
model = train_model(X_train,y_train,batch,epochs,file)
more_train = int(input("More training (0/1): "))
if more_train == 1:
more_epochs = int(input("How much epochs: "))
batch = int(input("Batch Size: "))
model = train_model(X_train,y_train,batch,more_epochs,file)
predict = int(input("Predict y_test (0/1): "))
y_pred = model.predict(X_test)
y_pred_bool = (y_pred > 0.5)
if predict == 1:
print("Probability of each one to quit, sample = 10")
for i in y_pred[0:10]:
print(i)
print("They will quit in the next 6 months?, sample = 10")
for i in y_pred_bool[0:10]:
print(i)
medir = int(input("Check precision (0/1): "))
if medir == 1:
accuracy = classification_report(y_test,y_pred_bool)
print(accuracy)
tryit = int(input("Predict over one sample (0/1): "))
if tryit == 1:
pred = model.predict(one_sample())
print("Probability to get out")
print(pred)
pred = (pred > 0.5)
print("He will quit in the next 6 months?")
print(pred)
|
[
"manoloesparta@gmail.com"
] |
manoloesparta@gmail.com
|
226cdcb6545bff51e3fdf9ecf0b928241ece44d1
|
6b3742f77bc89bc3b1436462150e9bef6db01264
|
/学习阶段历程/作业.py
|
e7a37ea3aa65781bca4b244f710dd3d580aa889d
|
[] |
no_license
|
shenjinrong0901/python_work
|
84498aa4a34d825b4ae5a74b0163929af458afdb
|
3b829d0df91a36e2d4ff33341af6b7e1b136a312
|
refs/heads/master
| 2022-11-19T20:52:41.481755
| 2020-07-25T02:42:16
| 2020-07-25T02:42:16
| 282,359,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
fname = input("Please enter the name of the file to open: ")
fo = open(fname, "r")
txt = fo.read()
# process the full text in txt here
fo.close()
|
[
"724509720@qq.com"
] |
724509720@qq.com
|
5d4066d70c16752a570b7172fea82e0b62071e20
|
c7c62b30959e33e80404530d246ec936425a325c
|
/Principiante/1014.py
|
e66ea891fddb2181c350234e7f9dfd4e8436ab24
|
[] |
no_license
|
DerekMazino/Uri-Online-Judge
|
d67d7deabf09cf82222293ff0df6a55159cc5817
|
596ce6c331eefe9663da225ccfd410e123715240
|
refs/heads/master
| 2022-11-11T17:04:23.334751
| 2020-07-03T23:18:23
| 2020-07-03T23:18:23
| 260,353,090
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
x=int(input())
y=float(input())
cons=x/y
print('{:.3f} km/l'.format(cons))
|
[
"juancmaringcu@gmail.com"
] |
juancmaringcu@gmail.com
|
b2070fbc98a967804cbc495777fc42562a7e1daa
|
bd3b4a3403ad0476d287eb555bbe4211134b093e
|
/nuitka/tree/ReformulationBooleanExpressions.py
|
57f04d993025bbb4e07262bc902c89bd8b58c472
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
suryansh2020/Nuitka
|
7ecff5bd0199a6510e446be13569c829ba165be5
|
3dd382e91884a77c28aeee6b0bd44a0fc58beee8
|
refs/heads/master
| 2021-01-19T14:28:47.154859
| 2014-12-21T07:34:12
| 2014-12-21T07:34:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,113
|
py
|
# Copyright 2014, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of boolean and/or expressions.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementDelVariable
)
from nuitka.nodes.ConditionalNodes import ExpressionConditional
from nuitka.nodes.OperatorNodes import ExpressionOperationNOT
from nuitka.nodes.VariableRefNodes import (
ExpressionTargetTempVariableRef,
ExpressionTempVariableRef
)
from .Helpers import (
buildNode,
buildNodeList,
getKind,
makeTryFinallyExpression,
wrapTryFinallyLater
)
def buildBoolOpNode(provider, node, source_ref):
bool_op = getKind(node.op)
if bool_op == "Or":
# The "or" may be short circuit and is therefore not a plain operation
return buildOrNode(
provider = provider,
values = buildNodeList(provider, node.values, source_ref),
source_ref = source_ref
)
elif bool_op == "And":
# The "and" may be short circuit and is therefore not a plain operation
return buildAndNode(
provider = provider,
values = buildNodeList(provider, node.values, source_ref),
source_ref = source_ref
)
elif bool_op == "Not":
# The "not" is really only a unary operation and no special.
return ExpressionOperationNOT(
operand = buildNode(provider, node.operand, source_ref),
source_ref = source_ref
)
else:
assert False, bool_op
def buildOrNode(provider, values, source_ref):
values = list(values)
result = values[-1]
del values[-1]
temp_scope = None
count = 1
while values:
if temp_scope is None:
temp_scope = provider.allocateTempScope(
name = "or"
)
keeper_variable = provider.allocateTempVariable(
temp_scope = temp_scope,
name = "value_%d" % count
)
count += 1
tried = StatementAssignmentVariable(
variable_ref = ExpressionTargetTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
source = values[-1],
source_ref = source_ref,
)
result = makeTryFinallyExpression(
tried = tried,
final = None,
expression = ExpressionConditional(
condition = ExpressionTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
yes_expression = ExpressionTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
no_expression = makeTryFinallyExpression(
expression = result,
final = None,
tried = StatementDelVariable(
variable_ref = ExpressionTargetTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
tolerant = False,
source_ref = source_ref,
),
source_ref = source_ref
),
source_ref = source_ref
),
source_ref = source_ref
)
wrapTryFinallyLater(
result,
StatementDelVariable(
variable_ref = ExpressionTargetTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
tolerant = True,
source_ref = source_ref,
)
)
del values[-1]
return result
def buildAndNode(provider, values, source_ref):
values = list(values)
result = values[-1]
del values[-1]
temp_scope = None
count = 1
while values:
if temp_scope is None:
temp_scope = provider.allocateTempScope(
name = "and"
)
keeper_variable = provider.allocateTempVariable(
temp_scope = temp_scope,
name = "value_%d" % count
)
count += 1
tried = StatementAssignmentVariable(
variable_ref = ExpressionTargetTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
source = values[-1],
source_ref = source_ref,
)
result = makeTryFinallyExpression(
tried = tried,
final = None,
expression = ExpressionConditional(
condition = ExpressionTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
no_expression = ExpressionTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
yes_expression = makeTryFinallyExpression(
expression = result,
final = None,
tried = StatementDelVariable(
variable_ref = ExpressionTargetTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
tolerant = False,
source_ref = source_ref,
),
source_ref = source_ref
),
source_ref = source_ref
),
source_ref = source_ref
)
wrapTryFinallyLater(
result,
StatementDelVariable(
variable_ref = ExpressionTargetTempVariableRef(
variable = keeper_variable,
source_ref = source_ref
),
tolerant = True,
source_ref = source_ref,
)
)
del values[-1]
return result
|
[
"kay.hayen@gmail.com"
] |
kay.hayen@gmail.com
|
8385ed94440875fdf2a2dcfdfda8b28ef0af0604
|
028c630b2111a085d46cfa0afce52b8ef19ebf62
|
/map_index_to_x.py
|
508be392b7125db9464d1239e737f6d46e3d1b69
|
[] |
no_license
|
crocagiles/bike_wipe
|
cd39c2f58b2afc6a289e31e58f16d97c24cbaa74
|
b7758af6415edc3c45e8c70c97dffaf36958a5c1
|
refs/heads/master
| 2020-08-30T20:27:01.025160
| 2019-10-31T05:15:21
| 2019-10-31T05:15:21
| 218,479,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
# Dirty little script to generate map to go from linear array to "bike space". Paste output from this into the main .ino file.
# x indexes for position on frame
top_tube = list(range(0, 15))
top_tube_r = top_tube[::-1]
down_tube = list(range(0, 16))
down_tube_r = down_tube[::-1]
# seat_tube = 10 * [15]
seat_tube = [15,16,17,18,19,19,18,17,16,15]
seat_stay = list(range(15, 28))
seat_stay_r = seat_stay[::-1]
chain_stay = list(range(16, 26))
chain_stay_r = chain_stay[::-1]
# Construct mapping for C file
print('// 1.) Left Downtube')
for i, p in enumerate(range(0, 16)):
print(f'n[ {p} ] = {down_tube[i]};')
print('// 2.) Left Chain Stay')
for i, p in enumerate(range(16, 26)):
print(f'n[ {p} ] = {chain_stay[i]};')
print('// 3.) Left Seat Stay')
for i, p in enumerate(range(26, 39)):
print(f'n[ {p} ] = {seat_stay_r[i]};')
print('// 4.) Left Seat Tube')
for i, p in enumerate(range(39, 49)):
print(f'n[ {p} ] = {seat_tube[i]};')
print('// 5.) Right Seat Tube')
for i, p in enumerate(range(49, 59)):
print(f'n[ {p} ] = {seat_tube[i]};')
print('// 6.) Right Seat Stay')
for i, p in enumerate(range(59, 72)):
print(f'n[ {p} ] = {seat_stay[i]};')
print('// 7.) Right Chain Stay')
for i, p in enumerate(range(72, 82)):
print(f'n[ {p} ] = {chain_stay_r[i]};')
print('// 8.) Right Down Tube')
for i, p in enumerate(range(82, 98)):
print(f'n[ {p} ] = {down_tube_r[i]};')
print('// 9.) Right Top Tube')
for i, p in enumerate(range(98, 113)):
print(f'n[ {p} ] = {top_tube[i]};')
print('// 10.) Left Top Tube')
for i, p in enumerate(range(113, 128)):
print(f'n[ {p} ] = {top_tube_r[i]};')
|
[
"noreply@github.com"
] |
crocagiles.noreply@github.com
|
2439106f8ae59153774a85d5dfb970e64b5f96d2
|
902bb426c7bd57091bbf7aa514cb9e4a4d2271fe
|
/test.py
|
78e6a0adea09f3884507e0cc3c07e7d103bd638b
|
[] |
no_license
|
asheed/nltk_prj
|
2e9c2329f4c34d36bbced1ec98a6e0fc09743ad9
|
dd832ecaef4139eaf3f71bb702cc0f825669e73a
|
refs/heads/master
| 2020-06-04T23:56:55.908219
| 2015-06-17T18:43:36
| 2015-06-17T18:43:36
| 37,614,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
__author__ = 'woojin'
# -*- coding: utf-8 -*-
para = "Hello World. It's good to see you. Thanks for buying this book."
para_kor = """안녕하세요. 만나서 반갑습니다. 이 책을 구입해 주셔서 감사합니다.
그런데, 이런 문장도 구분할 수 있나요?
음...그렇군요.
테스트입니다."""
from nltk.tokenize import sent_tokenize
print (sent_tokenize(para_kor,'english'))
|
[
"woojin7.ahn@samsung.com"
] |
woojin7.ahn@samsung.com
|
d665c557995591484bad40e38c586b4bb700a29b
|
d4b7d21bc16d81094cf568e4fb77701d4afeeb65
|
/lesson7_step6.py
|
d835610a07521c78a0d9287cf096622fd0255018
|
[] |
no_license
|
brizolya/stepik_auto_test_kurs
|
4e541fd6d322e3affb595e76aba5dbcc23ed82ea
|
9199b71f77211069a7441fa143a304925cbffdba
|
refs/heads/main
| 2023-01-10T04:28:34.968492
| 2020-11-10T17:08:33
| 2020-11-10T17:08:33
| 309,724,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
from selenium import webdriver
import time
import math
try:
link = "http://suninjuly.github.io/redirect_accept.html"
browser = webdriver.Chrome()
browser.get(link)
def calc(x):
return str( math.log( abs( 12*math.sin(int(x)) ) ) )
button = browser.find_element_by_css_selector("button.trollface.btn").click()
time.sleep(1)
new_window = browser.window_handles[1]
browser.switch_to_window(new_window)
x_element = browser.find_element_by_id('input_value')
x = x_element.text
y = calc(x)
input_answer = browser.find_element_by_id('answer')
input_answer.send_keys(y)
sub_button = browser.find_element_by_css_selector("button.btn").click()
finally:
time.sleep(10)
browser.quit()
|
[
"73897007+brizolya@users.noreply.github.com"
] |
73897007+brizolya@users.noreply.github.com
|
32916f0608b05e2b015b6fd5258a5ef9ce590a7a
|
f040e683d9c54c9e676f179541aad5891ce23114
|
/03 Control Flow/01 Practice Conditional Statements/prize.py
|
213eb2ae51845974e14e69a0a36c25747bfe5199
|
[] |
no_license
|
Elpuma/Udacity-Introduction-to-Python-Programming
|
6aea1e58507fea253c9c52cc74a2b01945637d83
|
2add41df7d6dafddf55919bcb7a7f7e4ac2cdef0
|
refs/heads/main
| 2023-03-23T11:09:34.295554
| 2021-03-15T01:56:56
| 2021-03-15T01:56:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
points = 174 # use this input to make your submission
# write your if statement here
if points <= 50:
result = "Congratulations! You won a wooden rabbit!"
elif points <= 150:
result = "Oh dear, no prize this time."
elif points <= 180:
result = "Congratulations! You won a wafer-thin mint!"
else:
result = "Congratulations! You won a penguin!"
print(result)
|
[
"noreply@github.com"
] |
Elpuma.noreply@github.com
|
d6d6df2e0ad50395b7901a28aa7f55087680ae51
|
5eff9222e5e97abbc17aa564701135e996b833f0
|
/examples/basic.py
|
c073b9b0fcac4bc6ce9fe4f05273c3d9aa19f2d4
|
[
"Apache-2.0"
] |
permissive
|
standoff-nlp/standoffconverter
|
70bd5a4d8887f5bfafab6cabe4a4f6d03227bc8f
|
70b6e3699772d1a4466e3bfacf9566753f33a619
|
refs/heads/master
| 2022-05-11T01:19:01.610235
| 2022-04-28T16:18:00
| 2022-04-28T16:18:00
| 194,285,160
| 19
| 0
| null | 2021-03-10T19:26:23
| 2019-06-28T14:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
from lxml import etree
from standoffconverter import Standoff
input_xml = '''<TEI><teiHeader></teiHeader><text><body><p>1 2 3 4 5 6 7 9 10</p><p> 11 12 13 14</p></body></text></TEI>'''
if __name__ == "__main__":
print("INPUT XML:")
print(input_xml)
tree = etree.fromstring(input_xml)
so = Standoff(tree)
so.add_inline(
begin=4,
end=7,
tag="threefour",
attrib={"resp":"David Lassner"},
depth=None,
)
print("Collapsed view:")
print(so.collapsed_table)
new_xml = etree.tostring(so.text_el).decode("utf-8")
print("\n\n####\nOUTPUT XML")
print(new_xml)
|
[
"davidlassner@gmail.com"
] |
davidlassner@gmail.com
|
aeb7ae3dd549fed2106d4c5b03f296d41f1ab9db
|
4d2cc76dbceff9eded071ba542ab2a1dd8c19f7b
|
/bhp056/apps/mpepu_summary/classes/medication_summary.py
|
26500fa436f7d1446aff50d56c57b0adcbf10892
|
[] |
no_license
|
botswana-harvard/mpepu
|
5d436638b760150ed76ec223121f5ac7aeee1020
|
6aa29c91f4fab50782b27e5f55aa33b30aee1dd0
|
refs/heads/master
| 2021-01-16T23:15:44.335940
| 2016-08-12T14:30:17
| 2016-08-12T14:30:17
| 65,557,693
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,310
|
py
|
from edc.subject.registration.models import RegisteredSubject
from apps.mpepu_infant.models import InfantBirthFeed, InfantStudyDrugInit, InfantBirthArv, InfantStudyDrugItems, InfantFuMed, InfantFuNewMed, InfantArvProphMod
class MedicationSummary(object):
def __init__(self, **kwargs):
self.context = {}
self.subject_identifier = kwargs.get('subject_identifier')
self.context['section_name'] = kwargs.get('section_name')
self.context['search_name'] = kwargs.get('search_name')
self.context['template'] = 'medication_summary.html'
self.context['subject_identifier'] = self.subject_identifier
if self.subject_identifier:
self.context['registered_subject'] = RegisteredSubject.objects.get(subject_identifier=self.subject_identifier)
# CTX / Placebo
if InfantStudyDrugInit.objects.filter(infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier):
infant_study_drug_init = InfantStudyDrugInit.objects.get(infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier)
self.context['initiated'] = infant_study_drug_init.initiated
self.context['first_dose_date'] = infant_study_drug_init.first_dose_date
if InfantStudyDrugItems.objects.filter(inf_study_drug__infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier):
self.context['study_medications'] = InfantStudyDrugItems.objects.filter(inf_study_drug__infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier)
else:
self.context['study_medications']= None
# AZT / NVP
# supplied at dischare?
if InfantBirthArv.objects.filter(infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier):
infant_birth_arv = InfantBirthArv.objects.get(infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier)
self.context['azt_discharge_supply'] = infant_birth_arv.azt_discharge_supply
self.context['nvp_discharge_supply'] = infant_birth_arv.nvp_discharge_supply
# record
if InfantArvProphMod.objects.filter(infant_arv_proph__infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier):
self.context['prophylaxis'] = InfantArvProphMod.objects.filter(infant_arv_proph__infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier)
else:
self.context['prophylaxis']= None
# vaccinations
# at birth
if InfantBirthFeed.objects.filter(infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier):
self.context['birth_vaccines'] = InfantBirthFeed.objects.filter(infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier).order_by('infant_visit__report_datetime')
else:
self.context['birth_vaccines']= None
# during follow up
if InfantFuMed.objects.filter(infant_fu__infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier):
self.context['vaccines'] = InfantFuMed.objects.filter(infant_fu__infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier).order_by('infant_fu__infant_visit__report_datetime')
else:
self.context['vaccines']= None
if InfantFuNewMed.objects.filter(infant_fu__infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier):
self.context['concomitant'] = InfantFuNewMed.objects.filter(infant_fu__infant_visit__appointment__registered_subject__subject_identifier__exact=self.subject_identifier).order_by('infant_fu__infant_visit__report_datetime')
else:
self.context['concomitant'] = None
|
[
"fchilisa@bhp.org.bw"
] |
fchilisa@bhp.org.bw
|
83eb54e9b676b8216417795e302ce1fbf7ddc511
|
dbbb7334b1f04b9be5a46ed30ed67a3d38539ef8
|
/koto - backend/backend/migrations/0001_initial.py
|
dc007ae96428759cebe981506835a870e23d5afb
|
[
"MIT"
] |
permissive
|
SirJAKfromSpace/Koto-FinanceManager
|
464fed8f4a7d05220ab4381f5795dc0813ff8aa8
|
bfec6f36d97b5986294e348bdf2c0b229449bb19
|
refs/heads/master
| 2022-11-15T10:33:08.469276
| 2020-07-14T12:46:48
| 2020-07-14T12:46:48
| 279,584,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
# Generated by Django 2.0.7 on 2018-07-29 17:32
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='googleCredentials',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userId', models.CharField(max_length=500)),
('email', models.CharField(max_length=500)),
('dob', models.DateTimeField(verbose_name='birth date')),
('name', models.CharField(max_length=250)),
],
),
migrations.CreateModel(
name='kotoUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('monthlyExpencess', models.IntegerField(default=0)),
('profession', models.CharField(max_length=250)),
],
),
]
|
[
"jawad.aziz.khan.jak@gmail.com"
] |
jawad.aziz.khan.jak@gmail.com
|
4d37e886781e168f4fa656b21eb8ce963ed5de67
|
74dca9366302ba2fbf8cf11d8673ccf15b332d7e
|
/savitzky_golay.py
|
c48a6621cc33b579eb3987edd00131cb72086aa3
|
[
"MIT"
] |
permissive
|
aithc/phenology
|
f67b26823ca0f4bf0cfc1e9fd176faad99256172
|
b7dd182c992755d59ce294062e3a7d51c56d8b1e
|
refs/heads/main
| 2023-07-29T01:33:28.662971
| 2021-09-24T03:11:39
| 2021-09-24T03:11:39
| 409,524,807
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,640
|
py
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
def savitzky_golay(y, window_size, order, deriv=0):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
    approaches, such as moving average techniques.
This code has been taken from http://www.scipy.org/Cookbook/SavitzkyGolay
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
    polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.savefig('images/golay.png')
#plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
try:
        window_size = np.abs(int(window_size))  # np.int was removed from recent NumPy releases
        order = np.abs(int(order))
except ValueError as error:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv]
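    # b is the (window_size x (order+1)) polynomial design matrix of the local least-squares
    # fit; row `deriv` of its pseudo-inverse gives the fixed convolution kernel of the filter.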
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m, y, mode='valid')
if __name__ == "__main__":
mod_ref = np.loadtxt("../data/FuentesAndalucia_MOD09A1.txt", delimiter=";")
ndvi = (mod_ref[:,8] - mod_ref[:,7])/(mod_ref[:,8]+mod_ref[:,7])
plt.plot ( ndvi, 'k-', label="MOD09 NDVI")
ndvi_smooth = savitzky_golay(ndvi, window_size=11, order=2)
plt.plot ( ndvi_smooth, '-r', label="Smooth NDVI", lw=1.5)
plt.legend(loc='best' )
plt.grid ( True )
plt.show()
plt.savefig('images/golay.png')
|
[
"noreply@github.com"
] |
aithc.noreply@github.com
|
0e50ff1eadd9ee963b05df54a202d58a99597fc8
|
a868f5f506df840bdb7119a9106331c19def4978
|
/cheesepi/storage/__init__.py
|
b7530eb20c45d95326d608e63bd727487d1682d2
|
[
"Apache-2.0"
] |
permissive
|
k9ert/cheesepi
|
850ab48ea7d7093da31dc00840a82120c5299ed6
|
c6a57552a72c3e6b8875fbc909ecc92f9e53b8de
|
refs/heads/master
| 2022-04-05T10:53:30.356100
| 2016-05-26T07:09:41
| 2016-05-26T07:09:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
import dao
try:
import dao_mongo
except ImportError as e:
print "Missing Mongo python module (and GridFS and bson), use 'pip install pymongo'"
print str(e)
try:
import dao_influx08
except ImportError as e:
print "Missing InfluxDB python module, use 'pip install influxdb'"
print str(e)
try:
import dao_influx09
except ImportError as e:
print "Missing InfluxDB python module, use 'pip install influxdb'"
print str(e)
#try:
# import dao_mysql
#except ImportError as e:
# print "Missing MySQL python module, use 'pip install MySQL-python'"
# print str(e)
|
[
"liamjjmcnamara@gmail.com"
] |
liamjjmcnamara@gmail.com
|
bcfcf45fdf5cb262dd7c05b05cf43a3156cf4f98
|
3d0edb598db227d0a17c6fdb364e04d71e651fd9
|
/main.py
|
2ce42cf43ae8fdf2cfd99e0479a3f4fa58522041
|
[] |
no_license
|
alexandrocw/Geranda
|
e27b172750c7f45703b1cca22d353f11f09a0b66
|
cfa3bfeb98a7704cce04a0645573f29b76ccebc5
|
refs/heads/master
| 2023-01-05T05:49:18.592768
| 2020-11-02T17:15:07
| 2020-11-02T17:15:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
import re
Window.size = (360, 640)
class MainWindow(BoxLayout):
def login(self):
email = self.ids.login_email.text
password = self.ids.login_password.text
self.check_email(email)
self.check_password(password)
def check_password(self, password):
if(password == ''):
self.ids.login_label.text = 'ENTER PASSWORD'
else:
self.ids.login_label.text = ''
def check_email(self, email):
if re.match(r'[^@]+@[^@]+\.[^@]+', email) is None:
self.ids.login_label.text = 'NOT A VALID EMAIL OR PASSWORD'
else:
self.ids.login_label.text = ''
class Geranda(App):
def build(self):
self.icon = 'UI/assets/Icon/Geranda.png'
return MainWindow()
if __name__ == '__main__':
Geranda().run()
|
[
"noreply@github.com"
] |
alexandrocw.noreply@github.com
|
77ddc556f9f9f4193bdd41ba47683bbbc3b1dbd0
|
f426fa0f79eb40ffccd87ab4e36d190b5a614e4a
|
/HttpTrigger/__init__.py
|
0ca7e8855f54027dce9ca7fb091c36285414358e
|
[] |
no_license
|
OD1995/VideoIndexerFileSplitter
|
8386cba2cea4645db109608836f1e65df11bf54c
|
e0b38aed1f2606ae7e36d7575e938ca4eae70a1e
|
refs/heads/master
| 2023-03-15T17:32:00.538745
| 2021-03-08T12:35:03
| 2021-03-08T12:35:03
| 328,656,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
import logging
from MyFunctions import initial_function
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
name = req.params.get('name')
    ## Do work (assumption: the source file URL and container name arrive as query-string parameters)
    fileURL = req.params.get('fileURL')
    container = req.params.get('container')
    result = initial_function(
        fileURL=fileURL,
        container=container
    )
if name:
return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
else:
return func.HttpResponse(
"This HTTP triggered function executed successfully. Pass a name in the query string or in the request body for a personalized response.",
status_code=200
)
|
[
"oliverdernie1@googlemail.com"
] |
oliverdernie1@googlemail.com
|
7556655b4ed2df96fee3297369a1ee8ce7d18893
|
632e8ed762f9f694e8f72d4d65303b5246a11217
|
/Secondhand/models.py
|
1bd32ef1b12b7c85761bae9e532dca9ea2e9d1bb
|
[] |
no_license
|
yiyusheng/django
|
36537bedf7efd2db3e41809c898cdabe2af6c381
|
a8a79ef0323cd9234cec83735618940f63dfc2a4
|
refs/heads/master
| 2022-11-19T11:36:30.868697
| 2022-11-06T04:01:50
| 2022-11-06T04:01:50
| 97,066,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,874
|
py
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class Advertiser(models.Model):
uname = models.CharField(max_length=100, blank=True, null=True)
create_time = models.DateTimeField(blank=True, null=True)
webname = models.CharField(max_length=20, blank=True, null=True)
week_count = models.CharField(max_length=100, blank=True, null=True)
month_count = models.CharField(max_length=500, blank=True, null=True)
update_time = models.DateTimeField(blank=True, null=True)
update_count = models.CharField(max_length=100, blank=True, null=True)
class Meta:
managed = False
db_table = 'advertiser'
unique_together = (('uname', 'webname'),)
class Secondhand(models.Model):
title = models.CharField(max_length=200)
uname = models.CharField(max_length=100, blank=True, null=True)
time = models.DateTimeField(blank=True, null=True)
reply_count = models.CharField(max_length=100, blank=True, null=True)
create_time = models.DateTimeField(blank=True, null=True)
webname = models.CharField(max_length=20)
url = models.CharField(max_length=100)
ext1 = models.CharField(max_length=500, blank=True, null=True)
ext2 = models.CharField(max_length=500, blank=True, null=True)
ext3 = models.CharField(max_length=500, blank=True, null=True)
ext4 = models.CharField(max_length=100, blank=True, null=True)
ext5 = models.CharField(max_length=100, blank=True, null=True)
update_count = models.IntegerField(blank=True, null=True)
update_time = models.DateTimeField(blank=True, null=True)
advertiser = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'secondHand'
unique_together = (('url', 'title', 'webname'),)
class WordSubscribe(models.Model):
id = models.AutoField(db_column='Id', primary_key=True) # Field name made lowercase.
time = models.DateTimeField(blank=True, null=True)
word = models.CharField(max_length=191, blank=True, null=True)
user = models.CharField(max_length=191, blank=True, null=True)
sckey = models.CharField(max_length=191, blank=True, null=True)
counts = models.IntegerField(blank=True, null=True)
enable = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'word_subscribe'
unique_together = (('sckey', 'word'),)
|
[
"yiyusheng.hust@gmail.com"
] |
yiyusheng.hust@gmail.com
|
916544b2804324ec10166fa74ee03b353482ab1a
|
76c027b2ef7e0e044d51f59fa707f7fcfccf4b6d
|
/src/code/lib/util/df_ops.py
|
de955c52ceb35aa766df857ea1833bb696695a45
|
[] |
no_license
|
dengl11/CS224N-Project-Machine-Reading
|
74bd211d9aaa196ea7d678c8c23e9c8036f40433
|
66756a0019b294ec4e2e048473f10115bda45bde
|
refs/heads/master
| 2021-09-10T08:00:26.015911
| 2018-03-22T14:07:25
| 2018-03-22T14:07:25
| 117,194,512
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
###########################################################
############### Dataframe Ops #################
###########################################################
from pandas import DataFrame
def mat2df(mat, index=None, columns=None):
"""return a dataframe from a np matrix
Args:
mat: np matrix
Return:
"""
return DataFrame(mat, index=index, columns=columns)
|
[
"dengl11@stanford.edu"
] |
dengl11@stanford.edu
|
41d4af172a2c1307c87d581829dd428d1cb0d413
|
16f173135e81215d05ee8f475c13a16e3796e1fa
|
/Introduction_to_TensorFlow/3.Neural_Networks/Initialization in TensorFlow.py
|
20fcc200c66e0b25e60ab43478413b929beaa860
|
[] |
no_license
|
jerry-mkpong/DataCamp
|
1b53821f1a32b48efdc8465251401721ba75bb56
|
10445bad35ef11567910ffab6ac70a980555a1b7
|
refs/heads/master
| 2022-11-11T03:57:21.923366
| 2020-06-28T17:36:10
| 2020-06-28T17:36:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
'''
A good initialization can reduce the amount of time needed to find the global minimum. In this exercise, we will initialize weights and biases for a neural network that will be used to predict credit card default decisions. To build intuition, we will use the low-level, linear algebraic approach, rather than making use of convenience functions and high-level keras operations. We will also expand the set of input features from 3 to 23. Several operations have been imported from tensorflow: Variable(), random(), and ones().
'''
# Define the layer 1 weights
w1 = Variable(random.normal([23, 7]))
# Initialize the layer 1 bias
b1 = Variable(ones([7]))
# Define the layer 2 weights
w2 = Variable(random.normal([7, 1]))
# Define the layer 2 bias
b2 = Variable(ones([1]))  # one bias per output unit of layer 2
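# A minimal forward-pass sketch (illustrative assumption, not part of the exercise): with
# `matmul` and `keras` also imported from tensorflow, the parameters above would act as two
# dense layers on a hypothetical batch of 23 borrower features, e.g.
#   layer1 = keras.activations.relu(matmul(borrower_features, w1) + b1)
#   default_prob = keras.activations.sigmoid(matmul(layer1, w2) + b2)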
|
[
"egorfollia@gmail.com"
] |
egorfollia@gmail.com
|
40c5e1b87e8a6311f8675e5c085bf69e41c79246
|
391f170a8c61e49fe6fcaea1e5942a02341398de
|
/appointments_scheduler/app/views.py
|
d252c80c9eb25ce8893693e3404ff4a9cc4a5044
|
[] |
no_license
|
bhargavaganti/appointments_scheduler
|
5755639ba4d23fd8c3b4a162e1726214bd19d992
|
b57ec02827041b504ca2eb38680f953f5350297d
|
refs/heads/master
| 2020-04-10T06:56:22.657184
| 2018-03-05T20:52:32
| 2018-03-05T20:52:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
from django.shortcuts import render
from .models import Appointment, Patient, Procedure
from .serializers import AppointmentSerializer, PatientSerializer, ProcedureSerializer
from rest_framework import viewsets
from rest_framework.response import Response
class AppointmentViewSet(viewsets.ModelViewSet):
""" API endpoint that allows appointments to be viewed or edited. """
queryset = Appointment.objects.all()
serializer_class = AppointmentSerializer
class PatientViewSet(viewsets.ModelViewSet):
""" API endpoint that allows patients to be viewed or edited. """
queryset = Patient.objects.all()
serializer_class = PatientSerializer
class ProcedureViewSet(viewsets.ModelViewSet):
""" API endpoint that allows procedures to be viewed or edited. """
queryset = Procedure.objects.all()
serializer_class = ProcedureSerializer
|
[
"viniciuschan@hotmail.com"
] |
viniciuschan@hotmail.com
|
2737ca89ceb64641f04634f6bd44fc8a8614e924
|
3d7745367787514fed13de826dc484ed822fe047
|
/qa/L0_config_search/config_generator.py
|
3a198ca04f1261d4ad9102ee04c3d9f00dc33de1
|
[
"Apache-2.0"
] |
permissive
|
ahiroto/model_analyzer
|
ce21011c732ed98831b2795bdce5e270fa57bfc9
|
4dbd47c0e71d66d90526d5523570e2c6f717d5bc
|
refs/heads/main
| 2023-06-12T13:10:58.066767
| 2021-06-25T17:52:49
| 2021-06-25T17:52:49
| 381,279,764
| 0
| 0
|
Apache-2.0
| 2021-06-29T07:39:07
| 2021-06-29T07:39:06
| null |
UTF-8
|
Python
| false
| false
| 5,306
|
py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import yaml
def _get_sweep_configs(profile_models):
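    # Builds several analyzer sweep configurations; the extra 'total_*' keys record the
    # expected number of parameter combinations / model configs, which the __main__ block
    # writes to .txt files for the test harness and strips before dumping the YAML config.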
sweep_configs = []
model_config = {
'run_config_search_disable': True,
'profile_models': {
model: {
'parameters': {
'concurrency': [1]
},
'model_config_parameters': {
'instance_group': [{
'count': [1, 2],
'kind': 'KIND_GPU'
}]
}
}
for model in profile_models
},
}
model_config['total_param'] = 2
model_config['total_param_remote'] = 1
model_config['total_models'] = 2
model_config['total_models_remote'] = 1
sweep_configs.append(model_config)
model_config = {
'run_config_search_max_concurrency': 2,
'run_config_search_max_instance_count': 2,
'run_config_search_max_preferred_batch_size': 2,
'profile_models': {
model: {
'model_config_parameters': {
'instance_group': [{
'count': [1, 2],
'kind': 'KIND_GPU'
}]
}
}
for model in profile_models
},
}
model_config['total_param'] = 4
model_config['total_param_remote'] = 2
model_config['total_models'] = 2
model_config['total_models_remote'] = 1
sweep_configs.append(model_config)
model_config = {
'run_config_search_max_concurrency': 2,
'run_config_search_max_instance_count': 2,
'run_config_search_max_preferred_batch_size': 2,
'profile_models': profile_models,
}
model_config['total_param_remote'] = 2
model_config['total_models_remote'] = 1
model_config['total_param'] = 16
model_config['total_models'] = 8
sweep_configs.append(model_config)
model_config = {
'run_config_search_max_concurrency': 2,
'run_config_search_max_instance_count': 2,
'run_config_search_max_preferred_batch_size': 1,
'profile_models': {
model: {
'parameters': {
'concurrency': [1]
},
}
for model in profile_models
},
}
model_config['total_param'] = 6
model_config['total_param_remote'] = 1
model_config['total_models'] = 6
model_config['total_models_remote'] = 1
sweep_configs.append(model_config)
model_config = {
'run_config_search_max_concurrency': 2,
'run_config_search_max_instance_count': 2,
'run_config_search_max_preferred_batch_size': 2,
'profile_models': {
model: {
'parameters': {
'concurrency': [1]
},
'model_config_parameters': {
'instance_group': [{
'count': [1, 2],
'kind': 'KIND_GPU'
}]
}
}
for model in profile_models
},
}
model_config['total_param'] = 2
model_config['total_param_remote'] = 1
model_config['total_models'] = 2
model_config['total_models_remote'] = 1
sweep_configs.append(model_config)
return sweep_configs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m',
'--profile-models',
type=str,
required=True,
help='The models to be profiled for this test')
args = parser.parse_args()
for i, configuration in enumerate(
_get_sweep_configs(args.profile_models.split(','))):
total_param = configuration['total_param']
total_param_remote = configuration['total_param_remote']
total_models_remote = configuration['total_models_remote']
total_models = configuration['total_models']
del configuration['total_param']
del configuration['total_param_remote']
del configuration['total_models']
del configuration['total_models_remote']
with open(f'./config-{i}-param.txt', 'w') as file:
file.write(str(total_param))
with open(f'./config-{i}-param-remote.txt', 'w') as file:
file.write(str(total_param_remote))
with open(f'./config-{i}-models.txt', 'w') as file:
file.write(str(total_models))
with open(f'./config-{i}-models-remote.txt', 'w') as file:
file.write(str(total_models_remote))
with open(f'./config-{i}.yml', 'w') as file:
yaml.dump(configuration, file)
|
[
"noreply@github.com"
] |
ahiroto.noreply@github.com
|
47d8946854e404b39050e195de9d3b4589543e31
|
cce27413adaf346dba823ae5f02e275f28e149b2
|
/src/project_name/settings/test.py
|
f14bd1723faf88b971667e966f009e7fb9938c4a
|
[] |
no_license
|
viktortat/django-project-template
|
a98bdf7f6803c466af395f2238ee009012f84d69
|
d44194d7d62a96b57d58b032255b769ae2c5710d
|
refs/heads/master
| 2021-06-14T14:52:03.588615
| 2017-03-08T16:39:39
| 2017-03-08T16:39:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
# coding: utf-8
import dj_database_url
from os import environ as e  # assumed: `e` is the process environment providing DJANGO_DB
from {{ project_name }}.settings.base import *
DEBUG = True
SECRET_KEY = 'test_key'
DATABASES = {
'default': dj_database_url.parse(e.get('DJANGO_DB')),
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
INTERNAL_IPS = ['127.0.0.1']
|
[
"n10101010@gmail.com"
] |
n10101010@gmail.com
|
22caa0b55a0e30eb1f51ce2482e0386226a9a691
|
0bed02f5b5f4747827e37ee795a3da065b640136
|
/argo/workflows/client/models/v1alpha1_template_ref.py
|
3f97a48eac30bfac7baebd5fb77404424581595d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
hadim/argo-client-python
|
eb5c91807ccf0caf14c15fe809b256f15a6cfcd3
|
4264e063e31865c55418e0b242dd21ba6d19ed64
|
refs/heads/master
| 2022-07-31T09:43:41.814450
| 2020-02-21T09:11:30
| 2020-02-21T09:11:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,216
|
py
|
# coding: utf-8
"""
Argo
Python client for Argo Workflows # noqa: E501
OpenAPI spec version: 2.5.0-rc10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1alpha1TemplateRef(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'runtime_resolution': 'bool',
'template': 'str'
}
attribute_map = {
'name': 'name',
'runtime_resolution': 'runtimeResolution',
'template': 'template'
}
def __init__(self, name=None, runtime_resolution=None, template=None): # noqa: E501
"""V1alpha1TemplateRef - a model defined in Swagger""" # noqa: E501
self._name = None
self._runtime_resolution = None
self._template = None
self.discriminator = None
if name is not None:
self.name = name
if runtime_resolution is not None:
self.runtime_resolution = runtime_resolution
if template is not None:
self.template = template
@property
def name(self):
"""Gets the name of this V1alpha1TemplateRef. # noqa: E501
Name is the resource name of the template. # noqa: E501
:return: The name of this V1alpha1TemplateRef. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1alpha1TemplateRef.
Name is the resource name of the template. # noqa: E501
:param name: The name of this V1alpha1TemplateRef. # noqa: E501
:type: str
"""
self._name = name
@property
def runtime_resolution(self):
"""Gets the runtime_resolution of this V1alpha1TemplateRef. # noqa: E501
RuntimeResolution skips validation at creation time. By enabling this option, you can create the referred workflow template before the actual runtime. # noqa: E501
:return: The runtime_resolution of this V1alpha1TemplateRef. # noqa: E501
:rtype: bool
"""
return self._runtime_resolution
@runtime_resolution.setter
def runtime_resolution(self, runtime_resolution):
"""Sets the runtime_resolution of this V1alpha1TemplateRef.
RuntimeResolution skips validation at creation time. By enabling this option, you can create the referred workflow template before the actual runtime. # noqa: E501
:param runtime_resolution: The runtime_resolution of this V1alpha1TemplateRef. # noqa: E501
:type: bool
"""
self._runtime_resolution = runtime_resolution
@property
def template(self):
"""Gets the template of this V1alpha1TemplateRef. # noqa: E501
Template is the name of referred template in the resource. # noqa: E501
:return: The template of this V1alpha1TemplateRef. # noqa: E501
:rtype: str
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1alpha1TemplateRef.
Template is the name of referred template in the resource. # noqa: E501
:param template: The template of this V1alpha1TemplateRef. # noqa: E501
:type: str
"""
self._template = template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1alpha1TemplateRef, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1TemplateRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"macermak@redhat.com"
] |
macermak@redhat.com
|
08d69aed5a55722c5d920579dbf378c7c403aab3
|
5419873e177757f9e6c0e2c474f95e4294701d77
|
/numpy_w2v.py
|
bd78f6a718ad1cfa2c3c6894456e85d927c4af92
|
[] |
no_license
|
Alucardmini/aug_code
|
db196afad42e92c423b68a08a6d206f61a32e18c
|
6c2ca87083c047e4aa1f5889cc64900f1e9c38a1
|
refs/heads/master
| 2023-07-10T12:21:00.321690
| 2021-08-22T07:06:27
| 2021-08-22T07:06:27
| 305,087,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,704
|
py
|
# -*- coding:utf-8 -*-
# @version: 1.0
# @author: wuxikun
# @date: '2020/8/16 4:04 PM'
from collections import defaultdict
import numpy as np
text = "natural language processing and machine laarning is fun and exciting"
corpus = [[word.lower() for word in text.split()]]
settings = {
'window_size': 2,
'n': 10,
'epochs': 500,
'learning_rate': 0.05
}
getW1 = [[0.236, -0.962, 0.686, 0.785, -0.454, -0.833, -0.744, 0.677, -0.427, -0.066],
[-0.907, 0.894, 0.225, 0.673, -0.579, -0.428, 0.685, 0.973, -0.070, -0.811],
[-0.576, 0.658, -0.582, -0.112, 0.662, 0.051, -0.401, -0.921, -0.158, 0.529],
[0.517, 0.436, 0.092, -0.835, -0.444, -0.905, 0.879, 0.303, 0.332, -0.275],
[0.859, -0.890, 0.651, 0.185, -0.511, -0.456, 0.377, -0.274, 0.182, -0.237],
[0.368, -0.867, -0.301, -0.222, 0.630, 0.808, 0.088, -0.902, -0.450, -0.408],
[0.728, 0.277, 0.439, 0.138, -0.943, -0.409, 0.687, -0.215, -0.807, 0.612],
[0.593, -0.699, 0.020, 0.142, -0.638, -0.633, 0.344, 0.868, 0.913, 0.429],
[0.447, -0.810, -0.061, -0.495, 0.794, -0.064, -0.817, -0.408, -0.286, 0.149]]
getW2 = [[-0.868, -0.406, -0.288, -0.016, -0.560, 0.179, 0.099, 0.438, -0.551],
[-0.395, 0.890, 0.685, -0.329, 0.218, -0.852, -0.919, 0.665, 0.968],
[-0.128, 0.685, -0.828, 0.709, -0.420, 0.057, -0.212, 0.728, -0.690],
[0.881, 0.238, 0.018, 0.622, 0.936, -0.442, 0.936, 0.586, -0.020],
[-0.478, 0.240, 0.820, -0.731, 0.260, -0.989, -0.626, 0.796, -0.599],
[0.679, 0.721, -0.111, 0.083, -0.738, 0.227, 0.560, 0.929, 0.017],
[-0.690, 0.907, 0.464, -0.022, -0.005, -0.004, -0.425, 0.299, 0.757],
[-0.054, 0.397, -0.017, -0.563, -0.551, 0.465, -0.596, -0.413, -0.395],
[-0.838, 0.053, -0.160, -0.164, -0.671, 0.140, -0.149, 0.708, 0.425],
[0.096, -0.995, -0.313, 0.881, -0.402, -0.631, -0.660, 0.184, 0.487]]
class word2vec():
def __init__(self):
self.n = settings['n']
self.lr = settings['learning_rate']
self.epochs = settings['epochs']
self.windows = settings['window_size']
def generate_training_data(self, settings, corpus):
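        # Build (target one-hot, [context one-hots]) training pairs for every word position,
        # using a symmetric window of `window_size` words on each side of the target.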
word_counts = defaultdict(int)
for row in corpus:
for word in row:
word_counts[word] += 1
self.v_count = len(word_counts.keys())
self.words_list = list(word_counts.keys())
self.word_index = dict((word, i) for i, word in enumerate(self.words_list))
self.index_word = dict((i, word) for i, word in enumerate(self.words_list))
training_data = []
for sentence in corpus:
sent_len = len(sentence)
for i, word in enumerate(sentence):
w_target = self.word2onehot(sentence[i])
w_context = []
for j in range(i - self.windows, i + self.windows + 1):
if j != i and sent_len - 1 >= j >= 0:
w_context.append(self.word2onehot(sentence[j]))
training_data.append([w_target, w_context])
        # dtype=object because each entry pairs a one-hot target with a
        # variable-length list of context vectors (a ragged structure).
        return np.array(training_data, dtype=object)
def word2onehot(self, word):
        # One-hot vector over the vocabulary with a 1 at this word's index.
        word_vec = [0] * self.v_count
word_index = self.word_index[word]
word_vec[word_index] = 1
return word_vec
def train(self, training_data):
self.w1 = np.random.uniform(-1, 1, (self.v_count, self.n))
self.w2 = np.random.uniform(-1, 1, (self.n, self.v_count))
# self.w1 = np.array(getW1)
# self.w2 = np.array(getW2)
for i in range(self.epochs):
self.loss = 0
for w_t, w_c in training_data:
y_pred, h, u = self.forward_pass(w_t)
EI = np.sum([np.subtract(y_pred, word) for word in w_c], axis=0)
self.backprop(EI, h, w_t)
self.loss += -np.sum([u[word.index(1)] for word in w_c]) + len(w_c) * np.log(np.sum(np.exp(u)))
print('Epoch:', i, "Loss:", self.loss)
def forward_pass(self, x):
h = np.dot(x, self.w1)
u = np.dot(h, self.w2)
y_c = self.softmax(u)
return y_c, h, u
def softmax(self, x):
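        # Subtracting the max before exp() keeps the computation numerically
        # stable without changing the resulting probabilities.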
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def backprop(self, e, h, x):
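        # Gradients of the softmax cross-entropy skip-gram loss:
        # dL/dW2 = outer(h, e) and dL/dW1 = outer(x, W2 @ e), applied as a
        # plain SGD step with the configured learning rate.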
d1_dw2 = np.outer(h, e)
d1_dw1 = np.outer(x, np.dot(self.w2, e.T))
self.w1 = self.w1 - (self.lr * d1_dw1)
self.w2 = self.w2 - (self.lr * d1_dw2)
if __name__ == '__main__':
w2v = word2vec()
training_data = w2v.generate_training_data(settings, corpus)
w2v.train(training_data)
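
    # Illustrative usage (added; not part of the original script). A minimal
    # sketch assuming the instance trained above: each row of w1 is the
    # embedding of the word with the matching index, so vectors can be read
    # back directly and compared by cosine similarity.
    vec = w2v.w1[w2v.word_index["machine"]]
    print("machine ->", vec)
    sims = {}
    for i in range(w2v.v_count):
        other = w2v.w1[i]
        sims[w2v.index_word[i]] = float(
            np.dot(vec, other) / (np.linalg.norm(vec) * np.linalg.norm(other))
        )
    # Words most similar to "machine" in this toy corpus.
    print(sorted(sims.items(), key=lambda kv: kv[1], reverse=True)[:3])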
|
[
"xikun.wu@ushow.media"
] |
xikun.wu@ushow.media
|
b23c8e71124c91224c393e431c3503c0fbc2d7a1
|
e07ba3b985cf6d3c63bbd6c3b01468d597fd2d80
|
/python-project-euler-100/p024.py
|
3d9d0b437dec3d5cfda98dc7ceaa41ab9921b637
|
[
"Apache-2.0"
] |
permissive
|
TalaatHarb/project-euler-100
|
093e62ff35671b5523e25d6eeea29dd3dd337bed
|
e474e08ec1a0a4aabf1c4c6cacf55b4e021fa847
|
refs/heads/develop
| 2023-07-19T21:56:19.846435
| 2022-08-04T23:08:28
| 2022-08-04T23:08:28
| 233,001,543
| 2
| 0
|
Apache-2.0
| 2022-08-03T16:20:20
| 2020-01-10T08:30:21
|
Java
|
UTF-8
|
Python
| false
| false
| 821
|
py
|
from Solution import Solution
class P024(Solution):
    # Factoradic method: write (n - 1) in the factorial number system and use
    # its digits to pick characters one at a time from the remaining pool.
    def string_permutation(self, n, chars):
        s = []
        result = ""
        chars = list(chars)
        n = n - 1
        # Build the factoradic digits of n (least significant first).
        for i in range(1, len(chars) + 1):
            s.append(n % i)
            n = int(n / i)
        for i in range(len(chars)):
            # The most significant remaining digit selects the next character.
            a = s[-1]
            result += chars[a]
            # Shift the tail left to drop the character just consumed.
            for j in range(a, len(chars) - 1):
                chars[j] = chars[j + 1]
                chars[j + 1] = '\0'
            s.pop()
        return result
def solve(self):
self.problem_number = 24
n = 1000000
data = '0123456789'
result = int(self.string_permutation(n, data))
return result
def main():
P024().run()
if __name__ == "__main__":
main()
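
# Worked example (added for illustration): with digits '012' the permutations in
# lexicographic order are 012, 021, 102, 120, 201, 210, so
# P024().string_permutation(4, '012') is expected to return '120'; solve() runs
# the same routine with n = 1000000 over '0123456789'.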
|
[
"mohamed.harb@cegedim.com"
] |
mohamed.harb@cegedim.com
|
ad1091aea23925c648dd23ae33ce6111e0c0e8e5
|
b51434d9ceca0a18a9ea5ab638bfb3204985cf7e
|
/selflib/azrmath.py
|
84cf0759f11faeb54ae8a990fce643dc7eda06bd
|
[] |
no_license
|
anqiaoqiao/AZR_Pushing_Toolbox
|
fcdc12c0be3e6217a18f6e37004d3174de93e6b8
|
04bd1ba72a4f5eece71c1ea35b3e68fddfdf1920
|
refs/heads/main
| 2023-03-18T07:03:53.608711
| 2021-03-13T02:50:29
| 2021-03-13T02:50:29
| 347,255,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,658
|
py
|
def ship_xp(level, isnormal):
    # describe: input ship level, output the ship's total XP.
    # Ship XP function for the Azur Lane JP/EN version.
    # Converts a level in <1-120> to cumulative XP.
    # Non-normal ships are the DR and UR rarities, which need more XP per level.
exp = 0
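    # Each branch below sums an arithmetic series of per-level XP gains: `an`
    # is the gain at the current level and `exp` is the closed-form partial
    # sum plus the cumulative XP at the start of that level bracket.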
if isnormal:
if 1 <= level <= 40:
an = 100 * (level - 1)
exp = int(0.5 * an * level)
elif 41 <= level <= 60:
an = 4000 + 200 * (level - 41)
exp = int(0.5 * (4000 + an) * (level - 40)) + 78000
elif 61 <= level <= 69:
an = 8000 + 300 * (level - 61)
exp = int(0.5 * (8000 + an) * (level - 60)) + 196000
elif level == 70:
exp = 289500
elif 71 <= level <= 79:
an = 12100 + 440 * (level - 71)
exp = int(0.5 * (12100 + an) * (level - 70)) + 289500
elif level == 80:
exp = 430300
elif 81 <= level <= 89:
an = 17250 + 575 * (level - 81)
exp = int(0.5 * (17250 + an) * (level - 80)) + 430300
elif level == 90:
exp = 628675
elif 91 <= level <= 99:
l = [1, 1, 2, 2, 4, 5, 5, 20, 72]
exp = 628675
an = 24000
for i in range(level - 90):
exp += an
an += 1200 * l[i]
elif level == 100:
exp = 1120675
elif 101 <= level <= 104:
an = 70000 + 2000 * (level - 101)
exp = int(0.5 * (70000 + an) * (level - 100)) + 1120675
elif level == 105:
exp = 1490675
elif 106 <= level <= 110:
an = 85000 + 12000 * (level - 106)
exp = int(0.5 * (85000 + an) * (level - 105)) + 1490675
elif 111 <= level <= 120:
l = [18, 18, 18, 18, 18, 21, 21, 21, 21, 0]
exp = 2035675
an = 145000
for i in range(level - 110):
exp += an
an += 1000 * l[i]
else:
return -1
else:
if 1 <= level <= 40:
an = 120 * (level - 1)
exp = int(0.5 * an * level)
elif 41 <= level <= 60:
an = 4800 + 240 * (level - 41)
exp = int(0.5 * (4800 + an) * (level - 40)) + 93600
elif 61 <= level <= 69:
an = 9600 + 360 * (level - 61)
exp = int(0.5 * (9600 + an) * (level - 60)) + 235200
elif level == 70:
exp = 347400
elif 71 <= level <= 79:
an = 13200 + 480 * (level - 71)
exp = int(0.5 * (13200 + an) * (level - 70)) + 347400
elif level == 80:
exp = 501000
elif 81 <= level <= 89:
an = 18000 + 600 * (level - 81)
exp = int(0.5 * (18000 + an) * (level - 80)) + 501000
elif level == 90:
exp = 708000
elif 91 <= level <= 99:
l = [1, 1, 2, 2, 4, 5, 5, 20, 72]
exp = 708000
an = 26000
for i in range(level - 90):
exp += an
an += 1300 * l[i]
elif level == 100:
exp = 1241000
elif 101 <= level <= 104:
an = 84000 + 2400 * (level - 101)
exp = int(0.5 * (84000 + an) * (level - 100)) + 1241000
elif level == 105:
exp = 1685000
elif 106 <= level <= 110:
an = 102000 + 14400 * (level - 106)
exp = int(0.5 * (102000 + an) * (level - 105)) + 1685000
elif 111 <= level <= 120:
l = [216, 216, 216, 216, 216, 252, 252, 252, 252, 0]
exp = 2339000
an = 174000
for i in range(level - 110):
exp += an
an += 100 * l[i]
else:
return -1
return exp
def commander_supply(level):
    # describe: input commander level, output the oil and gold storage limits
if 1 <= level <= 200:
oil = 1000 + 100 * (level - 1)
gold = 6000 + 600 * (level - 1)
return oil, gold
else:
return -1
def chapter_xp(cht, sec, isemo=True, ismvp=True, isflag=True):
    # describe: input chapter index, output total XP and average XP per access
    # default: Rank S victory, normal difficulty, average enemy level
    # default: auto-battle mode
    # Event chapters: A-14, B-15, C-16, D-17, SP-18
basic_xp = [
[240, 456, 492, 756], # chapter 1
[666, 949, 1033, 1120], # chapter 2
[1205, 1290, 1375, 1462], # chapter 3
[1545, 1637, 1718, 2218], # chapter 4
[2324, 2430, 2522, 2641], # chapter 5
[2744, 2852, 2955, 3635], # chapter 6
[3719, 3803, 3886, 3970], # chapter 7
[3236, 3306, 3376, 3438], # chapter 8
[4172, 4255, 4337, 4420], # chapter 9
[5222, 5273, 5368, 5463], # chapter 10
[6947, 7012, 7065, 7131], # chapter 11
[7255, 7310, 7372, 7429], # chapter 12
[7545, 7600, 7660, 8769], # chapter 13
[784, 1029, 1212], # chapter A
[1579, 2298, 2528], # chapter B
[2676, 2859, 3619], # chapter C
[4053, 4951, 5206], # chapter D
[6377] # chapter sp
]
basic_acess = [
[2, 3, 3, 4], # chapter 1
[3, 4, 4, 4], # chapter 2
[4, 4, 4, 4], # chapter 3
[4, 4, 4, 5], # chapter 4
[5, 5, 5, 5], # chapter 5
[5, 5, 5, 6], # chapter 6
[6, 6, 6, 6], # chapter 7
[5, 5, 5, 5], # chapter 8
[6, 6, 6, 6], # chapter 9
[7, 7, 7, 7], # chapter 10
[7, 7, 7, 7], # chapter 11
[7, 7, 7, 7], # chapter 12
[7, 7, 7, 8], # chapter 13
[4, 5, 5], # chapter A
[5, 6, 6], # chapter B
[5, 5, 6], # chapter C
[6, 7, 7], # chapter D
[8] # chapter sp
]
if isemo:
ratio_emo = 1.2
else:
ratio_emo = 1
if ismvp:
ratio_mvp = 2
else:
ratio_mvp = 1
    if isflag:
        ratio_flag = 1.5
    else:
        # Without the flagship bonus the multiplier falls back to 1,
        # mirroring the emotion and MVP toggles above.
        ratio_flag = 1
    xp = basic_xp[cht - 1][sec - 1] * ratio_emo * ratio_mvp * ratio_flag * 1.2
    xp_per_access = xp / basic_acess[cht - 1][sec - 1]
    return round(xp, 2), round(xp_per_access, 2)
def chapter_diffculty(cht, sec, aver_level, antiair, air_ctrl, front_num, back_num):
    # default: cycle mode, normal mode
recommend_air_ctrl = [
[120, 120, 120, 120], # chapter 1
[120, 120, 120, 120], # chapter 2
[120, 132, 152, 184], # chapter 3
[128, 144, 164, 188], # chapter 4
[224, 260, 308, 360], # chapter 5
[236, 268, 304, 340], # chapter 6
[388, 444, 500, 560], # chapter 7
[616, 676, 740, 804], # chapter 8
[876, 952, 1032, 1108], # chapter 9
[1204, 1300, 1400, 1500], # chapter 10
[1584, 1676, 1768, 1864], # chapter 11
[1968, 2076, 2184, 2296], # chapter 12
[2408, 2548, 2692, 2832], # chapter 13
[120, 176, 291], # chapter A
[249, 395, 384], # chapter B
[811, 956, 1185], # chapter C
[1133, 1570, 1539], # chapter D
[2640] # chapter sp
]
enemy_level = [
[2, 4, 6, 10], # chapter 1
[12, 15, 18, 21], # chapter 2
[24, 27, 30, 33], # chapter 3
[36, 39, 42, 45], # chapter 4
[48, 51, 54, 57], # chapter 5
[60, 63, 66, 69], # chapter 6
[72, 74, 76, 78], # chapter 7
[80, 82, 84, 86], # chapter 8
[89, 91, 93, 95], # chapter 9
[96, 98, 100, 102], # chapter 10
[104, 105, 106, 107], # chapter 11
[108, 110, 112, 114], # chapter 12
[116, 118, 120, 121], # chapter 13
[25, 30, 35], # chapter A
[45, 55, 60], # chapter B
[75, 80, 85], # chapter C
[95, 100, 105], # chapter D
[110] # chapter sp
]
# damage decrease: antiair: higher is easy
dd_antiair = (antiair) / (150 + antiair)
# airctrl gain: higher is easy
airctrl_diff = (air_ctrl - recommend_air_ctrl[cht - 1][sec - 1])
dd_airctrl = 1 + (airctrl_diff / (recommend_air_ctrl[cht - 1][sec - 1]) * 0.5)
# level gain: higher is easy
level_diff = (aver_level - enemy_level[cht - 1][sec - 1])
if 0 <= level_diff <= 25:
dd_level = 0.04 * level_diff
elif level_diff > 25:
dd_level = 1
else:
dd_level = 0
# burn or airforce gain: higher is easy
if 10 <= cht <= 13:
dd_burn_air = 0
else:
dd_burn_air = 1
# fleetscale gain
dd_fleetscale = (front_num + back_num) / 6
# difficulty_ratio: lower is safe
difficulty_ratio = 1 / (dd_antiair + dd_airctrl + dd_level + dd_fleetscale + dd_burn_air)
return round(difficulty_ratio, 2)
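

if __name__ == '__main__':
    # Illustrative usage (added; not part of the original module): exercise the
    # helpers above with plausible inputs to show the expected call shapes.
    print(ship_xp(100, True))         # cumulative XP for a normal ship at level 100
    print(commander_supply(120))      # (oil, gold) storage limits at commander level 120
    print(chapter_xp(9, 4))           # (total XP, XP per access) for chapter 9-4
    print(chapter_diffculty(9, 4, aver_level=100, antiair=300,
                            air_ctrl=1200, front_num=3, back_num=3))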
|
[
"ryuuseiku.home@gmail.com"
] |
ryuuseiku.home@gmail.com
|