blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2ab74e47619d9d6edf2602f1e1354117feaf75f
|
d3f831eb7eb33f5b86f957f6a156fc5c02cddc98
|
/mainapp/migrations/0010_auto_20201031_2220.py
|
6fcda1f6028646c6a5ea78cbf91ed8b03f804759
|
[] |
no_license
|
anujsngh/QUAD-twt_oct
|
fb1835c95880c27fc0f3ea7d81b5b71cc783170a
|
12101be1d85fdda9a4bb317e4713de794fd416d5
|
refs/heads/master
| 2023-07-21T19:18:09.820788
| 2020-12-17T11:23:15
| 2020-12-17T11:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
# Generated by Django 3.1.2 on 2020-10-31 16:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.2): re-point ``links.author``
    at the swappable AUTH_USER_MODEL with cascade deletion."""
    dependencies = [
        # Ensures the configured user model's app is migrated first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('mainapp', '0009_auto_20201031_2214'),
    ]
    operations = [
        migrations.AlterField(
            model_name='links',
            name='author',
            # CASCADE: deleting a user also deletes their links.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"yuvrajmotiramani5115@gmail.com"
] |
yuvrajmotiramani5115@gmail.com
|
416f71a810b96bd8c19cec485b09432fb88b9536
|
949c5eae4bb5057c1ef8db8a34042b87146cd7f3
|
/03_list_ranges_tuples/03_ranges.py
|
3118403df72a194d1b49bf97db04a23ef35458ed
|
[] |
no_license
|
JesusEduardo2028/python01
|
eb1fbcfce3dbd19c5937ef2563182ac70b996112
|
d60d67b1629095ec2de0e325bd9da541fc91b7c6
|
refs/heads/master
| 2020-05-17T05:07:39.212717
| 2019-04-25T23:28:42
| 2019-04-25T23:28:42
| 183,523,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# Materialise the first ten non-negative integers as a list.
my_list = list(range(10))
print(my_list)
# Evens and odds below ten, selected by parity.
even = [n for n in range(10) if n % 2 == 0]
odd = [n for n in range(10) if n % 2 == 1]
print(even)
print(odd)
# Ranges represent sequences that follow a pattern; they are not lists,
# but they can be iterated and searched like one.
small_decimals = range(0, 10)
print(small_decimals)
print(small_decimals.index(3))
odd = range(1, 10000, 2)
# print(odd)
print(odd.index(985))
print(odd[985])
|
[
"omnibnk@OmniBnks-MacBook-Air-2.local"
] |
omnibnk@OmniBnks-MacBook-Air-2.local
|
69010e3b6fdf85acdc542e3cc173cc2efe29689c
|
784eb69213d850280d468798725578910295b30e
|
/management/commands/django2hugo.py
|
942f8832c5b7dbd1d4905bfa6b3af1b64f358a71
|
[] |
no_license
|
hypertexthero/hypertexthero.com
|
e2f21e32ab3844c3b9e88f64d55c7fe9197d82db
|
229abd3e4cbbe4b9d0d91c123b76840ee7810f50
|
refs/heads/master
| 2020-04-06T07:04:56.590886
| 2016-06-29T02:53:58
| 2016-06-29T02:53:58
| 7,525,136
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
# _*_ coding:utf-8 _*_
# https://github.com/sjkingo/mezzanine2jekyll
from django.core.management.base import BaseCommand
# http://stackoverflow.com/a/15364584/412329
from django.utils.encoding import smart_str, smart_unicode
from hth.models import Entry
import os
import re
class Command(BaseCommand):
    """
    Usage: $ python ../manage.py django2hugo /chosen/output/directory/
    """
    help = 'Export hth logbook entries as Hugo markdown files'
    def add_arguments(self, parser):
        parser.add_argument('output_dir', help='Where to put the outputted Hugo files')
    def handle(self, *args, **options):
        """Write one Hugo markdown file per Entry into output_dir."""
        # Fix: with add_arguments() (Django >= 1.8) positional arguments are
        # delivered via `options`, not `args` -- `args[0]` raised IndexError.
        # Also hoisted out of the loop: it is invariant per run.
        output_dir = options['output_dir']
        for post in Entry.objects.all():
            # Front-matter fields for the Hugo file header.
            header = {
                'title': post.title.replace(':', ''),  # colons break YAML front matter
                # =todo: make date format to 2005-03-22T00:00:00Z - http://stackoverflow.com/a/25120668/412329
                'date': post.pub_date,
                'what': '\n - ' + '\n - '.join([str(kw) for kw in post.tags.all()]),
                'kind': post.kind,
                'linkedurl': post.url
            }
            # Jekyll/Hugo-style file name: YYYY-MM-DD-slug.markdown
            filename = '{d.year:02}-{d.month:02}-{d.day:02}-{slug}.markdown'.format(
                d=post.pub_date, slug=post.slug)
            # NOTE(review): Python 2 code path (smart_unicode import above);
            # encode + replace works on py2 str, not on py3 bytes.
            content = post.body.encode('utf-8').replace('\r', '')
            # Write out the file: '---'-delimited front matter, then the body.
            with open(os.path.join(output_dir, filename), 'w') as fp:
                fp.write('---' + os.linesep)
                for k, v in header.items():
                    fp.write(smart_str('%s: %s%s' % (k, v, os.linesep)))
                fp.write('---' + os.linesep)
                fp.write(smart_str(content))
|
[
"simon@hypertexthero.com"
] |
simon@hypertexthero.com
|
becc7b68dedd044942e8a0ad78c43194e7a0ec46
|
130f9e059b5f0341bc9ccfdd50be6e7baf1aaf9d
|
/Decoratore/property_deco.py
|
c13e87c000d838af04d37c78a20cf34ed86d3307
|
[] |
no_license
|
Rschanania/core_python
|
d331d9fd3537f95570c304a65d2fcdb739567efb
|
09b32a7eabb53512e98d12ed1db18d0f140bd512
|
refs/heads/master
| 2020-12-28T17:16:32.693437
| 2020-02-05T10:10:03
| 2020-02-05T10:10:03
| 238,419,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
# class student:
# def __init__(self,first_name,last_name):
# self.first_name=first_name
# self.last_name=last_name
# @property
# def email(self):
# if self.first_name==None or self.last_name==None:
# return f"Email is not set please give an valis email"
# else:
# return (f"The email Address is :-{self.first_name}.{self.last_name}@kaamkr.com")
# @email.setter
# def email(self,string):
# print("Setter is calling")
# name=string.split("@")[0]
# self.first_name=name.split(".")[0]
# self.last_name=name.split(".")[-1]
# @email.deleter
# def email(self):
# self.first_name=None
# self.last_name=None
#
#
# ravi=student("Ravinder","Singh")
# print(ravi.email)
# ravi.email="Ravi.chanania@Kaamkr.com"
# del ravi.email
# print(ravi.email)
#
class student:
    """A person whose email address is derived from -- and can be parsed
    back into -- the first and last name, via the ``email`` property."""
    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name
    def getValue(self):
        # Compose the address from the current name parts.
        return "{0}.{1}@codeWithharry.com".format(self.first_name, self.last_name)
    def setValue(self, value):
        # The local part before '@' is expected to look like "first.last";
        # keep the first and last dot-separated pieces as the name parts.
        pieces = value.split("@")[0].split(".")
        self.first_name = pieces[0]
        self.last_name = pieces[-1]
    def delValue(self):
        print("Deleting email Address")
        self.first_name = None
        self.last_name = None
        print("Email Deleted ")
    # Classic property() call instead of decorators: get / set / delete.
    email = property(getValue, setValue, delValue)
# Quick demonstration of the property-backed email attribute.
demo = student("Ravi", "Singh")
print(demo.email)
demo.email = "Vijy.Pankaj@gmail.com"
print(demo.email)
del demo.email
|
[
"chananias1@gmail.com"
] |
chananias1@gmail.com
|
25c5fd4e74b248a9a197688b3f0f66449b792c87
|
d8cbe9ce0469f72b8929af01538b6ceddff10a38
|
/homeassistant/components/devolo_home_network/config_flow.py
|
23ae1602d965d55f52b04b3d43b5179d950d669a
|
[
"Apache-2.0"
] |
permissive
|
piitaya/home-assistant
|
9c1ba162dac9604e4d43e035e74bad7bba327f0b
|
48893738192431f96966998c4ff7a3723a2f8f4a
|
refs/heads/dev
| 2023-03-07T16:13:32.117970
| 2023-01-10T17:47:48
| 2023-01-10T17:47:48
| 172,578,293
| 3
| 1
|
Apache-2.0
| 2023-02-22T06:15:56
| 2019-02-25T20:19:40
|
Python
|
UTF-8
|
Python
| false
| false
| 5,280
|
py
|
"""Config flow for devolo Home Network integration."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Any
from devolo_plc_api.device import Device
from devolo_plc_api.exceptions.device import DeviceNotFound
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.components import zeroconf
from homeassistant.const import CONF_HOST, CONF_IP_ADDRESS, CONF_NAME, CONF_PASSWORD
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.httpx_client import get_async_client
from .const import DOMAIN, PRODUCT, SERIAL_NUMBER, TITLE
# Module-level logger for this config flow.
_LOGGER = logging.getLogger(__name__)
# Manual (user) step: the device IP address is mandatory.
STEP_USER_DATA_SCHEMA = vol.Schema({vol.Required(CONF_IP_ADDRESS): str})
# Re-authentication step: the password may be omitted.
STEP_REAUTH_DATA_SCHEMA = vol.Schema({vol.Optional(CONF_PASSWORD): str})
async def validate_input(
    hass: core.HomeAssistant, data: dict[str, Any]
) -> dict[str, str]:
    """Validate the user input allows us to connect.

    Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.
    Returns the device serial number and a short-host title; may raise
    DeviceNotFound (handled by the caller) if the device is unreachable.
    """
    # Reuse Home Assistant's shared zeroconf and HTTP client instances.
    zeroconf_instance = await zeroconf.async_get_instance(hass)
    async_client = get_async_client(hass)
    device = Device(data[CONF_IP_ADDRESS], zeroconf_instance=zeroconf_instance)
    # Probe connect/disconnect only -- we just need to know it is reachable.
    await device.async_connect(session_instance=async_client)
    await device.async_disconnect()
    return {
        SERIAL_NUMBER: str(device.serial_number),
        # Text before the first dot of the host name becomes the entry title.
        TITLE: device.hostname.split(".", maxsplit=1)[0],
    }
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for devolo Home Network."""
    VERSION = 1
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle the initial step."""
        errors: dict = {}
        # First invocation: no input yet, show the IP-address form.
        if user_input is None:
            return self.async_show_form(
                step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
            )
        try:
            info = await validate_input(self.hass, user_input)
        except DeviceNotFound:
            errors["base"] = "cannot_connect"
        except Exception: # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            errors["base"] = "unknown"
        else:
            # The serial number uniquely identifies the device; abort if an
            # entry for it already exists.
            await self.async_set_unique_id(info[SERIAL_NUMBER], raise_on_progress=False)
            self._abort_if_unique_id_configured()
            # Password starts empty; it can be supplied later via reauth.
            user_input[CONF_PASSWORD] = ""
            return self.async_create_entry(title=info[TITLE], data=user_input)
        # Validation failed: re-show the form with the collected errors.
        return self.async_show_form(
            step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
        )
    async def async_step_zeroconf(
        self, discovery_info: zeroconf.ZeroconfServiceInfo
    ) -> FlowResult:
        """Handle zeroconf discovery."""
        # MT values 2600/2601 presumably identify devolo Home Control units
        # (abort reason "home_control") -- confirm against devolo docs.
        if discovery_info.properties["MT"] in ["2600", "2601"]:
            return self.async_abort(reason="home_control")
        await self.async_set_unique_id(discovery_info.properties["SN"])
        self._abort_if_unique_id_configured()
        # Stash discovery data for the confirm step and the flow title.
        self.context[CONF_HOST] = discovery_info.host
        self.context["title_placeholders"] = {
            PRODUCT: discovery_info.properties["Product"],
            CONF_NAME: discovery_info.hostname.split(".")[0],
        }
        return await self.async_step_zeroconf_confirm()
    async def async_step_zeroconf_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a flow initiated by zeroconf."""
        title = self.context["title_placeholders"][CONF_NAME]
        if user_input is not None:
            # User confirmed: create the entry with an empty password.
            data = {
                CONF_IP_ADDRESS: self.context[CONF_HOST],
                CONF_PASSWORD: "",
            }
            return self.async_create_entry(title=title, data=data)
        # Ask the user to confirm adding the discovered device.
        return self.async_show_form(
            step_id="zeroconf_confirm",
            description_placeholders={"host_name": title},
        )
    async def async_step_reauth(self, data: Mapping[str, Any]) -> FlowResult:
        """Handle reauthentication."""
        self.context[CONF_HOST] = data[CONF_IP_ADDRESS]
        # Reuse the product name of the already-configured device as title.
        self.context["title_placeholders"][PRODUCT] = self.hass.data[DOMAIN][
            self.context["entry_id"]
        ]["device"].product
        return await self.async_step_reauth_confirm()
    async def async_step_reauth_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a flow initiated by reauthentication."""
        # No input yet: show the (optional) password form.
        if user_input is None:
            return self.async_show_form(
                step_id="reauth_confirm",
                data_schema=STEP_REAUTH_DATA_SCHEMA,
            )
        reauth_entry = self.hass.config_entries.async_get_entry(
            self.context["entry_id"]
        )
        assert reauth_entry is not None
        data = {
            CONF_IP_ADDRESS: self.context[CONF_HOST],
            CONF_PASSWORD: user_input[CONF_PASSWORD],
        }
        # Persist the new password, then reload the entry so it takes effect.
        self.hass.config_entries.async_update_entry(
            reauth_entry,
            data=data,
        )
        self.hass.async_create_task(
            self.hass.config_entries.async_reload(reauth_entry.entry_id)
        )
        return self.async_abort(reason="reauth_successful")
|
[
"noreply@github.com"
] |
piitaya.noreply@github.com
|
9e35c118eeb8e36df7885fa66cc24a4df686446e
|
827e0c2b46043635c307eb985654afb56efa76d4
|
/python_stack/django/django_full_stack/LoginReg/apps/login_app/models.py
|
12fd6f3ee2bd86274eff75a716fa1d90f166e1b9
|
[] |
no_license
|
efaro2014/Dojo-Assignments
|
0f51e695a99bcc830de0fa2b56cd4d0f1d32ba97
|
bcf5150f00ffaf408167cb9357b979dd80b4d509
|
refs/heads/master
| 2022-10-29T10:03:00.837990
| 2019-12-27T05:40:53
| 2019-12-27T05:40:53
| 230,378,896
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
from django.db import models
# from datetime import date
import re
# now = date.today().isoformat()
# used for date time validations in relation to now
class UserManager(models.Manager):
    """Custom manager adding registration-form validation for User."""
    def basic_validator(self, postData):
        """Validate registration POST data and return a dict of field errors.

        An empty dict means every check passed. A duplicate email short-
        circuits the remaining checks (original behavior, preserved).
        """
        error = {}
        first_name = postData['first_name']
        last_name = postData['last_name']
        # Duplicate account check comes first and returns immediately.
        if User.objects.filter(email=postData['email']):
            error['emaildupe'] = "User already has an account with that email address"
            return error
        # Names must be alphabetic and longer than two characters.
        # (was: bitwise `&` on booleans -- equivalent here, but `and`
        # short-circuits and is the idiomatic form)
        if not (len(first_name) > 2 and first_name.isalpha()):
            error["first_name"] = "First name should be populated"
        if not (len(last_name) > 2 and last_name.isalpha()):
            error["last_name"] = "Last name should be populated"
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        if not EMAIL_REGEX.match(postData['email']):
            error['email'] = "Email address is invalid!"
        if len(postData['password']) < 8:
            error['password'] = "Password too short"
        if postData['password'] != postData['pw_confirm']:
            error["pw_confirm"] = "Passwords do not match"
        return error
class User(models.Model):
    """Registered site user.

    NOTE(review): the password is stored in a plain CharField; hashing is
    presumably handled elsewhere -- confirm before production use.
    """
    first_name = models.CharField(max_length = 45)
    last_name = models.CharField(max_length = 45)
    email = models.CharField(max_length = 55)
    password = models.CharField(max_length = 255)
    # Fix: the timestamp options were swapped. created_at must be stamped
    # once on insert (auto_now_add); updated_at refreshed on every save
    # (auto_now).
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    # Fix: `Ver_Manager` is undefined (NameError at import time); the custom
    # manager defined in this file is UserManager.
    objects = UserManager()
|
[
"efarkbg@gamil.com"
] |
efarkbg@gamil.com
|
913aa7a2ad32626259822e0e440d680bd21a5078
|
30678ff7104b26961a535bc8c675c887e63ae233
|
/ros/src/waypoint_updater/controller_tuning.py
|
1ce7aa0a93d89b6728f47c36f2e448877d386131
|
[
"MIT"
] |
permissive
|
jonam85/CarND-Capstone
|
2b560c689b51a9ed9d698598c7b31aa1577dc078
|
610f89b1a46660f2c76099aa97792b137246012e
|
refs/heads/master
| 2020-03-22T20:21:04.542200
| 2018-07-28T04:19:56
| 2018-07-28T04:19:56
| 140,592,107
| 0
| 0
| null | 2018-07-11T15:06:14
| 2018-07-11T15:06:13
| null |
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Empty, Int32
import itertools
import numpy as np
from enum import Enum
from waypoint_updater import WaypointUpdater
class ControllerTuning(WaypointUpdater):
    """Waypoint-updater variant that cycles through controller tuning runs.

    NOTE(review): relies on attributes initialised by WaypointUpdater
    (dbw_enabled_msg, velocity_msg, pose_msg, wp_calc, rate,
    publish_waypoints, waitUntilInit) -- confirm against the base class.
    """
    def __init__(self):
        super(ControllerTuning, self).__init__()
        self.traffic_waypoint_msg = Int32(-1) # Traffic waypoints are disabled
        # Signals an external listener that the full tuning cycle finished.
        self.set_next_tuning_pub = rospy.Publisher('/set_next_tuning', Empty, queue_size=1)
        self.tuning = TuningSettings(self.publish_set_next_tuning)
        self.waitUntilInit()
        # Blocks here for the lifetime of the node.
        self.loopForEver()
    def loopForEver(self):
        # Republish waypoints each cycle using the active tuning parameters.
        while not rospy.is_shutdown():
            if self.dbw_enabled_msg.data:
                # Get current tuning settings
                target_velocity, jerk_limit = self.tuning.get_settings(self.velocity_msg.twist.linear.x)
                # Update settings in waypoint calculator
                self.wp_calc.set_target_velocity(target_velocity)
                self.wp_calc.set_limits(jerk_limit=jerk_limit)
                waypoints = self.wp_calc.calc_waypoints(self.pose_msg, self.velocity_msg, self.traffic_waypoint_msg)
                self.publish_waypoints(waypoints)
            else:
                assert(False) # DBW must always be active when tuning
            self.rate.sleep()
    def publish_set_next_tuning(self):
        # Invoked by TuningSettings when every combination has been used.
        self.set_next_tuning_pub.publish(Empty())
class TuningSettings(object):
    """State machine stepping through (target velocity, jerk limit) pairs.

    Each combination is driven through ACCELERATE -> KEEP_SPEED ->
    DECELERATE -> STAND_STILL; after the last combination the supplied
    callback is invoked and the sequence restarts from the first pair.
    """
    def __init__(self, publish_set_next_tuning):
        self.publish_set_next_tuning = publish_set_next_tuning
        self.idx = 0  # index into self.settings
        self.state = self.State.ACCELERATE
        velocities = [20.0, 10.0, 5.0]
        jerk_limits = [10.0, 7.5, 5.0, 2.5]
        # Cartesian product: every velocity paired with every jerk limit.
        self.settings = list(itertools.product(velocities, jerk_limits))
    class State(Enum):
        ACCELERATE = 0
        KEEP_SPEED = 1
        DECELERATE = 2
        STAND_STILL = 3
    def get_settings(self, current_speed):
        """Advance the state machine and return (target_speed, jerk_limit)."""
        # Reached the target speed (within 0.5 m/s): hold it for 20 s.
        if self.state == self.State.ACCELERATE and np.isclose(current_speed, self.settings[self.idx][0], atol=0.5):
            self.state = self.State.KEEP_SPEED
            self.start_time = rospy.get_time()
        elif self.state == self.State.KEEP_SPEED and (rospy.get_time() - self.start_time) > 20.0:
            self.state = self.State.DECELERATE
        # Practically stopped: stand still for 5 s, then advance the index.
        elif self.state == self.State.DECELERATE and np.isclose(current_speed, 0.0, atol=0.005):
            self.state = self.State.STAND_STILL
            self.start_time = rospy.get_time()
        elif self.state == self.State.STAND_STILL and (rospy.get_time() - self.start_time) > 5.0:
            self.state = self.State.ACCELERATE
            self.idx += 1
            if self.idx == len(self.settings):
                # Every combination has been exercised: notify and wrap.
                self.publish_set_next_tuning()
                self.idx = 0
        # Target speed is zero in the decelerating / stand-still phases.
        if self.state in (self.State.ACCELERATE, self.State.KEEP_SPEED):
            target_speed = self.settings[self.idx][0]
        else:
            target_speed = 0.0
        jerk_limit = self.settings[self.idx][1]
        return target_speed, jerk_limit
# Start the tuning node; the constructor blocks in loopForEver().
if __name__ == '__main__':
    try:
        ControllerTuning()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
|
[
"markus.isaksson@gmail.com"
] |
markus.isaksson@gmail.com
|
ce95c4e50326e90750f2820f361a805f818f6e96
|
58409d01e1d6ac940f7c8c3e72938e1556e42767
|
/.env/bin/django-admin
|
d26792b1ffb5fc831f089f8309742cf4e5a1fc42
|
[] |
no_license
|
almahdiy/hajj_hackathon_2018
|
f4fadb7d765c842c3d05512d21debaec3bc1b5d1
|
e4be7a088a6171e92a36d18e223a6e64a63a6add
|
refs/heads/master
| 2022-12-09T10:33:44.417170
| 2018-08-03T05:10:54
| 2018-08-03T05:10:54
| 143,181,228
| 0
| 0
| null | 2022-12-08T09:02:06
| 2018-08-01T16:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 406
|
#!/home/integration/hajj_hackathon_2018/.env/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'Django','console_scripts','django-admin'
# Auto-generated launcher for Django's ``django-admin`` console script.
__requires__ = 'Django'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip setuptools wrapper suffixes (-script.py / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    entry = load_entry_point('Django', 'console_scripts', 'django-admin')
    sys.exit(entry())
|
[
"alghanmi_3z@live.com"
] |
alghanmi_3z@live.com
|
|
5b2dcfdf726452a7a72b1fa68fa90ec4be1b927f
|
3e611bfeb85bb55efd3054ce86c119a156e1cfde
|
/decision.py
|
75b92559abe3cd59fbcccae7b0969b4a43f832e5
|
[] |
no_license
|
Aynas/14co12
|
cfebbffc41c594355cd3d2671565c7197204f085
|
e35997343f5e4a984b8238681c730712115296c2
|
refs/heads/master
| 2020-04-15T15:05:08.932861
| 2019-04-09T10:21:07
| 2019-04-09T10:21:07
| 164,778,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
# Classification template: decision tree on the Social Network Ads dataset.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset: columns 2-3 are the features (Age, Estimated
# Salary per the axis labels below); column 4 is the purchase label.
dataset = pd.read_csv('Social_Network_Ads.csv')
x = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.25, random_state = 0)

# Feature Scaling: fit on the training data only, reuse on the test data.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting classifier to the Training set
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion='entropy', random_state = 0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Visualising the results (the training/test plots shared ~15 duplicated
# lines; factored into one helper).
from matplotlib.colors import ListedColormap

def _plot_decision_regions(X_set, y_set, title):
    """Shade the classifier's decision regions and overlay the samples."""
    # Dense grid (step 0.01) covering the scaled feature space.
    X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                         np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
    plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                 alpha = 0.75, cmap = ListedColormap(('red', 'green')))
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    for i, j in enumerate(np.unique(y_set)):
        plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                    c = ListedColormap(('red', 'green'))(i), label = j)
    plt.title(title)
    plt.xlabel('Age')
    plt.ylabel('Estimated Salary')
    plt.legend()
    plt.show()

_plot_decision_regions(X_train, y_train, 'Decision Tree Classification (Training set)')
_plot_decision_regions(X_test, y_test, 'Decision Tree Classification (Test set)')
|
[
"aynaskhalfe@gmail.com"
] |
aynaskhalfe@gmail.com
|
4401d91cb37fa38397f08d62e957845a10bd0ae2
|
c637f89e989fea446afda8095a82749a7ea02979
|
/lab2/main.py
|
25e2a226e7df761cd0d4e339f24f874cc9439c4a
|
[] |
no_license
|
Zavxoz/Image-processing
|
1836b6a0b6e47949452b20ac2f8e39cb1755aeea
|
9247e497a4ec132643549bd1051b7a013170f500
|
refs/heads/master
| 2020-08-05T01:39:48.792816
| 2019-11-03T19:27:17
| 2019-11-03T19:27:17
| 212,350,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
from processing import otsuThreshold, filtering
from detect import ImageObjectDetector
from PIL import Image
def main():
    """Binarize an image, then detect, cluster and colorize its objects."""
    # Show the untouched picture first for visual comparison.
    img = Image.open('c.jpg')
    img.show()
    # Noise-filter, then binarize with Otsu's threshold.
    img = otsuThreshold(filtering(img))
    img.show()
    # Label connected components, cluster them, and display the result.
    detector = ImageObjectDetector(img)
    detector.labeling()
    detector.kmedians()
    detector.colorize_clusters()
    detector.show()

if __name__ == "__main__":
    main()
|
[
"artem.klimec8@gmail.com"
] |
artem.klimec8@gmail.com
|
0b32f526bb7fe26f8b14d0f4ae3ddc90cef00588
|
29f9bd64230eb5f5ef476a568e648d69ab95fe2c
|
/Pong/pong.py
|
20ba7c2cf5f1b0ecf869f1a7685ad5ca03738331
|
[] |
no_license
|
Navid-Mehralizadeh/Random-Python-Programs
|
fa2853334566c68fdeb2dcddcbbe94c62a2fd524
|
9cafe2922879bb819a4ba3561b48005777337336
|
refs/heads/main
| 2023-06-25T14:58:09.570020
| 2021-07-25T19:01:26
| 2021-07-25T19:01:26
| 369,648,784
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,469
|
py
|
import turtle
# Window setup: 800x600 playfield; tracer(0) disables auto-refresh so the
# main loop can drive each frame via wn.update().
wn=turtle.Screen()
wn.title("Pong by Navid Mehr")
wn.bgcolor("blue")
wn.setup(width=800, height=600)
wn.tracer(0)
# Score state for both players.
score_a=0
score_b=0
# Left paddle: tall white rectangle at x = -350.
paddle_a = turtle.Turtle()
paddle_a.speed(0)
paddle_a.shape("square")
paddle_a.color("white")
paddle_a.shapesize(stretch_wid=5,stretch_len=1)
paddle_a.penup()
paddle_a.goto(-350, 0)
# Right paddle: mirror of the left one at x = 350.
paddle_b = turtle.Turtle()
paddle_b.speed(0)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.shapesize(stretch_wid=5,stretch_len=1)
paddle_b.penup()
paddle_b.goto(350, 0)
# Ball: starts at the centre; dx/dy are its per-frame velocity.
ball = turtle.Turtle()
ball.speed(0)
ball.shape("square")
ball.color("red")
ball.penup()
ball.goto(0, 0)
ball.dx = 0.2
ball.dy = 0.2
# Pen: hidden turtle used only to draw the score board at the top.
pen = turtle.Turtle()
pen.speed(0)
pen.shape("square")
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Player A: 0 Player B: 0", align="center", font=("Courier", 24, "normal"))
def paddle_a_up():
    # Nudge the left paddle 20 px upward.
    paddle_a.sety(paddle_a.ycor() + 20)

def paddle_a_down():
    # Nudge the left paddle 20 px downward.
    paddle_a.sety(paddle_a.ycor() - 20)

def paddle_b_up():
    # Nudge the right paddle 20 px upward.
    paddle_b.sety(paddle_b.ycor() + 20)

def paddle_b_down():
    # Nudge the right paddle 20 px downward.
    paddle_b.sety(paddle_b.ycor() - 20)
# Keyboard bindings: w/s move the left paddle, arrow keys the right.
wn.listen()
wn.onkeypress(paddle_a_up, "w")
wn.onkeypress(paddle_a_down, "s")
wn.onkeypress(paddle_b_up, "Up")
wn.onkeypress(paddle_b_down, "Down")
# Main game loop; runs until the window is closed.
while True:
    wn.update()
    # Advance the ball by its per-frame velocity.
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    # Bounce off the top and bottom walls by flipping dy.
    if ball.ycor() > 290:
        ball.sety(290)
        ball.dy *= -1
    elif ball.ycor() < -290:
        ball.sety(-290)
        ball.dy *= -1
    # Ball crossed the right edge: point for A, redraw score, re-serve.
    if ball.xcor() > 350:
        score_a += 1
        pen.clear()
        pen.write("Player A: {} Player B: {}".format(score_a, score_b), align="center", font=("Courier", 24, "normal"))
        ball.goto(0, 0)
        ball.dx *= -1
    # Ball crossed the left edge: point for B.
    elif ball.xcor() < -350:
        score_b += 1
        pen.clear()
        pen.write("Player A: {} Player B: {}".format(score_a, score_b), align="center", font=("Courier", 24, "normal"))
        ball.goto(0, 0)
        ball.dx *= -1
    # Paddle collisions: reverse horizontal direction when the ball is at a
    # paddle's x position and within 50 px of its centre vertically.
    if ball.xcor() < -340 and ball.ycor() < paddle_a.ycor() + 50 and ball.ycor() > paddle_a.ycor() - 50:
        ball.dx *= -1
    elif ball.xcor() > 340 and ball.ycor() < paddle_b.ycor() + 50 and ball.ycor() > paddle_b.ycor() - 50:
        ball.dx *= -1
|
[
"noreply@github.com"
] |
Navid-Mehralizadeh.noreply@github.com
|
bcfac09f499ad3ea7956d399b9bf6b62fd76c736
|
5ed7170b06a334f118537a356e223b1513f1a09c
|
/mb_projectt/wsgi.py
|
1944fd43e9a36f197334f6b7b9d599daa3a1daae
|
[] |
no_license
|
MarquiseCassar/mb_projectt
|
4c3274b4bff739875d1add125e5e96259a01e293
|
6297f72b3afa9203fea9c296fa8a9b5167d24f47
|
refs/heads/master
| 2020-04-06T12:08:20.324510
| 2018-11-13T20:49:23
| 2018-11-13T20:49:23
| 157,444,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for mb_projectt project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mb_projectt.settings')
application = get_wsgi_application()
|
[
"marquise.cassar@newbury.edu"
] |
marquise.cassar@newbury.edu
|
30ce9c01e676701f1f672e2f0be4607171d8b038
|
cb9646da51f8ac9b656a8f9bdeaf547927628e58
|
/manage.py
|
27b6b21f276706e02904887d88390594dabb67b6
|
[] |
no_license
|
irmii/hello_music
|
c3fac82557eece614db3023c7dcf3b1ded56aef6
|
f8eb365b57ab673f8505510f1e3e11b6bb849a15
|
refs/heads/main
| 2023-09-01T11:13:28.256847
| 2021-10-28T11:44:29
| 2021-10-28T11:44:29
| 417,206,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default settings module; a pre-set DJANGO_SETTINGS_MODULE wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hello_music.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point: only run when executed as a script.
if __name__ == '__main__':
    main()
|
[
"i.suvorova@x5.ru"
] |
i.suvorova@x5.ru
|
c184b2800b46a011424b199d9348045f1ad8ce4d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/2872.py
|
923077a412d3f2d3d7f6de71b67a942c571bdc00
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
import math
def isPalidrome(num):
    """Return True when *num* reads the same forwards and backwards."""
    # Render with up to 12 significant digits so floats such as 9.0
    # become "9" rather than "9.0".
    text = '%.12g' % num
    # Palindrome test: the string equals its own reverse.
    return text == text[::-1]
# NOTE(review): Python 2 syntax (print statements) -- runs under py2 only.
# Read the Code Jam input: first line is the test count, one case per line.
with open('FairSq-A-large-practice.in.txt') as f:
#with open('TTTT-A-small-practice.in.txt') as f:
    content = f.read().splitlines()
    numTests = int(content[0])
    fileText = content[1:]
offset = 0
i=1
f = open ('FairSqSmalloutput.txt','w')
# For each case, count "fair and square" numbers in [lower, upper]:
# perfect squares whose value AND whose square root are both palindromes.
while i<=numTests:
    rangeLowerBound,rangeUpperBound = map(int,fileText[offset].split())
    # Only roots in [ceil(sqrt(lo)), floor(sqrt(hi))] square into range.
    sqrtLower = math.ceil(math.sqrt(rangeLowerBound))
    sqrtUpper = math.floor(math.sqrt(rangeUpperBound))
    print rangeLowerBound,rangeUpperBound,sqrtLower,sqrtUpper
    numFairSquare = 0
    currNum = sqrtLower
    while currNum <= sqrtUpper:
        if isPalidrome(currNum):
            currNumSq = math.pow(currNum,2)
            if isPalidrome(currNumSq):
                numFairSquare += 1
                print 'currNumSq: '+str(currNumSq)+' is palindrome: '+str(isPalidrome(currNumSq))
        currNum += 1
    print numFairSquare
    # Code Jam output format: "Case #i: count".
    f.write('Case #'+str(i) + ': ' + str(numFairSquare)+'\n')
    i+=1
    offset += 1
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
c8d4c025b7e8668a98d49c2cdeecf3530ebcc06e
|
d6a7903f018c155b56f27ac97df746469e36d1f5
|
/saleor_restfull_api/urls.py
|
0c3c773f406885323002f065c703d9aca79aa7b1
|
[] |
no_license
|
danghieuthang/Saleor-gateway-api
|
2f3ec628a511569a3987783ee8f7d806c4a19819
|
5176785ed71b1fd212e692f3e72d5906e81a01e4
|
refs/heads/master
| 2023-04-10T21:24:48.236447
| 2021-04-19T17:13:35
| 2021-04-19T17:13:35
| 359,535,777
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls import include, url
# version = getattr(settings, "API_VERSION")
# if version is None:
# raise("API_VERSION must be config in settings")
# Route the Django admin, and delegate every other path to the api app.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^', include('api.urls', namespace="Product"))
]
|
[
"dhthang1998"
] |
dhthang1998
|
77df79a4b8334a9bfd29a676c8016d7d744fabce
|
47e63488b95d37452a3f7898ef664bb903a7c2fb
|
/examen/pregunta 8.py
|
b058c03eb1d1d58c6922b13b72d8e319594d0a42
|
[] |
no_license
|
Evelyn-agv/Ejercicios.Extra-POO
|
3450f0778256d5d34fd941561327e93d8fd55bfd
|
0d92894a0c681fcc149274fceb6b528bca94d855
|
refs/heads/main
| 2023-07-30T16:52:23.227577
| 2021-09-12T04:07:05
| 2021-09-12T04:07:05
| 405,534,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
class Cadena:
    """Wrap a string and count its upper- and lower-case ASCII letters."""
    def __init__(self, cadena):
        self.cadena = cadena
        # ASCII alphabets used for membership tests; characters outside
        # these lists (digits, spaces, accents) are not counted.
        self.Minuscula = list("abcdefghijklmnopqrstuvwxyz")
        self.Mayuscula = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    def mayMin(self):
        """Return (uppercase_count, lowercase_count) for the wrapped text."""
        cmay = sum(1 for ch in self.cadena if ch in self.Mayuscula)
        cmin = sum(1 for ch in self.cadena if ch in self.Minuscula)
        return cmay, cmin
# Demo: count upper/lower-case letters of a sample sentence.
exam = Cadena("Examen de POO")
print(exam.mayMin())
|
[
"noreply@github.com"
] |
Evelyn-agv.noreply@github.com
|
8481a49249d0f57e1d71117a3d781afc8ab1dc6f
|
6ac0aeea8229c4e2c7a041e85c3afeeb106c6b01
|
/string_methods_and_operators.py
|
fc95a7167f707e4222b807f54446b9ad4c8ed9dd
|
[] |
no_license
|
waiteb15/py3intro
|
325dafaaa642052280d6c050eacf8b406b40e01d
|
68b30f147e7408220490a46d3e595acd60513e9e
|
refs/heads/master
| 2020-03-27T10:50:25.928836
| 2019-02-28T21:47:11
| 2019-02-28T21:47:11
| 146,448,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
#!/usr/bin/env python
"""Tour of common string methods and operators on sample strings."""

movie_star = "Tommy Trotter"
print(movie_star)
print(movie_star.upper())
print(movie_star.count('t'))
print(movie_star.count('Tom'))
print(len(movie_star))
print(type(movie_star))
# Case-sensitive counts can be combined, or sidestepped by lowering first.
print(movie_star.count('T') + movie_star.count('t'))
print(movie_star.lower().count('t'))
print(movie_star.startswith('Tom'))
print(movie_star.startswith('Brad'))
print(movie_star.replace('Tom', 'Brad'))
print(movie_star.replace('Tom', ''))

# Whitespace stripping, shown between '|' markers.
s = " All my exes live in Texas "
print("|{}|".format(s.lstrip()))
print("|{}|".format(s.rstrip()))
print("|{}|".format(s.strip()))
print()

# strip() also takes a set of characters to remove from either end.
s = "xyxxyyxxxyyyxyxyxyxAll my exes live in Texasxyxyxyxyyyyyy"
print("|{}|".format(s.lstrip('xy')))
print("|{}|".format(s.rstrip('xy')))
print("|{}|".format(s.strip('xy')))
print()

# split() on whitespace by default, or on an explicit separator.
word_string = 'spink warf blom yuvu'
words = word_string.split()
print(words)
word_string = 'spink/warf/blom/yuvu'
words = word_string.split('/')
print(words)
print(words[0], words[0][0])
|
[
"waiteb15@gmail.com"
] |
waiteb15@gmail.com
|
d2cb920809a7dd60600b739ba7c2ee2357b0a795
|
d7cf062051ac79cc97111630521861f114762b01
|
/lib/quantity_code/tools/distribution_collector.py
|
4272e58091801a2e95ca7bad879fa9e37f1fdc36
|
[] |
no_license
|
lswzjuer/tensorflow-refmodel
|
abdd0c48ea6264b9919f18c05b94e67809e1b6cd
|
d34946d002ac22bb266b932fc5c3911b9ebb6eef
|
refs/heads/master
| 2022-01-23T17:56:36.119132
| 2019-07-17T09:04:37
| 2019-07-17T09:04:37
| 197,353,921
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,020
|
py
|
import math
import numpy as np
import multiprocessing
class DistributionCollector:
    """Accumulates per-tensor histograms ("distributions") of absolute values
    over repeated forward passes — presumably for quantization calibration
    (TODO confirm against callers).

    Expected call order: refresh_max_val() for every batch first, then
    add_to_distributions() for every batch, then read `distributions`.
    The two *_flag attributes enforce this order via assertions.
    """

    def __init__(self,
                 tensor_list,
                 interval_num=2048,
                 statistic=1,
                 worker_num=1,
                 debug=False):
        # tensor_list: names of tensors to track; used as keys into the dicts
        #              passed to refresh_max_val()/add_to_distributions().
        # interval_num: number of histogram bins per tensor.
        # statistic: multiplier applied to the max value when computing the
        #            bin width (see distribution_intervals).
        # worker_num: number of processes used to bin values in parallel.
        # debug: when True, workers return fake all-ones histograms.
        self._tensor_list = tensor_list
        self._interval_num = interval_num
        self._statistic = statistic
        self._worker_num = worker_num
        self._debug = debug
        self._distributions = {}
        self._max_vals = {}
        for tensor_name in self._tensor_list:
            self._distributions[tensor_name] = np.zeros((self._interval_num,), dtype=np.int32)
            self._max_vals[tensor_name] = 0
        # Guards that enforce the documented call order.
        self._max_vals_refreshed_flag = False
        self._added_to_distributions_flag = False

    @property
    def max_vals(self):
        # Max absolute value observed so far for each tracked tensor.
        assert self._max_vals_refreshed_flag, "Please use refresh_max_val() first."
        return self._max_vals

    @property
    def distribution_intervals(self):
        # Histogram bin width per tensor; the 1e-12 epsilon avoids a
        # zero-width interval (division by zero later) for all-zero tensors.
        # Also caches the result on self._distribution_intervals.
        assert self._max_vals_refreshed_flag, "Please use refresh_max_val() first."
        distribution_intervals = {}
        for tensor_name in self._tensor_list:
            distribution_intervals[tensor_name] = \
                self._statistic * self._max_vals[tensor_name] / self._interval_num + 1e-12
        self._distribution_intervals = distribution_intervals
        return distribution_intervals

    @property
    def distributions(self):
        # Per-tensor histograms accumulated by add_to_distributions().
        assert self._added_to_distributions_flag, "Please use add_to_distributions() first."
        return self._distributions

    def refresh_max_val(self, tensors):
        """Put this function in the loop of the network forwarding to refresh
        the max abs val of each tensor.

        `tensors` maps tensor name -> array-like accepted by np.max/np.min.
        """
        self._max_vals_refreshed_flag = True
        for tensor_name in self._tensor_list:
            tensor = tensors[tensor_name]
            max_val = max(abs(np.max(tensor)), abs(np.min(tensor)))
            self._max_vals[tensor_name] = max(self._max_vals[tensor_name], max_val)

    def add_to_distributions(self, tensors):
        """Put this function in the loop of the network forwarding to refresh
        the distribution of each tensor.

        Fans the binning work out over a multiprocessing pool of
        self._worker_num processes and sums the partial histograms.
        """
        # In debug mode only the first call does (fake) work.
        if self._debug and self._added_to_distributions_flag:
            return
        self._added_to_distributions_flag = True
        if not hasattr(self, '_distribution_intervals'):
            # Accessing the property caches self._distribution_intervals as a
            # side effect; the print is effectively a one-time log line.
            print("interval:", self.distribution_intervals)
        pool = multiprocessing.Pool(processes=self._worker_num)
        amount_per_worker = int(math.floor(len(self._tensor_list) / self._worker_num))
        results = []
        for worker_i in range(self._worker_num):
            sub_tensor_list = self._tensor_list[worker_i * amount_per_worker:
                                                (worker_i + 1) * amount_per_worker]
            if worker_i == 0:
                # Worker 0 also takes the remainder tensors that did not divide
                # evenly among the workers.
                sub_tensor_list += self._tensor_list[self._worker_num * amount_per_worker:]
            sub_tensors, sub_distribution_intervals = {}, {}
            for tensor_name in sub_tensor_list:
                sub_tensors[tensor_name] = tensors[tensor_name]
                sub_distribution_intervals[tensor_name] = self._distribution_intervals[tensor_name]
            # `run` is a module-level trampoline (defined below the class) so
            # the call is picklable by multiprocessing.
            result = pool.apply_async(
                run,
                args=(DistributionCollector,
                      sub_tensor_list,
                      sub_tensors,
                      sub_distribution_intervals,
                      self._interval_num,
                      self._debug))
            results.append(result)
        pool.close()
        pool.join()
        for result in results:
            tensor_list, sub_distributions = result.get()
            for (tensor_name, distribution) in zip(tensor_list, sub_distributions):
                self._distributions[tensor_name] += distribution
        pool.terminate()

    @staticmethod
    def add_to_distribution_worker(tensor_list, tensors, intervals, interval_num, debug=False):
        """Bin each tensor's nonzero absolute values into interval_num bins.

        Returns (tensor_list, [histogram np.int32 array per tensor]).
        """
        if debug:
            # Fake result: one count in every bin of every tensor.
            return tensor_list, [np.ones(interval_num, dtype=np.int32)
                                 for _ in range(len(tensor_list))]

        def _add_to_distribution(data, interv_num, interval):
            # Zero entries are skipped; out-of-range values clamp to the last bin.
            distribution = [0 for _ in range(interv_num)]
            max_index = interv_num - 1
            indexes = np.minimum((abs(data[data != 0]) / interval).astype(np.int32), max_index)
            # Note: `distribution[indexes] += 1` would not work here — repeated
            # indexes must each be counted, so we loop explicitly.
            for index in indexes:
                distribution[index] += 1
            return np.array(distribution, dtype=np.int32)

        distributions = []
        for tensor_name in tensor_list:
            distribution = _add_to_distribution(
                tensors[tensor_name], interval_num, intervals[tensor_name])
            distributions.append(distribution)
        return tensor_list, distributions
def run(cls_instance, *args):
    """Module-level trampoline: forward *args to
    ``cls_instance.add_to_distribution_worker``.

    Kept at module scope so multiprocessing can pickle the call target
    under Python 2 (per the original "Compatible with Python2" note).
    """
    worker = cls_instance.add_to_distribution_worker
    return worker(*args)
|
[
"liusongwei@fabu.ai"
] |
liusongwei@fabu.ai
|
d146fb94afc09c3251f385bfc647c051d76c9618
|
cb4766dff799a2a094014badcaa6a2da50dfc269
|
/app/forms.py
|
45124d90d7f3a5a966aab1bf675a3aaf6b042ffb
|
[] |
no_license
|
kiranpalkathait/GetAGrip
|
0c421c67b8ad28eca639fed084d449ec37aaa9f6
|
cc7bd44995cffd4f6060ebf5b17e6109c531b569
|
refs/heads/master
| 2020-12-25T06:23:25.554636
| 2013-11-04T06:54:23
| 2013-11-04T06:54:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
from flask.ext.wtf import Form, TextField, BooleanField, IntegerField
from flask.ext.wtf import Required, Email
class LoginForm(Form):
    """OpenID login form."""
    # The user's OpenID identifier; must be supplied.
    openid = TextField('openid', validators = [Required()])
    # Checkbox controlling session persistence; unchecked by default.
    remember_me = BooleanField('remember_me', default = False)
class ContactForm(Form):
    """Contact form: name, e-mail address and message body, all required."""
    name = TextField('name', validators = [Required(message="*Sorry, the name field is required. ")])
    # Email() additionally validates the address format on top of Required().
    email = TextField('email', validators = [Required(message="*Sorry, the email field is required"), Email(message="Sorry, the email you entered is not valid. ")])
    body = TextField('body', validators = [Required(message="*Sorry, the body field is required")])
class ProductForm(Form):
    """Product entry form: name, stock count, image reference and price,
    all required."""
    name = TextField('name', validators = [Required()])
    # Stock and price are integer fields — presumably price is stored in the
    # smallest currency unit; TODO confirm against the model.
    stock = IntegerField('stock', validators=[Required()])
    image = TextField('image', validators=[Required()])
    price = IntegerField('price', validators=[Required()])
|
[
"jonbodian@Jonathan-Bodians-MacBook-Pro.local"
] |
jonbodian@Jonathan-Bodians-MacBook-Pro.local
|
e43cb350478d9a1c56e8abf8cd6ab9606f6dea55
|
3f19365f3e9ce48acb8b6638e4a1161fa025aebd
|
/docs/conf.py
|
a25c08094c272cbb4f51fbac6fce42dcf68ea79e
|
[
"BSD-3-Clause"
] |
permissive
|
Durburz/eMonitor
|
9f57c51ecf4d68a17803b9fd630faf6d0ec5b4cd
|
56f3b1fe39b9da3a12b49bdd60d0cfca51c23351
|
refs/heads/master
| 2021-09-28T23:37:05.778353
| 2018-11-21T13:06:17
| 2018-11-21T13:06:17
| 120,184,604
| 0
| 0
|
NOASSERTION
| 2018-11-21T12:57:19
| 2018-02-04T12:52:44
|
Python
|
UTF-8
|
Python
| false
| false
| 11,366
|
py
|
# -*- coding: utf-8 -*-
#
# eMonitor documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 03 17:00:50 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
from emonitor import __version__ as eversion
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../emonitor'))
sys.path.insert(0, os.path.abspath('../emonitor/modules'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# rst2pdf.pdfbuilder adds the `pdf` builder configured by the pdf_* options
# at the bottom of this file.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.mathjax',
    'rst2pdf.pdfbuilder',
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['docs/source/_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'doc'
# General information about the project.
project = u'eMonitor'
copyright = u'2014-2015, Arne Seifert'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version: the package version with its last dot-component
# dropped (e.g. '1.2.3' -> '1.2').
version = ".".join(eversion.split('.')[:-1])
# The full version, including alpha/beta/rc tags.
release = eversion
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'de'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['documentation.rst', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Dokumentation eMonitor'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Dokumentation'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'source/_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['source/_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'eMonitor.tex', u'eMonitor Documentation',
     u'Arne Seifert', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'emonitor', u'eMonitor Documentation',
     [u'Arne Seifert'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'eMonitor', u'eMonitor Documentation',
     u'Arne Seifert', 'eMonitor', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for PDF output --------------------------------------------------
# (consumed by the rst2pdf.pdfbuilder extension enabled above)
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# If there is more than one author, separate them with \\.
# For example: r'Guido van Rossum\\Fred L. Drake, Jr., editor'
#
# The options element is a dictionary that lets you override
# this config per-document.
# For example,
# ('index', u'MyProject', u'My Project', u'Author Name',
#  dict(pdf_compressed = True))
# would mean that specific document would be compressed
# regardless of the global pdf_compressed setting.
pdf_documents = [
    ('index', u'eMonitor', u'eMonitor', u'Arne Seifert'),
]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx', 'kerning', 'a4']
# A list of folders to search for stylesheets. Example:
pdf_style_path = ['.', '_styles']
# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
#pdf_compressed = False
# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path = ['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
# Language to be used for hyphenation support
#pdf_language = "en_US"
# overflow, shrink or truncate
#pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
#pdf_break_level = 0
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
#pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
#pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
#pdf_verbosity = 0
# If false, no index is generated.
#pdf_use_index = True
# If false, no modindex is generated.
#pdf_use_modindex = True
# If false, no coverpage is generated.
#pdf_use_coverpage = True
# Name of the cover page template to use
#pdf_cover_template = 'sphinxcover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is only vectorpdf)
# you need vectorpdf if you want to use sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
#pdf_page_template = 'cutePage'
# Show Table Of Contents at the beginning?
#pdf_use_toc = True
# How many levels deep should the table of contents be?
pdf_toc_depth = 9999
# Add section number to section references
pdf_use_numbered_links = False
# Background images fitting mode
pdf_fit_background_mode = 'scale'
|
[
"arne@seifert-online.net"
] |
arne@seifert-online.net
|
0d1603f4daf6cbf9be08b640866ec0f3d073cc04
|
ae428b0d9d441dc186aa3409845165cb854279e4
|
/emaillove/__init__.py
|
6b2f541619aa63c81e914a8db1fe45fcfb9d39a0
|
[
"MIT"
] |
permissive
|
ryanrdetzel/EmailLove
|
afccb1eee5d1afbbe9ebfbf47884855d367dd7e8
|
643a1e06f68fbb5dc28da1c3fce76faa7f423e78
|
refs/heads/master
| 2021-01-13T02:27:36.179028
| 2012-12-01T11:57:00
| 2012-12-01T11:57:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
from emaillove.exceptions import NoCurrentProvider
class EmailLove:
    """Thin facade over a list of e-mail providers.

    A provider becomes "current" either by being the only registered one
    (auto-selected on first use) or by being assigned explicitly; all
    operations are delegated to it.
    """

    def __init__(self):
        self.providers = []
        self.current_provider = None

    def get_current_provider(self):
        """Return the active provider, auto-selecting when exactly one is
        registered; raise NoCurrentProvider otherwise."""
        if self.current_provider is not None:
            return self.current_provider
        if len(self.providers) != 1:
            raise NoCurrentProvider("No current provider selected")
        self.current_provider = self.providers[0]
        return self.current_provider

    def send(self, message):
        """Send this message from the current provider."""
        provider = self.get_current_provider()
        return provider.send(message)

    def unsubscribes(self, since=None):
        """Return the current provider's unsubscribes(since) result."""
        provider = self.get_current_provider()
        return provider.unsubscribes(since)
|
[
"ryan.detzel@gmail.com"
] |
ryan.detzel@gmail.com
|
d5ce4fa8bae2c918ea1118bd4c7635a0740f3f93
|
c30cca59b50c65e31ddf3cf1f7d01d1249fefd78
|
/ip.py
|
e6fefd33f25247caf7da9140b60d304690371a8a
|
[] |
no_license
|
BarryAllen001/BarryTelegramBot
|
149adabae0d699da5ad74cb22487f0d4add0cbd2
|
ab31713ac4b0a2e27dd747e19de8e81cf6631579
|
refs/heads/main
| 2023-03-11T20:59:29.600420
| 2021-03-03T22:39:48
| 2021-03-03T22:39:48
| 344,283,237
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
import requests
def IP(message):
    """Look up geolocation info for an IP address or hostname via ip-api.com.

    `message` is a Telegram message object; its text is cleaned of the
    '/ip ' command prefix, the bot mention and any URL scheme before being
    sent to the API. Returns a Markdown-formatted summary (field labels in
    Portuguese), or '*IP invalido*' when the API response contains a
    'message' key (which it only does on failure).
    """
    request = requests.get('http://ip-api.com/json/' + message.text.replace(
        'https://', '').replace('http://', '').replace('@nomedoseubot', '').replace('/ip ', '')).json()
    if 'message' not in request:
        # The leading/trailing '*' pairs are Telegram Markdown bold markers.
        return f'''
*IP: {request.get('query')}
Pais: {request.get('country')}
Estado: {request.get('regionName')}
Cidade: {request.get('city')}
Provedor: {request.get('isp')}
Latitude: {request.get('lat')}
Longitude: {request.get('lon')}
Timezone: {request.get('timezone')}
As: {request.get('as')}*
'''
    else:
        return '*IP invalido*'
|
[
"noreply@github.com"
] |
BarryAllen001.noreply@github.com
|
b6dc77aaa6a868ae685b9c4ad26ed007c8937345
|
3ac0a169aa2a123e164f7434281bc9dd6373d341
|
/constructRectangle.py
|
cbd54a3a9edba9faf918605bfbe63caf2bf84d8e
|
[] |
no_license
|
sfeng77/myleetcode
|
02a028b5ca5a0354e99b8fb758883902a768f410
|
a2841fdb624548fdc6ef430e23ca46f3300e0558
|
refs/heads/master
| 2021-01-23T02:06:37.569936
| 2017-04-21T20:31:06
| 2017-04-21T20:31:06
| 85,967,955
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
class Solution(object):
    def constructRectangle(self, area):
        """Return [length, width] of the rectangle with the given area whose
        sides are as close to each other as possible, with length >= width.

        :type area: int (positive)
        :rtype: List[int]
        """
        from math import sqrt
        # Scan candidate widths downward from floor(sqrt(area)); the first
        # divisor found gives the pair with the smallest |length - width|.
        # NOTE(review): int(sqrt(...)) relies on float sqrt, which could be
        # off by one for astronomically large perfect squares.
        for w in range(int(sqrt(area)), 0, -1):
            if area % w == 0:
                # Floor division keeps the result an int on Python 3 (the
                # original `area / w` produced a float there, violating the
                # List[int] contract).
                return [area // w, w]
|
[
"sfeng77@gmail.com"
] |
sfeng77@gmail.com
|
16f7b6884be1d60199b52593c50285a3d151b167
|
3162404355ee85d26091e26f9559d632eb8aeff0
|
/algorithms/binary search tree level order.py
|
4d51751f90ab8276f311b372fecea8214eeda9b0
|
[] |
no_license
|
HopeCheung/leetcode
|
375f017f4f192ec28b7b15cfd12ecdae06360bb0
|
2418b3eed1ab85cfd9cac039c6cfdc1a349ad345
|
refs/heads/master
| 2020-04-15T00:51:36.153056
| 2019-02-20T16:00:44
| 2019-02-20T16:00:44
| 164,254,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrder(self, root):
        """Return the node values of the tree in breadth-first (level) order.

        :type root: TreeNode
        :rtype: List[int]

        Note: despite the original ``List[List[int]]`` annotation, this
        solution has always produced a *flat* list of values in BFS order;
        that behavior is preserved here and the annotation corrected.
        """
        if root is None:
            return []
        # deque gives O(1) popleft, replacing the original O(n) list.pop(0).
        from collections import deque
        queue = deque([root])
        order = []
        while queue:
            node = queue.popleft()
            order.append(node.val)
            for child in (node.left, node.right):
                if child is not None:
                    queue.append(child)
        return order
|
[
"568038810@qq.com"
] |
568038810@qq.com
|
e6ac69914d43b217ab8558fa8f5404bcd9625b7a
|
3ec46ec9ded40c7072de5df6475bc99932f8aa26
|
/script/helper/extractor.py
|
5ffd582348862c2399ab2df9242e7afca1c30320
|
[] |
no_license
|
iTrauco/READMEClassifier
|
d4e697ad44e7e156a9fcb455aa127c072368cb1e
|
090cc4c58377de9536bfbcaff6cb8d8205dd934e
|
refs/heads/master
| 2020-12-30T04:59:21.169896
| 2019-11-15T09:12:45
| 2019-11-15T09:12:45
| 238,868,321
| 1
| 0
| null | 2020-02-07T07:40:26
| 2020-02-07T07:40:25
| null |
UTF-8
|
Python
| false
| false
| 22,258
|
py
|
import logging
from script.helper.helper2 import *
import sqlite3
from sqlite3 import Error
import sys
import codecs
import pandas
from pandas import DataFrame
import os
import shutil
def merge_classes_1_and_2(code_string):
    """Collapse class '2' into class '1' in a per-character code string and
    return the result as comma-separated characters.

    E.g. '012' -> '0,1,1'.
    """
    merged = code_string.replace('2', '1')
    return ','.join(merged)
def abstract_out_markdown(filenames, readme_file_dir, temp_abstracted_markdown_file_dir):
    """Abstract each README file and write the result to the temp directory.

    `filenames` is an iterable of rows whose first element is a README file
    name (presumably rows fetched from the database — TODO confirm against
    callers). Each file is read from `readme_file_dir`, passed through
    abstract_text() (from helper2), and the abstracted markdown is written
    under the same name in `temp_abstracted_markdown_file_dir`.
    """
    for f_row in filenames:
        filename = f_row[0]
        readme_file_full_path = readme_file_dir + filename
        # temp_html_file_full_path = temp_abstracted_html_file_dir + filename + '.html'
        temp_markdown_file_full_path = temp_abstracted_markdown_file_dir + filename
        logging.info('Processing {0}'.format(readme_file_full_path))
        # with open(readme_file_full_path, 'r', encoding='utf-8') as f:
        # use 'backslashreplace' to deal with UnicodeDecodeError
        with open(readme_file_full_path, 'r', encoding='utf-8', errors='backslashreplace') as f:
            text_with_markdown = f.read()
        abstracted_markdown_text = abstract_text(text_with_markdown)
        # Write abstracted markdown to temp markdown directory
        with open(temp_markdown_file_full_path,'w',encoding='utf-8') as f_out_markdown:
            f_out_markdown.write(abstracted_markdown_text)
    logging.info("Abstraction of README file into temporary directory has been completed")
'''
Known issue: Unable to handle underline-style H1 and H2
'''
def extract_section_from_abstracted_files(temp_abstracted_markdown_file_dir, db_filename, overview_table, content_table):
    """Extract the text content of every section listed in `overview_table`
    from the abstracted README files and save it into `content_table`.

    Known issue (see module note): only '#'-style ATX headings are
    recognized; underline-style H1/H2 are handled by the _v2 variant.
    """
    conn = sqlite3.connect(db_filename)
    try:
        c = conn.cursor()
        logging.info("Fetching information of section to extract and load")
        headings = pandas.read_sql("""
            SELECT file_id, section_id, local_readme_file, heading_markdown, abstracted_heading_markdown, heading_text, NULL as content_text_w_o_tags
            FROM {0}
            ORDER BY file_id, section_id""".format(overview_table), conn)
        curr_filename = None
        curr_filename_lines = None
        curr_filename_line_number = 0
        # Section definition: lines between current heading and next, regardless of level
        # Iterate through file sequentially since a file may have a several heading with same text and level
        # (e.g. multiple "Example" subheadings, one for each method in a reference section)
        for i,r in headings.iterrows():
            local_readme_filename = r[2]
            heading_markdown = r[3]
            abstracted_heading_markdown = r[4]
            logging.info('Searching for abstracted heading: {0}'.format(abstracted_heading_markdown))
            heading_text = r[5]
            if (curr_filename is None) or (curr_filename != local_readme_filename):
                # New file: (re)load its lines and restart the scan position.
                curr_filename = local_readme_filename
                logging.info('Reading {0}'.format(temp_abstracted_markdown_file_dir + curr_filename))
                with open (temp_abstracted_markdown_file_dir + curr_filename, "r", encoding='utf-8') as myfile:
                    # Read as is, use rstrip instead of strip to only remove trailing whitespace
                    # We want to preserve leading whitespace to avoid treating line starting with space/tab followed by #
                    # from being treated as heading
                    curr_filename_lines = myfile.readlines()
                    curr_filename_lines = [x.rstrip() for x in curr_filename_lines]
                curr_filename_line_number = 0
            curr_section_content_lines = []
            # Iterate through file until heading markdown is found or end of file is found
            while (curr_filename_line_number<len(curr_filename_lines)):
                if curr_filename_lines[curr_filename_line_number].startswith('#'):
                    # Found a potential heading. Is it the heading we want?
                    # Replace any type of newline (that may not count as newline in current system) with space
                    candidate_heading = curr_filename_lines[curr_filename_line_number].replace('\n',' ').strip()
                    # Perform comparison
                    if candidate_heading != abstracted_heading_markdown.strip():
                        # We've reached a new heading. The heading we wanted is not found.
                        # Possible with the case of a non-heading line starting with # being mislabeled as heading
                        logging.info('Encountered heading in document: {0}'.format(candidate_heading))
                        break
                    else:
                        logging.info('Found the heading for {0}'.format(heading_markdown))
                else:
                    curr_section_content_lines.append(curr_filename_lines[curr_filename_line_number])
                curr_filename_line_number += 1
            curr_section_content = ' '.join(curr_section_content_lines)
            curr_section_content_w_o_tags = extract_text_from_markdown_snippet(curr_section_content)
            # logging.debug('Content of {0}'.format(heading_markdown))
            # logging.debug(curr_section_content)
            # logging.debug('After markdown removal')
            # logging.debug(curr_section_content_w_o_tags)
            # NOTE(review): DataFrame.set_value() was deprecated and later
            # removed from pandas; this only runs on old pandas versions.
            headings.set_value(i,'content_text_w_o_tags',curr_section_content_w_o_tags)
        df_to_save = headings[['file_id','section_id','content_text_w_o_tags']]
        df_to_save.to_sql(name=content_table, con=conn, if_exists='replace', index=False)
    except Error as e:
        logging.exception(e)
    except Exception as e:
        logging.exception(e)
    finally:
        conn.close()
    logging.info("Loading of section contents has been completed")
'''
Updated section extractor. Checks for underline-style H1 and H2
'''
def extract_section_from_abstracted_files_v2(temp_abstracted_markdown_file_dir, db_filename, overview_table, content_table):
    """Extract the content of every section listed in `overview_table` from
    the abstracted README files and append it to `content_table`.

    Updated section extractor: in addition to '#'-style ATX headings it also
    recognizes underline-style H1 ('===') and H2 ('---') headings.

    Fixes vs. the original: `overview_table` and `content_table` are now
    actually used — the original hard-coded 'target_section_overview' and
    'target_section_content', silently ignoring both parameters (its sibling
    v1 function does use them) — and the removed-in-modern-pandas
    DataFrame.set_value() call is replaced by `.at`.
    """
    conn = sqlite3.connect(db_filename)
    try:
        logging.info("Fetching information of section to extract and load")
        headings = pandas.read_sql("""
            SELECT file_id, section_id, local_readme_file, heading_markdown, abstracted_heading_markdown, heading_text, NULL as content_text_w_o_tags
            FROM {0}
            ORDER BY file_id, section_id""".format(overview_table), conn)
        curr_filename = None
        curr_filename_lines = None
        curr_filename_line_number = 0
        # Section definition: lines between current heading and next, regardless
        # of level. Can't simply JOIN the heading list against the file contents,
        # as a file may contain several headings with the same text and level
        # (e.g. multiple "Example" subheadings, one per method in a reference
        # section) -- so each file is walked sequentially instead.
        for i, r in headings.iterrows():
            heading_already_found = False
            local_readme_filename = r[2]
            heading_markdown = r[3]
            abstracted_heading_markdown = r[4]
            logging.info('Searching for abstracted heading: {0}'.format(abstracted_heading_markdown))
            heading_text = r[5]
            if (curr_filename is None) or (curr_filename != local_readme_filename):
                # New file: (re)load its lines and restart the scan position.
                curr_filename = local_readme_filename
                logging.info('Reading {0}'.format(temp_abstracted_markdown_file_dir + curr_filename))
                with open(temp_abstracted_markdown_file_dir + curr_filename, "r", encoding='utf-8', errors='backslashreplace') as myfile:
                    # rstrip (not strip) so leading whitespace survives: a line
                    # starting with space/tab followed by '#' must not be
                    # mistaken for a heading.
                    curr_filename_lines = myfile.readlines()
                    curr_filename_lines = [x.rstrip() for x in curr_filename_lines]
                curr_filename_line_number = 0
            curr_section_content_lines = []
            # Walk forward until the next heading (or EOF); everything in
            # between is this section's content. If a candidate heading matches
            # the one we want, set `heading_already_found` so a later heading
            # with the same string is treated as a *different* heading.
            while curr_filename_line_number < len(curr_filename_lines):
                line = curr_filename_lines[curr_filename_line_number]
                if line.startswith('#'):
                    # Potential heading, starting with '#'. Is it the one we want?
                    candidate_heading = line.replace('\n', ' ').strip()
                    if (candidate_heading != abstracted_heading_markdown.strip()) or heading_already_found:
                        logging.info('Searching for {0}. Encountered new heading in document: {1}'.format(abstracted_heading_markdown, candidate_heading))
                        break
                    logging.info('Found the heading for {0}'.format(heading_markdown))
                    heading_already_found = True
                elif (curr_filename_line_number < len(curr_filename_lines) - 1 and
                        curr_filename_lines[curr_filename_line_number + 1].startswith('===')):
                    # Potential H1, in underline markdown style.
                    candidate_heading = line.replace('\n', ' ').strip()
                    if ('# ' + candidate_heading) != abstracted_heading_markdown.strip() or heading_already_found:
                        logging.info('Encountered candidate underline-style H1 in document: {0}'.format(candidate_heading))
                        # Skip next line (which is the underline)
                        curr_filename_line_number += 1
                        break
                    logging.info('Found the heading for {0}'.format(heading_markdown))
                    heading_already_found = True
                elif (curr_filename_line_number < len(curr_filename_lines) - 1 and
                        curr_filename_lines[curr_filename_line_number + 1].startswith('---')):
                    # Potential H2, in underline markdown style.
                    candidate_heading = line.replace('\n', ' ').strip()
                    if ('## ' + candidate_heading) != abstracted_heading_markdown.strip() or heading_already_found:
                        logging.info('Encountered candidate underline-style H2 in document: {0}'.format(candidate_heading))
                        # Skip next line (which is the underline)
                        curr_filename_line_number += 1
                        break
                    logging.info('Found the heading for {0}'.format(heading_markdown))
                    heading_already_found = True
                else:
                    curr_section_content_lines.append(line)
                # Proceed to next line
                curr_filename_line_number += 1
            curr_section_content = ' '.join(curr_section_content_lines)
            curr_section_content_w_o_tags = extract_text_from_markdown_snippet(curr_section_content)
            # .at is the supported scalar setter (set_value was removed).
            headings.at[i, 'content_text_w_o_tags'] = curr_section_content_w_o_tags
        df_to_save = headings[['file_id', 'section_id', 'content_text_w_o_tags']]
        # Use append when saving since the table is already emptied at the beginning
        df_to_save.to_sql(name=content_table, con=conn, if_exists='append', index=False)
    except Error as e:
        logging.exception(e)
    except Exception as e:
        logging.exception(e)
    finally:
        conn.close()
    logging.info("Loading of section contents has been completed")
'''
Extracts headings from unprocessed README files in a specified directory.
To reduce false positives (lines starting with # that isn't actually a heading, such as comment lines in code snippets),
abstract out code section blocks.
Perform no other abstraction to allow headings such as "Section 2", "Section 3", etc. to be extracted as is
for later reference / manual checking (instead of having everything turned into "Section @abstr_number").
'''
def extract_headings_from_files_in_directory(target_readme_file_dir, db_filename, overview_table_name):
    """Extract candidate headings from every README file in a directory.

    Handles both ATX headings ('#'-prefixed) and setext headings
    (underlined with '===' for H1 or '---' for H2). Code sections are
    abstracted out first to reduce false positives from '#' comment lines
    inside code snippets.

    Parameters:
        target_readme_file_dir: directory of README files named
            '<username>.<repo>.md' (used to reconstruct the repo URL).
        db_filename: SQLite database file to save the overview into.
        overview_table_name: table that is emptied and refilled.

    Returns:
        DataFrame with one row per detected heading.
    """
    overview = DataFrame(columns=['section_id', 'file_id', 'url', 'local_readme_file', 'heading_markdown',
                                  'abstracted_heading_markdown', 'heading_text', 'abstracted_heading_text',
                                  'heading_level'])
    file_id = 1
    for filename in os.listdir(target_readme_file_dir):
        # Filename encodes '<username>.<repo>'; used to construct the repo URL
        filename_w_o_ext = os.path.splitext(filename)[0]
        s = filename_w_o_ext.split('.',1)
        username = s[0]
        repo_name = s[1]
        url = 'https://github.com/{0}/{1}'.format(username, repo_name)
        with open(target_readme_file_dir + filename, 'r', encoding='utf-8', errors='backslashreplace') as f:
            try:
                logging.info("Searching for candidate headings in file {0}".format(filename))
                content = f.read()
                # Abstract code sections only, so '#' lines inside snippets are
                # not mistaken for headings, while genuine headings that contain
                # numbers (e.g. '# Section 1') are left untouched.
                content2 = abstract_out_code_section(content)
                content_lines = content2.splitlines()
                # Start at first nonempty line index
                curr_filename_line_number = next(i for i, j in enumerate(content_lines) if j)
                section_id = 1
                while (curr_filename_line_number<len(content_lines)):
                    found_candidate_heading = False
                    line = content_lines[curr_filename_line_number]
                    if line.startswith('#'):
                        # ATX heading: level = number of leading '#'
                        heading_level = len(re.search('^#+', line).group(0))
                        heading_markdown = line
                        found_candidate_heading = True
                    elif ((curr_filename_line_number<(len(content_lines)-1))
                          and (content_lines[curr_filename_line_number+1].startswith('---'))):
                        # H2 in underline markdown style
                        heading_level = 2
                        heading_markdown = '## ' + line
                        found_candidate_heading = True
                        # Skip next line (i.e. the underline)
                        curr_filename_line_number = curr_filename_line_number + 1
                    elif ((curr_filename_line_number<(len(content_lines)-1))
                          and (content_lines[curr_filename_line_number+1].startswith('==='))):
                        # H1 in underline markdown style
                        heading_level = 1
                        heading_markdown = '# ' + line
                        found_candidate_heading = True
                        # Skip next line (i.e. the underline)
                        curr_filename_line_number = curr_filename_line_number + 1
                    curr_filename_line_number = curr_filename_line_number + 1
                    # If heading is found
                    if found_candidate_heading:
                        logging.debug("Found candidate heading: {0}".format(line))
                        heading_text = extract_text_in_heading_markdown(heading_markdown)
                        abstracted_heading_markdown = abstract_text(heading_markdown).replace('\n', ' ').strip()
                        '''
                        Seems markdowner sometimes don't convert markdown reference-style link into HTML link if given only 1 line
                        (it gets converted if it's part of the text). Thus, manually apply regex to convert
                        any remaining markdown link
                        '''
                        abstracted_heading_markdown = re.sub('\[(.+)\]\[(.+)\]', r'@abstr_hyperlink', abstracted_heading_markdown)
                        logging.debug("After abstraction: {0}".format(abstracted_heading_markdown))
                        abstracted_heading_text = extract_text_in_heading_markdown(abstracted_heading_markdown)
                        overview = overview.append({'section_id':section_id, 'file_id':file_id, 'url':url, 'local_readme_file':filename,
                                                    'heading_markdown':heading_markdown,
                                                    'abstracted_heading_markdown':abstracted_heading_markdown, 'heading_text':heading_text,
                                                    'abstracted_heading_text':abstracted_heading_text,
                                                    'heading_level':heading_level}, ignore_index=True)
                        section_id = section_id + 1
            except Exception as e:
                logging.exception(e)
        file_id = file_id + 1
    conn = sqlite3.connect(db_filename)
    try:
        c = conn.cursor()
        logging.info("Saving section overviews to database")
        # Delete existing data
        c.execute('DELETE FROM {0}'.format(overview_table_name))
        conn.commit()
        # Fix: save into the table the caller asked for (previously
        # hard-coded to 'target_section_overview', silently ignoring
        # overview_table_name) and append after the explicit DELETE above,
        # consistent with load_section_overview_from_csv.
        overview.to_sql(name=overview_table_name, con=conn, if_exists='append', index=False)
        conn.commit()
        logging.info("Section headings loaded into database")
    except Error as e:
        logging.exception(e)
    except Exception as e:
        logging.exception(e)
    finally:
        conn.close()
    return overview
def load_section_overview_from_csv(input_filename_csv, db_filename, target_overview_table_name):
    """Load a manually-labeled section-overview CSV into the database.

    The CSV must have columns section_id, file_id, url, heading_markdown,
    section_code. Derived columns (local README filename, plain heading
    text, heading level, abstracted variants) are computed before saving.
    The target table is emptied first; rows whose markdown is just '##'
    (horizontal rules) are deleted afterwards.
    """
    df = pandas.read_csv(input_filename_csv, header=0, delimiter=',',
                         names=['section_id','file_id','url','heading_markdown','section_code'])
    # Force UTF-8-capable stdout/stderr.
    # NOTE(review): this rebinds sys.stdout/sys.stderr globally as a side effect.
    if sys.stdout.encoding != 'utf-8':
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
    if sys.stderr.encoding != 'utf-8':
        sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')
    # e.g. 'https://github.com/user/repo' -> 'user.repo.md'
    readme_file_generator = lambda x: x.replace('https://github.com/','').replace('/','.') + '.md'
    df['local_readme_file'] = df['url'].apply(readme_file_generator)
    df['heading_text'] = df['heading_markdown'].apply(extract_text_in_heading_markdown)
    # In markdown, # = heading level 1, ## = heading level 2, etc.
    df['heading_level'] = df['heading_markdown'].apply(lambda x : len(re.search('^#+', x).group(0)))
    df['abstracted_heading_markdown'] = df['heading_markdown'].apply(lambda x : abstract_text(x).replace('\n', ' ').strip())
    df['abstracted_heading_text'] = df['abstracted_heading_markdown'].apply(extract_text_in_heading_markdown)
    # Don't convert to int, as data contains '-' for 'not in any class'
    df['section_code'] = df['section_code'].apply(lambda x : merge_classes_1_and_2(x))
    try:
        logging.info('Emptying table and loading overviews')
        conn = sqlite3.connect(db_filename)
        # Delete existing data
        c = conn.cursor()
        c.execute('DELETE FROM {0}'.format(target_overview_table_name))
        conn.commit()
        df.to_sql(target_overview_table_name, conn, if_exists='append', index=False)
        logging.info('Loading completed')
        logging.info(df.shape)
        logging.info('Deleting entries with only \'##\' as text')
        # Delete '##' entries that correspond to horizontal lines and are all labeled as '-'
        c.execute('DELETE FROM {0} WHERE heading_markdown=\'##\''.format(target_overview_table_name))
        conn.commit()
    except Exception as e:
        logging.exception(e)
    finally:
        conn.close()
def delete_existing_section_content_data(temp_abstracted_markdown_file_dir, db_filename, section_content_table_name):
    """Reset the temp abstracted-markdown directory and empty the content table."""
    # Safety guard: refuse to recursively delete anything outside the temp area.
    if not temp_abstracted_markdown_file_dir.startswith('../../temp'):
        logging.info('Please ensure that temp_abstracted_markdown_file_dir config variable is set correctly')
        sys.exit()
    # Recreate the temp directory from scratch.
    shutil.rmtree(temp_abstracted_markdown_file_dir)
    os.mkdir(temp_abstracted_markdown_file_dir)
    connection = sqlite3.connect(db_filename)
    try:
        cursor = connection.cursor()
        logging.info("Cleaning existing data")
        cursor.execute('DELETE FROM {0}'.format(section_content_table_name))
        connection.commit()
    except Error as e:
        logging.exception(e)
    except Exception as e:
        logging.exception(e)
    finally:
        connection.close()
def retrieve_readme_filenames_from_db(db_filename, section_overview_table_name):
    """Return the distinct README filenames recorded in the overview table.

    Filenames are ordered by their first occurrence (file_id, then
    section_id). Returns a list of 1-tuples as produced by the sqlite3
    cursor, or an empty list if the query fails (the error is logged).
    """
    filenames = []  # fix: previously unbound (UnboundLocalError) when the query failed
    conn = sqlite3.connect(db_filename)
    try:
        c = conn.cursor()
        logging.info("Fetching list of distinct filenames")
        # GROUP BY + MIN keeps the "first occurrence" ordering while staying a
        # legal query: SQLite rejects SELECT DISTINCT combined with ORDER BY
        # columns that are not part of the result set.
        result = c.execute("""
            SELECT local_readme_file
            FROM {0}
            GROUP BY local_readme_file
            ORDER BY MIN(file_id), MIN(section_id)""".format(section_overview_table_name))
        filenames = result.fetchall()
        conn.commit()
    except Error as e:
        logging.exception(e)
    except Exception as e:
        logging.exception(e)
    finally:
        conn.close()
    return filenames
|
[
"artha.prana@gmail.com"
] |
artha.prana@gmail.com
|
9154aa2b133d48042c380c3ba3ae1741743c911e
|
d8e0586e54fec1b8717caca960a565208185537d
|
/linecook/recipes/python.py
|
e1c5a1dc65d039ccb3f9d04c6a4258a8a54f212d
|
[
"BSD-3-Clause"
] |
permissive
|
tonysyu/linecook
|
2c6443e7cdeaf07e5653bd158fdcdc85d7ef59b9
|
fa7928347bb103d2ea1b25baefc23e5a11ff5d6c
|
refs/heads/master
| 2021-06-09T22:09:43.126376
| 2019-07-19T01:39:42
| 2019-07-19T01:39:42
| 134,798,271
| 0
| 0
|
NOASSERTION
| 2019-07-19T01:39:43
| 2018-05-25T03:24:41
|
Python
|
UTF-8
|
Python
| false
| false
| 951
|
py
|
# -*- coding: utf-8 -*-
"""
Example of linecook recipe for python code.
This is a toy example: Actual syntax highlighting isn't possible since linecook
doesn't (easily) store state between different lines, which prevents proper
highlighting of things like multi-line strings.
"""
from __future__ import unicode_literals
from toolz.functoolz import compose
from .. import patterns
from ..transforms import color_text, CountLines, partition
# Pattern matching all Python keywords for highlighting.
# NOTE(review): the 'w:' prefix is linecook pattern syntax -- presumably
# word-boundary matching; confirm against the linecook patterns module.
PYTHON_KEYWORDS = (
    r"w:(False|class|finally|is|return|None|continue|for|lambda|try|True|def|"
    r"from|nonlocal|while|and|del|global|not|with|as|elif|if|or|yield|assert|"
    r"else|import|pass|break|except|in|raise)"
)
# Toy recipe: text matching the strings pattern is colored yellow wholesale;
# everywhere else keywords are colored red and numbers cyan. CountLines adds
# line numbering as a final step.
recipe = [
    partition(
        patterns.strings,
        on_match=color_text(".*", color="yellow"),
        on_mismatch=compose(
            color_text(PYTHON_KEYWORDS, color="red"),
            color_text(patterns.number, color="cyan"),
        ),
    ),
    CountLines(),
]
|
[
"tsyu80@gmail.com"
] |
tsyu80@gmail.com
|
e54f4271094ab99694db7a64bcbaa40a2911fab0
|
8838d0c162be464ee46c3f8ef1b6c2b35c31aef1
|
/report/report_benef_inter.py
|
356c48263e0e2ca1011d49a1ae6eb58a5ba084da
|
[] |
no_license
|
crakkk/mcisogem_isa
|
d152c7ef8f459b728eb60fbbba3ea05169a49900
|
25281e7ffc6fe48f82c33477edf9501adb35b634
|
refs/heads/master
| 2020-12-03T20:15:34.356743
| 2016-09-10T21:27:42
| 2016-09-10T21:27:42
| 67,897,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class report_benef_inter(osv.osv):
    """Read-only reporting model: beneficiary counts per intermediary.

    Backed by the SQL view 'report_benef_inter' (re)created in init(),
    which counts beneficiaries grouped by courtier, exercice and police.
    """
    _name = "report.benef.inter"
    _description = "Graphes sur les Bénéficiaires"
    # No table is auto-created: this model maps onto the SQL view below.
    _auto = False
    _columns = {
        'intermediaire_id' : fields.many2one('mcisogem.courtier', 'Intermediaire', readonly=True),
        'police_id': fields.many2one('mcisogem.police', 'Police', readonly=True),
        'exercice_id': fields.many2one('mcisogem.exercice', 'Exercice', readonly=True),
        'nbr_benef_total': fields.integer('Total Bénéficiaires', readonly=True)
    }
    _depends = {'mcisogem.courtier': ['id','name'] ,'mcisogem.police' : ['id','name'] , 'mcisogem.exercice' : ['id','name','date_debut','date_fin'] }
    def init(self, cr):
        # Drop and recreate the aggregation view backing this model.
        tools.drop_view_if_exists(cr, 'report_benef_inter')
        cr.execute("""
            create or replace view report_benef_inter as (
                select
                    min(mcisogem_benef.id) AS id,
                    count(mcisogem_benef.id) as nbr_benef_total,
                    mcisogem_courtier.id as intermediaire_id,
                    mcisogem_exercice.id as exercice_id,
                    mcisogem_police.id as police_id
                from
                    public.mcisogem_courtier, public.mcisogem_police, public.mcisogem_benef, public.mcisogem_exercice
                where
                    mcisogem_courtier.id = mcisogem_police.courtier_id and mcisogem_exercice.id = mcisogem_police.exercice_id and mcisogem_police.id = mcisogem_benef.police_id
                group by
                    mcisogem_courtier.id, mcisogem_exercice.id, mcisogem_police.id
            )""")
|
[
"armand.kablan@smile.ci"
] |
armand.kablan@smile.ci
|
a13f11293b8781807d9414d5c6d272a2241cc593
|
71ef64cc45c3e6ac94d694f0d5ec4a5f2f52015c
|
/others/char_count.py
|
9cc9858964cfde79d6f938e429e10757694361dc
|
[] |
no_license
|
joyc/python-examples
|
a6dfcf53ba26f2002e7d97bb22be619702617e6e
|
ee02896cc8c900dc8879c8099a0f71d021a3aa98
|
refs/heads/master
| 2020-07-01T19:19:10.873250
| 2019-03-18T15:56:43
| 2019-03-18T15:56:43
| 74,264,555
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
# Count occurrences of each character in a string.
mystring = 'hello'
char_count = dict()
for char in mystring:
    # dict.get with a default avoids a KeyError on the first occurrence
    char_count[char] = char_count.get(char, 0) + 1
# Fix: the original used the Python 2-only 'print x' statement; the call
# form below produces the same output and also runs on Python 3.
print(char_count)
|
[
"pvh@sanbi.ac.za"
] |
pvh@sanbi.ac.za
|
02014af7fa8fae74ccbdbb56d8e4eaceebaacc21
|
0e3e1644bfc5b58977484ae88a75cac6b64f0ced
|
/scripts/optimizeBtagEff.py
|
25b0330294fa56d201b3668df159b94261820e86
|
[] |
no_license
|
mdunser/topskim
|
c05f998744acdb6cbde01489e40b7543522bf2e5
|
a71ea05e2feabf5fffcb28a3de0ca44a5526efe4
|
refs/heads/marc_dev
| 2020-04-30T02:03:08.740389
| 2019-10-25T15:53:39
| 2019-10-25T15:53:39
| 176,547,551
| 0
| 0
| null | 2020-01-08T10:56:17
| 2019-03-19T15:49:17
|
Python
|
UTF-8
|
Python
| false
| false
| 7,144
|
py
|
import ROOT
import sys
import numpy as np
from optimizeLeptonIsolation import canvasHeader
def getEfficiency(url,flavList,ptRange):
    """Fill b-tagging efficiency histograms for jets of the given flavours.

    Reads the 'tree' TTree from `url`, selects generator-matched jets with
    |eta| < 2.0, gen pT inside `ptRange` and a flavour code in `flavList`,
    and fills numerator/denominator histograms for two CSVv2 working points
    (loose: csv > 0.81, tight: csv > 0.91). Entries are weighted by the
    per-event ncollWgt branch.

    Returns (gr_eff, histos): efficiency vs CSV cut as a TGraphAsymmErrors,
    and the dict of all filled histograms.
    """
    t=ROOT.TChain('tree')
    t.AddFile(url)
    baseHistos={'csv'     : ROOT.TH1F('csv',';CSV;Jets',50,0,1),
                'csvdisc' : ROOT.TH1F('csv',';CSV;Jets',20,0,1),
                'pt'      : ROOT.TH1F('pt',';Transverse momentum [GeV];Jets',20,30,250),
                'eta'     : ROOT.TH1F('eta',';Pseudo-rapidity;Jets',20,0,2.0),
                'cent'    : ROOT.TH1F('centrality',';Centrality;Jets',10,0,100) }
    histos={}
    # Clone denominator/numerator/tight-numerator variants of each template;
    # SetDirectory(0) detaches them from the current ROOT file, Sumw2 keeps
    # weighted errors.
    for key in baseHistos:
        for pfix in ['den','num','numtight']:
            histos[key+'_'+pfix]=baseHistos[key].Clone(key+'_'+pfix)
            histos[key+'_'+pfix].SetDirectory(0)
            histos[key+'_'+pfix].Sumw2()
    for i in range(t.GetEntries()):
        t.GetEntry(i)
        ncoll=t.ncollWgt
        for j in range(t.nbjet):
            genpt=t.bjet_genpt[j]
            geneta=t.bjet_geneta[j]
            if(abs(geneta)>2.0) : continue
            if genpt<ptRange[0] : continue
            if genpt>ptRange[1] : continue
            bflav=t.bjet_flavorB[j]
            if not abs(bflav) in flavList : continue
            # Clamp the discriminator into [0, 1]
            csvVal=min(1.,max(0.,t.bjet_csvv2[j]))
            #for efficiency extraction: every bin below the jet's CSV value is
            #filled in the numerator, and all bins in the denominator, so
            #num/den at bin x is the efficiency of a cut at x
            xbin=histos['csv_num'].GetXaxis().FindBin(csvVal)
            for ix in range(histos['csv_num'].GetNbinsX()+1):
                xcen=histos['csv_num'].GetXaxis().GetBinCenter(ix+1)
                if ix<xbin:
                    histos['csv_num'].Fill(xcen,ncoll)
                histos['csv_den'].Fill(xcen,ncoll)
            histos['csvdisc_den'].Fill(csvVal,ncoll)
            histos['pt_den'].Fill(genpt,ncoll)
            histos['eta_den'].Fill(abs(geneta),ncoll)
            histos['cent_den'].Fill(t.cenbin,ncoll)
            # Loose working point
            if csvVal>0.81:
                histos['csvdisc_num'].Fill(csvVal,ncoll)
                histos['pt_num'].Fill(genpt,ncoll)
                histos['eta_num'].Fill(abs(geneta),ncoll)
                histos['cent_num'].Fill(t.cenbin,ncoll)
            # Tight working point
            if csvVal>0.91:
                histos['csvdisc_numtight'].Fill(csvVal,ncoll)
                histos['pt_numtight'].Fill(genpt,ncoll)
                histos['eta_numtight'].Fill(abs(geneta),ncoll)
                histos['cent_numtight'].Fill(t.cenbin,ncoll)
    gr_eff=ROOT.TGraphAsymmErrors()
    gr_eff.Divide(histos['csv_num'],histos['csv_den'])
    return gr_eff,histos
def showEfficiencyCurves(grColl,name):
    """Draw a collection of efficiency graphs on one log-y canvas.

    Saves '<name>.png' and '<name>.pdf' in the working directory.
    """
    c=ROOT.TCanvas('c','c',500,500)
    c.SetTopMargin(0.05)
    c.SetLeftMargin(0.12)
    c.SetRightMargin(0.03)
    c.SetBottomMargin(0.1)
    c.SetGridy()
    c.SetLogy()
    mg=ROOT.TMultiGraph()
    for g in grColl: mg.Add(g,'p')
    mg.Draw('ap')
    mg.GetXaxis().SetTitle('CSV')
    mg.GetYaxis().SetTitle('Efficiency')
    mg.GetXaxis().SetRangeUser(0,1)
    mg.GetYaxis().SetRangeUser(1e-4,1)
    leg=c.BuildLegend(0.15,0.15,0.4,0.3)
    leg.SetFillStyle(0)
    leg.SetBorderSize(0)
    leg.SetTextFont(42)
    leg.SetTextSize(0.035)
    # Horizontal guide lines at fixed efficiency values
    l=ROOT.TLine()
    l.SetLineStyle(0)
    l.SetLineColor(ROOT.kGray+1)
    for y in [0.001,0.01,0.1]:
        l.DrawLine(0,y,1,y)
    canvasHeader()
    for ext in ['png','pdf']:
        c.SaveAs('%s.%s'%(name,ext))
def showHistos(histos,flav):
    """Plot, per variable, the loose/tight efficiencies over the distribution.

    For every '<var>_den' histogram in `histos`, draws the normalized
    denominator distribution plus two efficiency graphs (num/den and
    numtight/den) and saves '<var>_den_<flav>.png/.pdf'.
    """
    c=ROOT.TCanvas('c','c',500,500)
    c.SetTopMargin(0.05)
    c.SetLeftMargin(0.12)
    c.SetRightMargin(0.03)
    c.SetBottomMargin(0.1)
    c.SetGridy()
    c.SetLogy()
    for key in histos:
        # Only iterate denominators; numerators are looked up by key name
        if '_num' in key: continue
        frame=histos[key].Clone('frame')
        frame.Reset('ICE')
        frame.GetYaxis().SetTitle('Efficiency or PDF')
        frame.GetYaxis().SetRangeUser(1e-3,1)
        frame.Draw()
        gr_eff=ROOT.TGraphAsymmErrors()
        gr_eff.SetMarkerStyle(20)
        gr_eff.Divide(histos[key.replace('_den','_num')],histos[key])
        gr_efftight=ROOT.TGraphAsymmErrors()
        gr_efftight.SetMarkerStyle(24)
        gr_efftight.SetMarkerColor(ROOT.kGray+1)
        gr_efftight.SetLineColor(ROOT.kGray+1)
        gr_efftight.Divide(histos[key.replace('_den','_numtight')],histos[key])
        # Normalize the denominator to unit area so it reads as a PDF.
        # NOTE(review): this mutates the histogram passed in by the caller.
        histos[key].Scale(1./histos[key].Integral())
        histos[key].Draw('histsame')
        histos[key].SetFillStyle(1001)
        histos[key].SetFillColor(ROOT.kGray)
        histos[key].SetLineColor(1)
        gr_eff.Draw('p')
        gr_efftight.Draw('p')
        # Legend placement avoids overlapping the b-jet turn-on curve
        if flav!='b':
            leg=ROOT.TLegend(0.65,0.93,0.95,0.75)
        else:
            leg=ROOT.TLegend(0.65,0.75,0.95,0.5)
        leg.SetFillStyle(0)
        leg.SetBorderSize(0)
        leg.SetTextFont(42)
        leg.AddEntry(gr_eff,'Loose w.p.','ep')
        leg.AddEntry(gr_efftight,'Tight w.p.','ep')
        leg.AddEntry(histos[key],'Distribution','l')
        leg.Draw()
        canvasHeader(extraTxt=[flav])
        c.RedrawAxis()
        c.Modified()
        c.Update()
        for ext in ['png','pdf']:
            c.SaveAs('%s_%s.%s'%(key,flav,ext))
        frame.Delete()
        gr_eff.Delete()
def main():
    """Derive b-tagging efficiency curves and tune the CSV working point.

    Usage: optimizeBtagEff.py <baseDir> [plottag]

    Scans CSV cut values for the one whose light/unmatched mistag
    efficiency is closest to the target, prints the chosen cut and the
    per-flavour efficiencies, and saves comparison plots.
    """
    ROOT.gROOT.SetBatch(True)
    ROOT.gStyle.SetOptStat(0)
    ROOT.gStyle.SetOptTitle(0)
    baseDir=sys.argv[1]
    plottag=''
    if len(sys.argv)>2:
        plottag=sys.argv[2]
    mixSig='TTJets_TuneCP5_HydjetDrumMB_5p02TeV-amcatnloFXFX-pythia8.root'
    ppSample='/eos/cms/store/cmst3/group/hintt/PbPb2018_skim27Apr/TT_TuneCP5_5p02TeV-powheg-pythia8.root'
    ptRange=[30,500]
    #get the efficiency curves per jet flavour category
    csv={}
    for tag,url,flavList,ms,ci in [ ('b',         baseDir+mixSig, [5],        20, 1),
                                    ('udsg',      baseDir+mixSig, [1,2,3,21], 24, 1),
                                    ('unmatched', baseDir+mixSig, [0],        21, 1),
                                    ]:
        csv[tag],histos=getEfficiency(url,flavList,ptRange)
        csv[tag].SetTitle(tag)
        csv[tag].SetMarkerStyle(ms)
        csv[tag].SetLineColor(ci)
        csv[tag].SetMarkerColor(ci)
        showHistos(histos,tag)
    #tune the working point: target mistag efficiency for light/unmatched jets
    #(fix: the original assigned wpEff=0.05 and immediately overwrote it)
    wpEff=0.01
    bestCut=0.0
    bestEff=csv['udsg'].Eval(bestCut)
    for x in np.arange(bestCut,1,0.01):
        eff=max(csv['udsg'].Eval(x),csv['unmatched'].Eval(x))
        if abs(eff-wpEff)>abs(bestEff-wpEff): continue
        bestCut=x
        bestEff=eff
    # Fix: use Python 3-compatible print calls (the original used the
    # Python 2-only print statement); output is unchanged.
    print('<'*50)
    print('csv>',bestCut)
    for tag in csv:
        print(tag,'eff=',csv[tag].Eval(bestCut))
    print('<'*50)
    #compare the curves; fix: skip graph names that were never produced above
    #(e.g. the centrality-binned variants) which previously raised a KeyError
    for name,grNames in [ ('csveff',       ['b', 'udsg', 'unmatched']),
                          ('beff',         ['b', 'b (0-30)', 'b (30-100)']),
                          ('udsgeff',      ['udsg', 'udsg (0-30)', 'udsg (30-100)']),
                          ('unmatchedeff', ['unmatched', 'unmatched (0-30)', 'unmatched (30-100)']),
                          ]:
        showEfficiencyCurves(grColl=[csv[x].Clone() for x in grNames if x in csv],name=name+plottag)
|
[
"psilva@cern.ch"
] |
psilva@cern.ch
|
531af9c3a0cdf5bc80b86783dc49f62e0cfb097b
|
f8a3662d17abc8f54dc563faafabfc57e7f414de
|
/13-handles-json-file-with-pandas/analysis.py
|
3858f6abf09d18e0b9499a11c5eceebd383d2b27
|
[] |
no_license
|
zengfhui/practice
|
d5f6f5c2da4dd60c549e510476ee7e984280c1e6
|
896d8ed15819deee0117da261382ebf5afdb75a6
|
refs/heads/master
| 2021-08-19T08:39:58.245583
| 2017-11-25T15:01:34
| 2017-11-25T15:01:34
| 109,463,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import json
import pandas as pd
from pandas import Series,DataFrame
def analysis(file, user_id):
    """Return (times, minutes) a user practiced, from a JSON records file.

    Parameters:
        file: path or file-like object accepted by pandas.read_json; the
            records must contain 'user_id' and 'minutes' fields.
        user_id: id whose rows are counted and summed.

    Returns:
        (times, minutes): number of matching records and total minutes;
        (0, 0) if the file cannot be read or parsed.
    """
    times = 0
    minutes = 0
    try:
        df = pd.read_json(file)
        user_rows = df[df['user_id'] == user_id]
        times = user_rows['minutes'].count()
        minutes = user_rows['minutes'].sum()
    except (ValueError, OSError, KeyError):
        # Narrowed from a bare 'except:' so genuine programming errors
        # surface; unreadable/malformed input still yields the fallback.
        times = 0
        minutes = 0
    return times, minutes
|
[
"1115342730@qq.com"
] |
1115342730@qq.com
|
811b214c40bcecffe5cf15cae4e7d2bd376c6213
|
fb663edbb89a72152b416d4a67352fa750f2d00b
|
/5/py14.py
|
149ab46cb6e525e7cb6f5c892d7ebbb8e08e4a47
|
[] |
no_license
|
eep0x10/Pyton-Basics
|
1260396a395b47f6c6778e604a4ec614b6dd4381
|
607db6349854239702c64078c8b4892af7d75fcb
|
refs/heads/master
| 2022-02-22T21:11:53.572887
| 2019-08-26T18:08:36
| 2019-08-26T18:08:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
"""
Quadrado mágico. Um quadrado mágico é aquele dividido em linhas e colunas, com um número em cada posição e no qual a soma das linhas, colunas e diagonais é a mesma. Por exemplo, veja um quadrado mágico de lado 3, com números de 1 a 9:
8 3 4
1 5 9
6 7 2
Elabore uma função que identifica e mostra na tela todos os quadrados mágicos com as características acima. Dica: produza todas as combinações possíveis e verifique a soma quando completar cada quadrado. Usar um vetor de 1 a 9 parece ser mais simples que usar uma matriz 3x3.
"""
|
[
"ericepaulo@hotmail.com"
] |
ericepaulo@hotmail.com
|
c301c003b252af8aab7125ac03bb7faed2de4675
|
0fc1bc9f5cf22c99cd5c73cfeaa986f5bd454541
|
/djangoProject6/wiki/urls.py
|
a5280cedbcc3eec44b75e8552d3e079a586fade4
|
[] |
no_license
|
sahil1-hue/wiki-v2
|
bff178417f844d8879d6fa07d5c64d7e3e20ee18
|
e68187a8d43a865db7ab7b709af9cd067e1a6d50
|
refs/heads/main
| 2023-04-01T18:11:49.490361
| 2021-04-05T15:46:46
| 2021-04-05T15:46:46
| 354,885,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
from django.urls import path
from .views import*
# URL routes for the wiki app: the root path serves the search view.
urlpatterns = [
    path('', wikiSearch,name="wikiHome"),
]
|
[
"79180872+sahil1-hue@users.noreply.github.com"
] |
79180872+sahil1-hue@users.noreply.github.com
|
412510d4ed717d689c2bceec78aa82ed7e0d9782
|
e6dab5aa1754ff13755a1f74a28a201681ab7e1c
|
/.parts/lib/django-1.2/tests/urls.py
|
b3d291b5731dc752477c54b07af48367fb2543bd
|
[] |
no_license
|
ronkagan/Euler_1
|
67679203a9510147320f7c6513eefd391630703e
|
022633cc298475c4f3fd0c6e2bde4f4728713995
|
refs/heads/master
| 2021-01-06T20:45:52.901025
| 2014-09-06T22:34:16
| 2014-09-06T22:34:16
| 23,744,842
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
/home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.2/tests/urls.py
|
[
"ron.y.kagan@gmail.com"
] |
ron.y.kagan@gmail.com
|
0c0bfb0727717ecdc27c2176203461c375bd6521
|
019c050973129b153ae0c026e05e1c29b4891d34
|
/bravo/tests/test_region.py
|
ffdcbda40938a6b7fe985e7c5278e3b8230fa1c8
|
[
"MIT"
] |
permissive
|
justinnoah/bravo
|
c78f0d218c50af6a78fb30abb0f8a709c9edcec6
|
29c70ece54ac249584cba8279504b1b3a1a00a56
|
refs/heads/master
| 2021-01-15T20:29:40.760763
| 2013-01-02T07:35:55
| 2013-01-02T07:35:55
| 6,447,010
| 0
| 0
|
NOASSERTION
| 2020-01-28T21:17:23
| 2012-10-29T20:23:24
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
from unittest import TestCase
from tempfile import NamedTemporaryFile
from twisted.python.filepath import FilePath
from bravo.region import Region
class TestRegion(TestCase):
    """Tests for bravo.region.Region backed by a real temporary file."""
    def setUp(self):
        # Fresh temporary file per test; FilePath wraps it for Region.
        self.temp = NamedTemporaryFile()
        self.fp = FilePath(self.temp.name)
        self.region = Region(self.fp)
    def test_trivial(self):
        # setUp/tearDown alone must not raise.
        pass
    def test_create(self):
        # create() must write an 8 KiB zeroed region header.
        # NOTE(review): NamedTemporaryFile is opened in binary mode, so on
        # Python 3 read() returns bytes and this str comparison would fail;
        # this test targets Python 2 -- confirm before porting.
        self.region.create()
        self.assertEqual(self.temp.read(), "\x00" * 8192)
|
[
"MostAwesomeDude@gmail.com"
] |
MostAwesomeDude@gmail.com
|
cb32a6f650e8ab6177d30e164cd632cff236167e
|
5dccf7ec817ee272694d8cda837ff4a781e31549
|
/Fraction-Decimal_Conversion.py
|
a535a1179ebf69d565c90af8461132dcee54f41c
|
[] |
no_license
|
lvsciagin/M-Soma-Python-Bootcamp
|
f9fa4d2c853ff127ac32e17d6e283a930171c326
|
d830772c6806adf75fcc61f5ce27ef049f19c075
|
refs/heads/master
| 2021-06-05T01:13:19.103378
| 2020-05-29T12:37:52
| 2020-05-29T12:37:52
| 94,984,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
# Print the decimal expansion of the unit fractions 1/2 through 1/9.
for denominator in range(2, 10):
    line = "1/" + str(denominator) + " = " + str(1.0 / denominator)
    print(line)
|
[
"lvsciagin@gmail.com"
] |
lvsciagin@gmail.com
|
7dd2bc8ccbbff0da11a6f065e1b1f51172604fa1
|
c13cbf814ee1ba55aa59ea888f0ca5dcbd80b595
|
/ecoSys(complete).spec
|
1124b97f548a98415b00cfdfe225a8a4a7e6dafc
|
[] |
no_license
|
donganhxauxi/ecoSys
|
23bdc1c44d826802603bd82bfdd0856bbe61883b
|
b9eabee261ed42a2f3f1e174cbf44bb892e61b33
|
refs/heads/main
| 2023-03-18T12:01:28.322116
| 2021-03-04T02:00:45
| 2021-03-04T02:00:45
| 344,319,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
spec
|
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller build specification for ecoSys (generated by pyi-makespec).
# The Analysis/PYZ/EXE/COLLECT names are injected by PyInstaller when it
# executes this spec, so they are intentionally not imported here.
block_cipher = None
a = Analysis(['ecoSys(complete).py'],
             # NOTE(review): machine-specific absolute path -- breaks on other hosts
             pathex=['C:\\Users\\ASUS\\Desktop\\Ecosys'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# Windowed (console=False), one-folder build collected below.
exe = EXE(pyz,
          a.scripts,
          [],
          exclude_binaries=True,
          name='ecoSys(complete)',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          console=False )
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               upx_exclude=[],
               name='ecoSys(complete)')
|
[
"noreply@github.com"
] |
donganhxauxi.noreply@github.com
|
3e1f44de3ff12e43a9e423fa4fb1515a008a091a
|
a7a44019214dc7bea530a8c738b9a0901aa5d965
|
/DjangoPython/app/firstapp/views.py
|
7041eebd0b541c09054ce5382a313c86af82b485
|
[] |
no_license
|
loretavila1/djangoAPI
|
574a1abc3dab0313457818e5e9cda205ebc66cad
|
b30e918d39941bc33bb1cafd2dd301537d6bfb97
|
refs/heads/main
| 2023-04-12T16:15:13.259775
| 2021-04-28T19:07:39
| 2021-04-28T19:07:39
| 362,538,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,583
|
py
|
# Create your views here.
#IMPORT models
#IMPORT LIBRARIRES/FUNCTIONS
from django.shortcuts import render , HttpResponse
from django.http import JsonResponse
import json
#IMPORT DJANGO PASSWORD HASH GENERATOR AND COMPARE
from django.contrib.auth.hashers import make_password, check_password
from .models import Cellphones, Companies
#check_password(noHashPassword,HashedPassword) this funcion validate if the password match to the hash
#def vista(request):
# return render(request,'clase.html')
def vista(request):
    """Render the 'clase.html' template with a fixed page title."""
    #https://docs.djangoproject.com/en/3.0/ref/templates/language/#templates
    return render(request, 'clase.html', {'title': "Gran Kanán" })
def vista2(request):
    """Render the 'dos.html' template with a fixed page title."""
    #https://docs.djangoproject.com/en/3.0/ref/templates/language/#templates
    return render(request, 'dos.html', {'title': "Gran Kanán" })
def cellphones(request):
    """Return every cellphone as JSON (GET only)."""
    # Reject any verb other than GET up front.
    if request.method != 'GET':
        error = {}
        error['success'] = 'false'
        error['mesage'] = 'Wrong Method'
        return JsonResponse(error, status=400)
    payload = {}
    payload['success'] = 'true'
    payload['data'] = list(Cellphones.objects.all().values())
    return JsonResponse(payload, status=200)
def cellphonesAdd(request):
    """Create a cellphone from a JSON POST body.

    Expects keys 'phone_name', 'brand_name', 'phone_color',
    'phone_company_id'. Responds 400 on invalid JSON or wrong method.
    NOTE(review): a missing key raises KeyError, which is NOT caught here
    (only ValueError is) -- confirm whether a 500 is acceptable.
    """
    if request.method == 'POST':
        try:
            json_object = json.loads(request.body)
            newCellphone = Cellphones(name=json_object['phone_name'], brand=json_object['brand_name'], color=json_object['phone_color'], company=json_object['phone_company_id'])
            # Persists the new row (SQL INSERT into the cellphones table)
            newCellphone.save()
            responseData = {}
            responseData['success'] = 'true'
            responseData['message'] = 'Cellphone inserted'
            return JsonResponse(responseData, status=200)
        except ValueError as e:
            # Invalid JSON body (json.JSONDecodeError subclasses ValueError)
            responseData = {}
            responseData['success'] = 'false'
            responseData['message'] = 'Invalid Json'
            return JsonResponse(responseData, status=400)
    else:
        responseData = {}
        responseData['success'] = 'false'
        # NOTE(review): 'mesage' typo is part of the API response throughout
        # this module; kept for client compatibility
        responseData['mesage'] = 'Wrong Method'
        return JsonResponse(responseData, status=400)
def cellphoneDelete(request):
    """Delete the cellphone whose 'phone_id' is in the JSON body (DELETE only)."""
    if request.method == 'DELETE':
        try:
            json_object = json.loads(request.body)
            try:
                # Existence check only; the fetched object itself is unused.
                one_entry = Cellphones.objects.get(id=json_object["phone_id"])
            except:
                # NOTE(review): bare except also hides unrelated errors
                responseData = {}
                responseData['success'] = 'false'
                responseData['message'] = 'The phone_id its not valid'
                return JsonResponse(responseData, status=400)
            Cellphones.objects.filter(id=json_object["phone_id"]).delete()
            responseData = {}
            responseData['success'] = 'true'
            responseData['message'] = 'The Phone has been deleted'
            return JsonResponse(responseData, status=200)
        except ValueError as e:
            # Invalid JSON body (json.JSONDecodeError subclasses ValueError)
            responseData = {}
            responseData['success'] = 'false'
            responseData['data'] = 'Invalid Json'
            return JsonResponse(responseData, status=400)
    else:
        responseData = {}
        responseData['success'] = 'false'
        responseData['mesage'] = 'Wrong Method'
        return JsonResponse(responseData, status=400)
def cellphonesGet(request):
    """Return one cellphone looked up by 'phone_id' from a JSON POST body.

    Uses POST (not GET) because the id travels in the request body.
    """
    if request.method == 'POST':
        try:
            json_object = json.loads(request.body)
            try:
                one_entry = Cellphones.objects.get(id=json_object["phone_id"])
            except:
                # NOTE(review): bare except also hides unrelated errors
                responseData = {}
                responseData['success'] = 'false'
                responseData['message'] = 'The phone_id its not valid'
                return JsonResponse(responseData, status=400)
            responseData = {}
            responseData['success'] = 'true'
            responseData['data'] = {}
            responseData['data']['name'] = one_entry.name
            responseData['data']['brand'] = one_entry.brand
            responseData['data']['color'] = one_entry.color
            responseData['data']['company'] = one_entry.company
            return JsonResponse(responseData, status=200)
        except ValueError as e:
            # Invalid JSON body (json.JSONDecodeError subclasses ValueError)
            responseData = {}
            responseData['success'] = 'false'
            responseData['data'] = 'Invalid Json'
            return JsonResponse(responseData, status=400)
    else:
        responseData = {}
        responseData['success'] = 'false'
        responseData['mesage'] = 'Wrong Method'
        return JsonResponse(responseData, status=400)
def cellphonesGetId(request, phoneid):
    """Return one cellphone identified by the URL path parameter (GET only)."""
    if request.method == 'GET':
        try:
            one_entry = Cellphones.objects.get(id=phoneid)
        except:
            # NOTE(review): bare except also hides unrelated errors
            responseData = {}
            responseData['success'] = 'false'
            responseData['message'] = 'The phone_id its not valid'
            return JsonResponse(responseData, status=400)
        responseData = {}
        responseData['success'] = 'true'
        responseData['data'] = {}
        responseData['data']['name'] = one_entry.name
        responseData['data']['brand'] = one_entry.brand
        responseData['data']['color'] = one_entry.color
        responseData['data']['company'] = one_entry.company
        return JsonResponse(responseData, status=200)
    else:
        responseData = {}
        responseData['success'] = 'false'
        responseData['mesage'] = 'Wrong Method'
        return JsonResponse(responseData, status=400)
def cellphonesUpdate(request, phoneid):
    """Update one or more fields of an existing cellphone (POST only).

    Accepts any subset of 'phone_name', 'brand_name', 'phone_color',
    'phone_company_id' in the JSON body; absent fields are left unchanged.
    Responds 400 when the id is unknown, the JSON is invalid, no recognised
    field is present, or the method is not POST.
    """
    if request.method != 'POST':
        responseData = {}
        responseData['success'] = 'false'
        # 'mesage' typo kept: it is part of the API response module-wide
        responseData['mesage'] = 'Wrong Method'
        return JsonResponse(responseData, status=400)
    try:
        # Existence check; narrowed from a bare except (ValueError covers a
        # non-integer phoneid rejected by the pk lookup)
        Cellphones.objects.get(id=phoneid)
    except (Cellphones.DoesNotExist, ValueError):
        responseData = {}
        responseData['success'] = 'false'
        responseData['message'] = 'The phone_id its not valid'
        return JsonResponse(responseData, status=400)
    try:
        json_object = json.loads(request.body)
    except ValueError:
        # Invalid JSON body (json.JSONDecodeError subclasses ValueError)
        responseData = {}
        responseData['success'] = 'false'
        responseData['data'] = 'Invalid Json'
        return JsonResponse(responseData, status=400)
    # Map request-body keys to model field names; replaces four copy-pasted
    # try/except KeyError blocks in the original.
    field_map = {
        'phone_name': 'name',
        'brand_name': 'brand',
        'phone_color': 'color',
        'phone_company_id': 'company',
    }
    updates = {field_map[key]: json_object[key] for key in field_map if key in json_object}
    if not updates:
        responseData = {}
        responseData['success'] = 'false'
        responseData['message'] = 'Nada por actualizar'
        return JsonResponse(responseData, status=400)
    Cellphones.objects.filter(id=phoneid).update(**updates)
    responseData = {}
    responseData['success'] = 'true'
    responseData['message'] = 'Datos actualizados'
    return JsonResponse(responseData, status=200)
def companies(request):
    """Return every company as JSON (GET only)."""
    # Reject any verb other than GET up front.
    if request.method != 'GET':
        error = {}
        error['success'] = 'false'
        error['mesage'] = 'Wrong Method'
        return JsonResponse(error, status=400)
    payload = {}
    payload['success'] = 'true'
    payload['data'] = list(Companies.objects.all().values())
    return JsonResponse(payload, status=200)
|
[
"loret.avila0308@gmail.com"
] |
loret.avila0308@gmail.com
|
392de31fc5ee9c7578c078ae0f7d99e76cdbc3b1
|
9f4b2c058f2c56e2c27004ee9446bd30c3579021
|
/info/utils.py
|
e78bab87054c52706adfb6c393ce004a4f4f6874
|
[
"MIT"
] |
permissive
|
VoltK/COVID19-HelpLine
|
55806a8e4b80bcaaeb0ccbca08346fc58f299a38
|
1ed76740dfde49cc5832a41a7e96b8df917d9b23
|
refs/heads/master
| 2021-03-24T06:31:07.293456
| 2020-03-16T22:39:29
| 2020-03-16T22:39:29
| 247,526,017
| 0
| 0
| null | 2020-03-15T19:16:52
| 2020-03-15T18:25:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,070
|
py
|
import pandas as pd
from datetime import datetime, timedelta
from urllib.error import HTTPError
import numpy as np
def load_csv():
    """Download the most recent JHU CSSE daily COVID-19 report as a DataFrame.

    Starts at today's date and walks backwards one day at a time until a
    report URL exists (reports are published with a delay).
    NOTE(review): only urllib HTTPError triggers the fallback; other network
    failures (e.g. URLError from no connectivity) will propagate.
    """
    date = datetime.now().strftime("%m-%d-%Y")
    counter = 1
    while True:
        try:
            url = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date}.csv'
            df = pd.read_csv(url)
            return df
        except HTTPError:
            print("Report wasn't found moving one day back...")
            # counter grows so each retry steps one more day into the past
            date = (datetime.now() - timedelta(counter)).strftime("%m-%d-%Y")
            counter += 1
def load_data():
    """Load the latest report and build the list of known location names."""
    frame = load_csv()
    # Keep only the date portion of ISO timestamps such as "2020-03-16T22:00:00".
    frame['Last Update'] = frame['Last Update'].apply(lambda ts: ts.split("T")[0])
    frame = frame.replace(np.nan, '', regex=True)
    known_locations = list(frame['Country/Region'].unique())
    known_locations += list(frame['Province/State'].unique())
    return frame, known_locations
def get_data_bas_location(location, df):
    """Return one row of report data for *location*.

    For "US" and "China" (which span many rows) the columns are summed across
    the whole country and returned as ["", country, "", <sums...>].  Otherwise
    the first matching row is returned as a list, preferring a match on
    'Province/State' and falling back to 'Country/Region'.
    """
    if location in ("US", "China"):
        grouped = df[df['Country/Region'].str.contains(location)].groupby("Country/Region").sum()
        return ["", location, ""] + list(grouped.values[0])
    try:
        # Fix: the original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only a failed lookup should fall
        # through to the Country/Region search.
        return list(df[df['Province/State'].str.contains(location)].values[0])
    except (IndexError, KeyError, ValueError):
        return list(df[df['Country/Region'].str.contains(location)].values[0])
def generate_message_from_row(row):
    """Format a user-facing status message from one report row.

    Per the labels below, row[0]/row[1] are place names, row[3] is the
    confirmed count, row[4] deaths and row[5] recovered (the layout produced
    by get_data_bas_location).
    """
    today = datetime.now().strftime("%B %d, %Y")
    # Fix: corrected the typo "currenty" -> "currently" in the output text.
    message = (f'As of {today} In {row[0]} {row[1]} there are currently \n'
               f'{row[3]} confirmed, \n'
               f'{row[5]} Recovered \n'
               f'and {row[4]} Deaths')
    # Collapse the double space left behind when row[0] (province) is empty.
    return message.replace("  ", " ")
def handle_message(location):
    """Build the reply text for a requested location name."""
    df, locations = load_data()
    if location not in locations:
        return "There is no data for this location or check you spelling"
    row = get_data_bas_location(location, df)
    return generate_message_from_row(row)
|
[
"khud44@icloud.com"
] |
khud44@icloud.com
|
c21d31b8f33523c994753df77d80db37a0c76812
|
d8d9c24d61a90b338dfc78ffe098deb5209aa9fc
|
/core/basics/cards.py
|
b905eac587ff3b9f1ca799bc9566b5d186cf41f8
|
[
"MIT"
] |
permissive
|
Maaack/ClusterBuster
|
8430d934c3f775fec570965fa54f10e39a8bdfd5
|
6e13cce6f8c0e622dfc087bffc4379adcfbc84f2
|
refs/heads/master
| 2020-03-25T09:12:59.061889
| 2019-04-29T01:13:42
| 2019-04-29T01:13:42
| 143,654,015
| 1
| 0
| null | 2019-04-29T01:13:43
| 2018-08-05T23:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
import random
from abc import ABC, abstractmethod
from .generic import ObjectList
class Card(object):
    """A single card holding an arbitrary value.

    Every instance is registered in the class-level ``all`` list and given a
    sequential 1-based id.
    """

    all = []  # registry shared by every Card ever created

    def __init__(self, value=None):
        # NOTE(review): any falsy value (0, [], "") is normalized to "" here,
        # not just None -- confirm that is intended.
        self.value = value if value else ""
        self.id = len(self.all) + 1
        self.all.append(self)

    def __eq__(self, other):
        # Fix: return NotImplemented (instead of False) for non-Card operands
        # so Python can try the reflected comparison; the observable result
        # of `card == non_card` is unchanged.
        if not isinstance(other, Card):
            return NotImplemented
        return self.value == other.value

    def __str__(self):
        return "Card " + str(self.id)

    def __repr__(self):
        return 'Card({!r})'.format(self.value)
class CardStack(ObjectList):
    """An ordered collection restricted to Card instances."""

    def __init__(self):
        super().__init__(Card)
class Deck(CardStack):
    """A stack of cards that can be shuffled and drawn from.

    Like Card, every Deck is registered in the class-level ``all`` list and
    given a sequential 1-based id.
    """

    all = []

    def __init__(self):
        self.id = len(self.all) + 1
        self.all.append(self)
        super().__init__()

    def shuffle(self):
        """Randomize the card order in place."""
        random.shuffle(self)

    def draw(self, number=1):
        """Remove and return cards from the top of the deck.

        Returns None when nothing can be drawn, a single Card when exactly
        one is drawn, and a CardStack for larger draws (capped at deck size).
        """
        number = min(number, len(self))
        if number <= 0:
            return None
        if number == 1:
            return self.pop()
        drawn = CardStack()
        for _ in range(number):
            drawn.append(self.pop())
        return drawn
class AbstractDeckParameters(ABC):
    """Base class for deck-building parameter bundles.

    Subclasses populate ``self.parameters`` in set_default_parameters();
    entries are then readable as plain attributes (p.options, p.spots, ...).
    """

    def __init__(self):
        self.parameters = {}
        self.set_default_parameters()

    def __getattr__(self, item):
        # Only invoked when normal attribute lookup fails; fall back to the
        # parameters dict.
        try:
            return self.parameters[item]
        except KeyError:
            # Fix: include the missing name in the AttributeError (the
            # original raised a bare AttributeError with no message) and
            # suppress the KeyError chaining, which is noise here.
            raise AttributeError(item) from None

    @abstractmethod
    def set_default_parameters(self):
        """Populate self.parameters with this deck type's defaults."""
class AbstractDeckBuilder(ABC):
    """Interface for objects that construct Deck instances."""

    @staticmethod
    @abstractmethod
    def build_deck(parameters=None):
        """Build and return a Deck from the given parameters."""
class PatternDeckParameters(AbstractDeckParameters):
    """Default parameters for pattern decks: four options, three spots."""

    def set_default_parameters(self):
        self.parameters.update(options=[1, 2, 3, 4], spots=3)
class PatternDeckBuilder(AbstractDeckBuilder):
    """Builds a deck containing one card per ordered selection of the options."""

    @staticmethod
    def build_deck(parameters=None):
        """Build the full pattern deck, using defaults when none are given."""
        if parameters is None:
            parameters = PatternDeckParameters()
        return PatternDeckBuilder.__build_pattern_deck(parameters.options, parameters.spots, [])

    @staticmethod
    def __build_pattern_deck(options, open_spots, card_values=None):
        """Recursively enumerate selections.

        @type options: list
        @type open_spots: number
        @type card_values: list
        """
        if card_values is None:
            card_values = []
        deck = Deck()
        # Base case: no spots or options left -- emit one card for this path.
        if open_spots <= 0 or len(options) == 0:
            deck.append(Card(card_values))
            return deck
        for choice in options:
            remaining = options.copy()
            try:
                remaining.remove(choice)
            except ValueError:
                continue
            branch = PatternDeckBuilder.__build_pattern_deck(
                remaining, open_spots - 1, card_values + [choice])
            deck.extend(branch)
        return deck
|
[
"marek.belski@gmail.com"
] |
marek.belski@gmail.com
|
4352e7dcc76837ca9b89c65f62a1d42c6bbf2315
|
6d27bc7c53cbbad8ee65d833c7736547427e3034
|
/01-webotron/webotron/__init__.py
|
893f8d56323034ba7d8f2cf6f7c75ab7da295219
|
[] |
no_license
|
perspicacity-net/automating-aws-with-python
|
9fe40eea8e7b9dd8678040607af8f686f9db7bcb
|
b6f4224bed176cbf95ae194b067da800549d5867
|
refs/heads/master
| 2023-02-20T13:29:42.170223
| 2022-01-21T20:30:22
| 2022-01-21T20:30:22
| 220,298,932
| 0
| 0
| null | 2023-02-10T23:11:54
| 2019-11-07T18:03:41
|
Python
|
UTF-8
|
Python
| false
| false
| 59
|
py
|
"""Webotron script and modules. Deploy websites to S3."""
|
[
"57500925+perspicacity-net@users.noreply.github.com"
] |
57500925+perspicacity-net@users.noreply.github.com
|
27d35d6be0b6d0eb60aed7f70b25ad30517b0e78
|
a8e47b1c682d89708f2ab7b21128aa30a8d1e031
|
/pages/base_page.py
|
b30af9e0641bdf01bc42a53d6e77652b689fe876
|
[] |
no_license
|
snlnrush/stepik-final-project-test-automation
|
77df29135c2503825acff3d87182a3ea3dd4270a
|
f4f68742a117027017e7309daf8245923a7ca510
|
refs/heads/main
| 2023-06-27T19:45:36.261897
| 2021-07-28T07:15:37
| 2021-07-28T07:15:37
| 380,244,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,452
|
py
|
import math
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from .locators import BasePageLocators
class BasePage:
    """Common behaviour shared by all page objects in the test suite."""

    def __init__(self, browser, url, timeout=10):
        self.browser = browser
        self.url = url
        self.browser.implicitly_wait(timeout)

    def go_to_login_page(self):
        # NOTE(review): uses LOGIN_LINK_INVALID -- presumably the deliberately
        # broken locator from the course exercise; confirm before "fixing".
        self.browser.find_element(*BasePageLocators.LOGIN_LINK_INVALID).click()

    def should_be_login_link(self):
        assert self.is_element_present(*BasePageLocators.LOGIN_LINK), "Login link is not presented"

    def open(self):
        self.browser.get(self.url)

    def is_element_present(self, how, what):
        """Return True if the element can be located right now."""
        try:
            self.browser.find_element(how, what)
        except NoSuchElementException:
            return False
        else:
            return True

    def solve_quiz_and_get_code(self):
        """Answer the math-quiz alert and print the code from the follow-up alert."""
        quiz = self.browser.switch_to.alert
        x = quiz.text.split(" ")[2]
        quiz.send_keys(str(math.log(abs((12 * math.sin(float(x)))))))
        quiz.accept()
        try:
            second = self.browser.switch_to.alert
            alert_text = second.text
            print(f"Your code: {alert_text}")
            second.accept()
        except NoAlertPresentException:
            print("No second alert presented")

    def is_not_element_present(self, how, what, timeout=4):
        """Return True if the element never appears within *timeout* seconds."""
        try:
            WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))
        except TimeoutException:
            return True
        else:
            return False

    def is_disappeared(self, how, what, timeout=4):
        """Return True if the element vanishes within *timeout* seconds."""
        try:
            WebDriverWait(self.browser, timeout, 1, TimeoutException).until_not(
                EC.presence_of_element_located((how, what)))
        except TimeoutException:
            return False
        else:
            return True

    def go_to_basket_page(self):
        self.browser.find_element(*BasePageLocators.BASKET_LINK).click()

    def should_be_authorized_user(self):
        assert self.is_element_present(*BasePageLocators.USER_ICON), \
            "User icon is not presented, probably unauthorised user"
|
[
"cross-fire@list.ru"
] |
cross-fire@list.ru
|
0514e1e6255a5659438028fdc92c7b2a42d06e55
|
1d890872181eb88a8e5e3e8e19baa633dd6a06ec
|
/tools/fetch_allowed_addons.py
|
065e490261112e7bc663c7a54f947236a297ff09
|
[] |
no_license
|
JeremyRand/tor-browser-build
|
afc45eee15c390549f3bd036cee5108521af6ddf
|
41d0330f0741d6a78fcea050352f03002ca8520e
|
refs/heads/master
| 2023-03-04T10:26:39.539400
| 2020-12-10T18:30:36
| 2020-12-10T18:30:36
| 320,384,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
#!/usr/bin/env python3
import urllib.request
import json
import base64
import sys
def fetch(x):
    """Download *x* and return the raw response bytes."""
    with urllib.request.urlopen(x) as response:
        body = response.read()
    return body
def find_addon(addons, addon_id):
    """Return the addon entry whose guid equals *addon_id*.

    Exits the program with an error message if no entry matches.
    """
    for result in addons['results']:
        candidate = result['addon']
        if candidate['guid'] == addon_id:
            return candidate
    sys.exit("Error: cannot find addon " + addon_id)
def fetch_and_embed_icons(addons):
    """Replace every addon's icon_url with an inline base64 data URI."""
    for result in addons['results']:
        addon = result['addon']
        raw_icon = fetch(addon['icon_url'])
        addon['icon_url'] = 'data:image/png;base64,' + str(base64.b64encode(raw_icon), 'utf8')
def patch_https_everywhere(addons):
    """Rewrite the HTTPS Everywhere entry to the EFF-signed guid and homepage."""
    entry = find_addon(addons, 'https-everywhere@eff.org')
    entry['guid'] = 'https-everywhere-eff@eff.org'
    entry['url'] = 'https://www.eff.org/https-everywhere'
def main(argv):
    """Fetch the allowed-addons AMO collection and print it as pretty JSON."""
    amo_collection = argv[0] if argv else '83a9cccfe6e24a34bd7b155ff9ee32'
    url = ('https://addons.mozilla.org/api/v4/accounts/account/mozilla/collections/'
           + amo_collection + '/addons/')
    data = json.loads(fetch(url))
    fetch_and_embed_icons(data)
    patch_https_everywhere(data)
    data['results'].sort(key=lambda entry: entry['addon']['guid'])
    # Sanity check: NoScript must be part of the collection.
    find_addon(data, '{73a6fe31-595d-460b-a920-fcc0f8843232}')
    print(json.dumps(data, indent=2, ensure_ascii=False))


if __name__ == "__main__":
    main(sys.argv[1:])
|
[
"acat@torproject.org"
] |
acat@torproject.org
|
381c8951f2181d89b056a8405f2b0d19698cbd03
|
a08d9599da8efdcf7992551851d85f76119d384d
|
/main.py
|
f2a20f7f3ee6ed50201f6f296209e2088bb728e4
|
[] |
no_license
|
samloik/yandex
|
87afb5d5f21d0ce1f298beb0ca2f0dc826bccec3
|
98e10fa8fc5c03bbd8b3b467e299f16a6f22ab56
|
refs/heads/master
| 2023-08-27T06:17:24.568564
| 2021-10-25T00:39:17
| 2021-10-25T00:39:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
import sys
Rn = { }
oSum = {}
def find_min_R(min, max):
    # Recursively assigns a payout (multiples of 500, minimum 500) to every
    # index between min and max, writing results into the global oSum dict
    # based on pairwise comparisons of the global ratings table Rn.
    # NOTE(review): `min`/`max` shadow the builtins of the same name, and the
    # caller stores input().split() *lists of strings* in Rn, so all of these
    # comparisons are lexicographic on lists -- confirm whether numeric
    # comparison (int conversion) was intended.
    if min < max:
        if Rn[min] < Rn[max]:
            # Right end is larger: resolve everything to its left first, then
            # set oSum[max] relative to its immediate neighbour.
            if Rn[max-1] < Rn[max]:
                find_min_R(min, max - 1)
                oSum[max] = oSum[max-1] + 500
                return
            elif Rn[max-1] == Rn[max]:
                find_min_R(min, max-1)
                oSum[max] = oSum[max-1]
                return
            else:
                find_min_R(min, max-1)
                oSum[max] = oSum[max-1] - 500
                return
        elif Rn[min] > Rn[max]:
            # Left end is larger: resolve everything to its right first, then
            # set oSum[min] relative to its immediate neighbour.
            if Rn[min] > Rn[min+1]:
                find_min_R(min+1, max)
                oSum[min] = oSum[min+1] + 500
                return
            elif Rn[min] == Rn[min+1]:
                find_min_R(min+1, max)
                oSum[min] = oSum[min+1]
                return
            else:
                find_min_R(min+1, max)
                oSum[min] = oSum[min+1]- 500
                return
        else:
            # Ends compare equal: both ends get the same payout.
            find_min_R(min + 1, max)
            oSum[min] = oSum[min+1]
            return
    else:
        # Single element left: base payout.
        oSum[min] = 500
        return
# Read "<count> ..." from the first stdin line, then one rating per line.
j = input().split()
Rmin = 0
Rmax = int(j[0])
for N in range(Rmax):
    # NOTE(review): this stores a *list of strings* per entry, so the
    # comparisons inside find_min_R are lexicographic on lists -- likely
    # int(input()) was intended; confirm against the problem statement.
    Rn[N] = input().split()
# Pre-seed every payout slot with 0 before the recursion fills them in.
for i in range(Rmax):
    oSum[i] = 0
find_min_R(Rmin,Rmax-1)
print( sum(oSum.values()) )
|
[
"alexey.loik@mail.ru"
] |
alexey.loik@mail.ru
|
18f08b116bda4773773bd7d94ab7eabefb2bdc0f
|
647366e0bdaa79972731a87fe7da58a4af2703d7
|
/BlackScreen.py
|
85207b3bfbb3e23ebad854064db395f8da230e00
|
[] |
no_license
|
chandhan-j/Invisible-cloak
|
fe5def0420e62dacd13578f1e0eba4a878738395
|
9f4db4ebe3952f93204891e61e43973f478cd5e6
|
refs/heads/main
| 2023-04-28T12:26:26.159279
| 2021-05-25T23:24:17
| 2021-05-25T23:24:17
| 370,852,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
import cv2
import numpy as np
# Invisible-cloak effect: pixels inside the colour range below are replaced
# with the corresponding pixels of a static background image.
video = cv2.VideoCapture(0)
image = cv2.imread("Bangkok.jpg")
while True:
    ret, frame = video.read()
    # Fix: stop cleanly when the camera yields no frame (the original ignored
    # `ret` and printed every raw frame to stdout as debug output).
    if not ret:
        break
    frame = cv2.resize(frame, (640, 480))
    image = cv2.resize(image, (640, 480))
    # BGR bounds of the "cloak" colour to be masked out.
    u_black = np.array([104, 153, 70])
    l_black = np.array([30, 30, 0])
    mask = cv2.inRange(frame, l_black, u_black)
    res = cv2.bitwise_and(frame, frame, mask = mask)
    f = frame - res
    # Wherever the cloak was removed (zeroed), show the background instead.
    f = np.where(f == 0, image, f)
    cv2.imshow("video", frame)
    cv2.imshow("mask", f)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
chandhan-j.noreply@github.com
|
7f6e52c7e4d26a352c634c83d6f26fa0ddad4af4
|
c43677ed82377d923d501554d431ab573a6e811b
|
/Main/AlphaZero/LabelGenerator.py
|
ef2156248efbf390e388e00d38a892f113266544
|
[
"MIT"
] |
permissive
|
FreddeFrallan/AlphaHero
|
cb0b7dcaf34b3f4fd4a912362fcd451592fdf1ea
|
bda8f78424294a4f52359b6591296abb229611ae
|
refs/heads/master
| 2020-05-31T09:06:18.699889
| 2020-01-21T10:15:36
| 2020-01-21T10:15:36
| 190,205,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
import numpy as np
from Main.Environments.Connect4 import Constants, GetPossibleActions, Utils
# Instead of recursively returning elements we just append them to an global list
def generateLabels(root):
# validateTree(root)
# return Utils.state2ConvState(root.state, root.currentPlayer), _createPolicyLabel(root)
return Utils.bitBoard2ConvState(root.state), _createPolicyLabel(root)
def generateQLabels(root):
# return Utils.state2ConvState(root.state, root.currentPlayer), _createValueLabels(root), _createPolicyLabel(root)
return Utils.bitBoard2ConvState(root.state), _createValueLabels(root), _createPolicyLabel(root)
# Assumes that every child has been visited
def _createPolicyLabel(node):
qValues = np.zeros(Constants.AMOUNT_OF_POSSIBLE_ACTIONS)
for c in node.children:
qValues[c.action] = c.visits
qValues /= np.sum(qValues) # Normalize
return qValues
# Assumes that the node has been visited
def _createValueLabels(node):
return node.score / max(1, node.visits)
# Only for debugging!
def validateTree(node):
# possibleActions = GetPossibleActions.getPossibleActions(node.state)
import Main.Environments.Connect4.Connect4Bitmaps as bitMaps
possibleActions = bitMaps.getPossibleActions(node.state)
for c in node.children:
assert (c.action in possibleActions)
validateTree(c)
|
[
"freddeFc@gmail.com"
] |
freddeFc@gmail.com
|
eceecb14e8b93521caabd4234411b43e7ff7d170
|
9e8e37388c81726509085e6f59fd61ca4c383d70
|
/Server/Maze_Server.py
|
fcfb97b404411857d71b2f2c1f785af39aa2589c
|
[] |
no_license
|
JPeck567/MazeScape
|
ed2a82e015a28d28b8ca72fb30d20a02e09d4cba
|
706d6d668a8814d188259f9f5ce41d68c601d8e6
|
refs/heads/main
| 2023-03-10T18:04:33.086473
| 2021-02-24T20:05:36
| 2021-02-24T20:05:36
| 342,018,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,551
|
py
|
from PodSixNet.Channel import Channel
from PodSixNet.Server import Server
from random import randint
from time import sleep
import MazeGen
import Scoring
# send method of class edited so JSON serialised, not rencode
# json doesnt recognise tuples, so will sends a lists. need to convert at other end, client or server.
######## Player Class ########
# Parameters:- channel: ClientChannel object, p_id: integer, name: string, c_type: string
# Return type:- n/a
# Purpose: Stores attributes for each player.
##########################
class Player:
def __init__(self, channel, p_id, name, c_type):
self.channel = channel
self.p_id = p_id
self.c_type = c_type
self.name = name
######## ClientChannel Class ########
# Parameters:- n/a
# Return type:- n/a
# Purpose: Contains the methods which can be called from the client, using PodSixNet and ascyncore
########################## # inheritance of Channel
class ClientChannel(Channel): # representation of a client connection
def __init__(self, *args, **kwargs): # unpacks all parameters, unknown or not
super().__init__(*args, **kwargs)
######## get_client Method ########
# Parameters:- gameid: integer, p: string
# Return type:- self.Player object
# Purpose: Returns the other client of the 2 player game, given the var p.
##########################
def get_client(self, gameid, p):
p = "p2" if p is 1 else "p1" # player number 1 is p1. player number 2 p2. so here chooses opposite player for
player = self._server.games_dct[gameid][p] # given player number
return player
######## Network_room_full_close Method ########
# Parameters:- data: dictionary
# Return type:- a/n
# Purpose: Will receive the 'okay' message to close the connection to the clients channel, after the server has sent
# the close message. It reads the clients current channel and closes it.
##########################
def Network_room_full_close(self, data):
pass
#client = self._server.players[data["id"]]
# client[0].close() # index 0 is channel
def Network_ping(self, data):
conn_type = data["type"]
if conn_type is "menu":
channel = self._server.players[data["id"]][0]
channel.Send({"action": "ping"})
else:
channel = self._server.players[data["id"]].channel
channel.Send({"action": "ping"})
######## Network_menu Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: To call another method to send the scores to the client
##########################
def Network_menu(self, data): # if menu wants to get db
channel = self._server.players[data["p_id"]][0]
self._server.menu_setup(channel)
######## Network_give_info Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: After id is send to client, client sends back id, name and connection type used for the setup of the game
##########################
def Network_give_info(self, data):
p_tup = self._server.players[data["id"]]
channel = p_tup[0]
self._server.game_setup(data["id"], channel, data["name"], data["c"])
######## Network_start Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Setup for the game, setting gameid and removing clients from lists.
##########################
def Network_start(self, data): # for first time, as score and level are 0 and 1 respectively. other levels start
# through network_finish
p1 = self._server.players[data["p1_id"]] # p1 is always the player who creates a new game.
p2 = self._server.players[data["p2_id"]]
del self._server.joining_player_dict[p2.p_id] # p2 always joins a game
del self._server.new_player_dict[p1.p_id] # p1 is always a player to create a game
self._server.send_to_all_j_clients(({"action": "game_slots", "players": self._server.new_player_dict}))
while True:
gameid = randint(0, 1000)
try:
self._server.games_dct[gameid]
except KeyError: # if not used
break
self._server.games_dct[gameid] = {"p1": p1, "p2": p2, "score": 0, "level": 1}
p1.channel.Send({"action": "wait", "name": p2.name, "gameid": gameid})
p2.channel.Send({"action": "wait", "name": p1.name, "gameid": gameid})
######## Network_begin Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: After clients have waited 5 seconds from wait screen, sends a message to begin.
##########################
def Network_begin(self, data):
gameid = data["gameid"]
p1 = self._server.games_dct[gameid]["p1"]
p2 = self._server.games_dct[gameid]["p2"]
self._server.begin(p1, p2, 0, 1) # score and level
######## Network_move Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Send coordinates of one client to the other
##########################
def Network_move(self, data):
player = self.get_client(data["gameid"], data["player"])
player.channel.Send({"action": "move", "x": data["x"], "y": data["y"]})
######## Network_enemy_move Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Sends coordinates of enemy to the other client from one client.
##########################
def Network_enemy_move(self, data):
player = self.get_client(data["gameid"], data["player"])
player.channel.Send({"action": "enemy_move", "x": data["x"], "y": data["y"] })
######## Network_key_collide Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Sends message to remove collided key from one client to the other
##########################
def Network_key_collide(self, data):
player = self.get_client(data["gameid"], data["player"])
player.channel.Send({"action": "key_collide", "key": data["key"], "door_open": data["door_open"]})
######## Network_door_collide Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Sends message from one client to the other that the client has entered the door
##########################
def Network_door_collide(self, data):
player = self.get_client(data["gameid"], data["player"])
player.channel.Send({"action": "door_collide"})
######## Network_player_kill Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Sends message from one client to the other that the client has died.
##########################
def Network_player_kill(self, data):
player = self.get_client(data["gameid"], data["player"])
player.channel.Send({"action": "player_kill"})
######## Network_finish Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Will setup for another level or to quit the game. For the next level, saves current score and increments
# the level. For quitting, will send a message to quit game to both clients, and adds score to database.
##########################
def Network_finish(self, data):
other = self.get_client(data["gameid"], data["player"])
cont = data["cont"]
gameid = data["gameid"]
game = self._server.games_dct[gameid]
if cont: # wants to carry on playing
other.channel.Send({"action": "clear"}) # clears on player 2 side, as only has cleared for player 1.
game["score"] += data["score"] # tally score so far
game["level"] += 1 # add 1 level
self._server.begin(game["p1"], game["p2"], game["score"], game["level"])
elif not cont: # wants to quit
p1 = game["p1"]
p2 = game["p2"]
p1.channel.Send({"action": "quit"})
p2.channel.Send({"action": "quit"}) # sends p2 the quit signal
Scoring.add_score(data["name"], data["score"]) # adds score to json db
del self._server.games_dct[gameid] # deletes game
del self._server.players[p1.p_id] # deletes players from lists
del self._server.players[p2.p_id]
######## Method ########
# Parameters:-
# Return type:-
# Purpose:
##########################
def Network_inst_win(self, data):
player = self.get_client(data["gameid"], data["player"])
player.channel.Send({"action": "inst_win"})
######## Network_get_names Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Sends a list of names to client to ensure name is not taken
##########################
def Network_get_names(self, data):
player = self._server.players[data["id"]]
names = Scoring.get_names()
player.channel.Send({"action": "get_names", "names": names})
######## Network_manual_disconnect Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Sent from the client quitting unexpectedly. Will send a message to the other client to quit to menu.
##########################
def Network_manual_disconnect(self, data):
if data["game"]:
other_player = self.get_client(data["gameid"], data["player"])
other_player.channel.Send({"action": "quit"})
else:
del self._server.players[data["id"]]
if data["c_type"] == "new":
del self._server.new_player_dict[data["id"]]
self._server.send_to_all_j_clients({"action": "game_slots", "players": self._server.new_player_dict})
else:
del self._server.joining_player_dict[data["id"]]
######## Network_disconnect Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Disconnects client from server after accessing score table in menu.
##########################
def Network_disconnect(self, data): # for menu
channel = self._server.players[data["id"]][0]
# channel.close()
del self._server.players[data["id"]]
######## Network_close_signal Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: After client quits and doesnt need network connection anymore, deletes the variables
##########################
def Network_close_signal(self, data):
id = data["id"]
p = "p1" if data["userNum"] == 1 else "p2"
if data["gameid"] is not None:
if len(self._server.games_dct[data["gameid"]]) is 1: # deletes game only if other player has quit.
del self._server.games_dct[data["gameid"]]
del self._server.games_dct[data["gameid"]][p] # deletes player from lists and dicts
# player.channel.close()
del self._server.players[id]
######## GameServer class ########
# Parameters:- n/a
# Return type:- n/a
# Purpose: Handles the methods called from the client for the server. Also contains own methods for setting up game
# connections and menu connections
##########################
class GameServer(Server): # methods. inheritance though server
channelClass = ClientChannel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) # attributes
print("Server started on LOCALHOST")
self.games_dct = {}
self.joining_player_dict = {}
self.new_player_dict = {}
self.players = {}
######## Connected Method ########
# Parameters:- channel: ClientChannel object, addr: string
# Return type:- n/a
# Purpose: Called for each new client connection. Saves player in players dict, sets player id, and checks if server
# is full or not and subsequently send a message to give id the client their id.
##########################
def Connected(self, channel, addr): # is called each time a new client connects to the server
print("New Connection; ", channel, addr)
while True: # random id, not iteration
p_id = randint(0, 1000)
try: # checks if used
self.players[p_id]
except KeyError: # if not used
break
self.players[p_id] = (channel, p_id)
if len(self.players) > 10:
channel.Send({"action": "room_full_disconnect", "id": p_id}) # room full, send message to close screen, and
# client sends back confirmation.
else:
channel.Send({"action": "get_id", "id": p_id}) # room is not full, so continue with setup
######## send_to_all_j_clients Method ########
# Parameters:- data: dictionary
# Return type:- n/a
# Purpose: Sends all joining clients in the dictionary the data passed in
##########################
def send_to_all_j_clients(self, data):
for client in self.joining_player_dict.values(): # .values, not keys
client.channel.Send(data)
######## menu_setup Method ########
# Parameters:- channel: ClientChannel object
# Return type:- n/a
# Purpose: Is called from ClientChannel when the client sends a message to the server requesting for the score list
##########################
def menu_setup(self, channel):
score_list = Scoring.get_scores()
channel.Send({"action": "give_scores", "scores": score_list})
######## game_setup Method ########
# Parameters:- p_id: integer, channel: ClientChannel object, name: string, c_type: string
# Return type:- n/a
# Purpose: Called once the client requests to either join or make a new game. Adds client to respective list(new or
# joining), and sets client as a player class in the players list.
##########################
def game_setup(self, p_id, channel, name, c_type):
player_class = Player(channel, p_id, name, c_type)
self.players[p_id] = player_class # adds new player class to dict of players
if player_class.c_type == "new":
self.new_player_dict[player_class.p_id] = (player_class.p_id, player_class.name)
self.send_to_all_j_clients({"action": "game_slots", "players": self.new_player_dict}) # send all joining clients
# an updated list of the new game clients.
elif player_class.c_type == "join":
self.joining_player_dict[player_class.p_id] = player_class
player_class.channel.Send({"action": "game_slots", "players": self.new_player_dict}) # send new players list to
# client
######## starting Method ########
# Parameters:- p1: Player class, p2: Player class, gameid: integer, score: integer, level: integer
# Return type:- n/a
# Purpose: Creates the maze used for the game, as the set level/difficulty. Sends the clients the needed information
# for the game screen, including a dict representation of each cell in the maze
##########################
def begin(self, p1, p2, score, level):
e_speed_dict = {1: 90, 2: 90, 3: 80, 4: 80, 5: 70}
speed = 60 if level > 5 else e_speed_dict[level] # the levels are endless, so in the case the level is > 8,
# speed is kept to 25.
m = MazeGen.Maze(level)
m.main()
simple_maze = [["" for _ in range(m.length)] for _ in range(m.width)] # _ is unused var. Creates a empty array
# representation of the final maze.
for x in range(0, 20):
for y in range(0, 20):
obj = m.maze[x][y]
simple_maze[x][y] = obj.__dict__ # turns each cell object to dict representation, as we cannot send
# classes through the network.
# sends all info
p1.channel.Send({"action": "send_game_info", "maze": simple_maze, "length": m.length, "width": m.width,
"root_node": m.root_node, "size": m.size, "key_list": m.key_list, "door": m.door,
"o_name": p2.name, "colour": (0, 0, 255), "o_col": (255, 0, 0),
"e_col": (0, 0, 100), "oe_col": (100, 0, 0), "o_p_coords": (478, 3), "o_e_coords": (478, 478),
"level": level, "score": score, "e_speed": speed})
p2.channel.Send({"action": "send_game_info", "maze": simple_maze, "length": m.length, "width": m.width,
"root_node": m.root_node, "size": m.size, "key_list": m.key_list, "door": m.door,
"o_name": p1.name, "colour": (255, 0, 0), "o_col": (0, 0, 255),
"e_col": (100, 0, 0), "oe_col": (0, 0, 100), "o_p_coords": (3, 3), "o_e_coords": (3, 478),
"level": level, "score": score, "e_speed": speed})
######## launch Method ########
# Parameters:- n/a
# Return type:- n/a
# Purpose: Will constantly loop self.Pump which checks for incoming messages, which the server will deal with using
# the methods in ClientChannel
##########################
def launch(self):
while True:
self.Pump()
######## main Method ########
# Parameters:- n/a
# Return type:- n/a
# Purpose: Sets up the server using the host and port specified.
##########################
def main():
host, port = "localhost", 31425
game_server = GameServer(localaddr=(host, int(port)))
game_server.launch()
if __name__ == "__main__":
main()
|
[
"61948082+JPeck567@users.noreply.github.com"
] |
61948082+JPeck567@users.noreply.github.com
|
660173d6a93850bc9e023c7d494c5a4ba9a4ab8d
|
49fa3231ed1811acbd967d95db6f0a8a312ab3c5
|
/hashes.py
|
227c5c57570b466ab977ef04728f064cc162ab7f
|
[] |
no_license
|
smashery/cryptopals-challenges
|
6d7830bd410b9ca53666a022054c62b299c0a973
|
3597e0373c521d6b31a690997afc5f7b92188071
|
refs/heads/master
| 2021-01-19T16:03:20.145579
| 2018-01-01T11:23:53
| 2018-01-01T11:24:21
| 88,242,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
from sha1 import *
from decode_xor import *
def hmac_sha1(key, message):
    """HMAC using SHA-1 as the underlying hash function."""
    return hmac(key, message, hash_func=sha1)
def hmac(key, message, hash_func):
    """Generic HMAC construction (RFC 2104 shape) over *hash_func*."""
    blocksize = 64
    if len(key) > blocksize:
        # Over-long keys are replaced by their hash.
        key = hash_func(key)
    if len(key) < blocksize:
        # Short keys are zero-padded up to the block size.
        key += '\x00' * (blocksize - len(key))
    outer_pad = xor_bytes('\x5c' * blocksize, key)
    inner_pad = xor_bytes('\x36' * blocksize, key)
    return hash_func(outer_pad + hash_func(inner_pad + message))
def add_sha1_padding(text):
    """Append standard SHA-1 (Merkle-Damgard) padding to *text*.

    Adds the 0x80 marker byte, zero bytes until the length is 56 mod 64, and
    finally the original bit length as a big-endian 64-bit integer.
    """
    original_length = len(text)
    padded = text + b'\x80'
    padded += b'\x00' * ((56 - len(padded) % 64) % 64)
    assert len(padded) % 64 == 56
    padded += struct.pack(b'>Q', original_length * 8)
    return padded
def append_to_end_of_message_and_create_new_mac(mac, suffix, message_length_guess):
    """
    Add items to the end of a message and still produce a valid mac, without knowing
    the key.
    :param mac: The original mac, in raw bytes
    :param suffix: The suffix we wish to add
    :param message_length_guess: The length we think the message was, including the key
    :return: A new MAC, in raw bytes
    """
    assert message_length_guess % 64 == 0
    # Resume hashing from the known internal state, then absorb the suffix.
    resumed = create_sha_object_from_sha_value(mac, message_length_guess)
    return resumed.update(suffix).digest()
def create_sha_object_from_sha_value(sha_bytes, message_length_guess):
    """Rebuild a Sha1Hash whose internal state matches the given digest bytes."""
    assert message_length_guess % 64 == 0
    resumed = Sha1Hash()
    # Unpack the 20-byte digest into the five 32-bit state words.
    resumed._h = struct.unpack('>IIIII', sha_bytes)
    resumed._message_byte_length = message_length_guess
    return resumed
|
[
"smashery@gmail.com"
] |
smashery@gmail.com
|
c64643145d446c4d1692c9124c6e5fc0de78c141
|
5e44aa03dbb1aaac354473189f024e8dbf701536
|
/Neural_Network/Neural_Network_test.py
|
d260b5bf3631bb841a1f343b6dea4cb4abc2a3f3
|
[] |
no_license
|
zwang17/Machine-Learning
|
b2210f33c0788efa338ccddf819b82b22acf5405
|
e553bc0513ddf2b1dd847ec5c7e0296749a2d893
|
refs/heads/master
| 2020-12-30T18:02:11.011185
| 2017-08-27T15:54:17
| 2017-08-27T15:54:17
| 90,938,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
import Neural_Network as NN
import sys
# Make the sibling data-generator package importable (hard-coded local path).
sys.path.append('D:\Machine Learning\Machine-Learning\Random_Data_Generator')
import RandGen
import numpy as np

# Smoke test: fit the network on 1000 noisy samples drawn from the linear
# combination y = 1*x1 + 2*x2 + 3*x3 + 4*x4 (noise std 0.01).
Generator = RandGen.RandomDataGenerator()
InputData = Generator.GenerateLinearComboData(1000,[1,2,3,4],0.01,normalNoise=True)
# First four columns are features, last column is the target.
X = InputData[:,0:4]
y = InputData[:,4:]
Network = NN.Neural_Network(X,y)
inputX = np.array(([2,2,2,4])) # remember to match the input size with the training set
T = NN.trainer(Network)
T.train()
# Prediction for [2,2,2,4]; should approach 1*2+2*2+3*2+4*4 = 28 if training
# recovered the generating weights -- presumably, verify against RandGen.
print(Network.forward(inputX))
|
[
"563726573@qq.com"
] |
563726573@qq.com
|
9572daafd1e010b415fefc2d1909dc9bd2d663a9
|
9786e9f30f576063556b8065e7896606519481f0
|
/projects/puerto_rico_stoch/fragility_curves/select_fragility_curves_ms.py
|
22def2da06737dfedc9a5e541f9145fdb5cce520
|
[] |
no_license
|
aranyavenkatesh/temoatools
|
ed13e279f90bff7a1feb9f429bccc6e9d7c2b99c
|
7c62b60152cc1bf5302c158b5b7b134762225da5
|
refs/heads/master
| 2022-11-10T00:19:18.480170
| 2020-07-01T20:36:45
| 2020-07-01T20:36:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,714
|
py
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import temoatools as tt

# =============================================================================#
# Select Fragility Curves
# =============================================================================#
# Maps each grid component to the named fragility curve in temoatools.
curves = {"trans": "trans_UK_base",
          "sub": "sub_HAZUS_severe_k3",
          "dist_cond": "dist_cond_TX",
          "dist_twr": "dist_60yr",
          "wind": "wind_nonyaw",
          "solar": "solar_utility",
          "coal_biomass": "secbh_severe",
          "natgas_petrol": "secbm_severe",
          "battery": "secbl_severe",
          "hydro": "cecbl_severe",
          "UGND": "secbm_severe"}

# ================================#
# Calculate damage across a range of windspeeds
# ================================#
mph = np.arange(0.1, 178, 1)  # 80 m/s
# mph = np.arange(0.1, 223, 1)  # 100 m/s
ms = mph * 0.44704  # mph -> m/s for the x axis

# Probability of damage for each component across the wind-speed sweep.
trans = tt.fragility(mph, curve=curves['trans'])
sub = tt.fragility(mph, curve=curves['sub'])
dist_cond = tt.fragility(mph, curve=curves['dist_cond'])
dist_twr = tt.fragility(mph, curve=curves['dist_twr'])
UGND = tt.fragility(mph, curve=curves['UGND'])
coal_biomass = tt.fragility(mph, curve=curves['coal_biomass'])
natgas_petrol = tt.fragility(mph, curve=curves['natgas_petrol'])
wind = tt.fragility(mph, curve=curves['wind'])
solar = tt.fragility(mph, curve=curves['solar'])
hydro = tt.fragility(mph, curve=curves['hydro'])
battery = tt.fragility(mph, curve=curves['battery'])

# ================================#
# Plot - Compare Used Damage Functions
# ================================#
plt.figure(1)
# plt.subplots(constrained_layout=True)
f = plt.gcf()
width = 3.58  # inches
height = 3.58  # inches
f.set_size_inches(height, width)  # s

# sns.set_style("white")
sns.set_style("white", {"font.family": "serif", "font.serif": ["Times", "Palatino", "serif"]})
sns.set_context("paper")
sns.set_palette("colorblind")
colors = sns.color_palette('colorblind')

plt.plot(ms, dist_cond, label="Distribution lines", color=colors[0])
plt.plot(ms, wind, label="Wind turbines", color=colors[2])
plt.plot(ms, dist_twr, label="Distribution towers", color=colors[5])
plt.plot(ms, solar, label="Solar panels", color=colors[8])
plt.plot(ms, coal_biomass, '--', label="Coal & biomass power plants", color=colors[7])
plt.plot(ms, battery, label="Battery storage plants", color=colors[3])
plt.plot(ms, hydro, '--', label="Hydroelectric power plants", color=colors[9])
plt.plot(ms, sub, '--', label="Substations", color=colors[6])
plt.plot(ms, trans, label="Transmission lines", color=colors[4])
plt.plot(ms, natgas_petrol, label="Natural gas, oil, diesel \n& landfill gas power plants", color=colors[1])
plt.plot(ms, UGND, ':', label="Buried lines", color=colors[0])

# Legend and Labels
# plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=3)
plt.legend(bbox_to_anchor=(1.0, 0.5), loc='center left', ncol=1, frameon=False)
# plt.legend(bbox_to_anchor=(0.1, 0.6), loc='center left', ncol=1, frameon=True)
plt.ylabel("Probability of damage (-)")
plt.xlabel("Wind speed ($ms^{-1}$)")

# Add vertical lines for category cases
# Gray reference lines marking hurricane-category wind-speed thresholds.
y = [0, 1.02]
low = 22.*0.44704  # mph to m/s
med = 113.0*0.44704
high = 154.0*0.44704
plt.plot([low, low], y, color="gray")
plt.plot([med, med], y, color="gray")
plt.plot([high, high], y, color="gray")
y_txt = 1.03
plt.text(low, y_txt, "1", ha="center")
plt.text(med, y_txt, "2-3", ha="center")
plt.text(high, y_txt, "4-5", ha="center")
plt.text((low+high)/2.0, y_txt+0.07, "Hurricane category", ha="center")
sns.despine()

# Save and show
plt.savefig("Figure4_fragility_curves_selected_ms.png", dpi=1000, bbox_inches="tight")
|
[
"jab6ft@virginia.edu"
] |
jab6ft@virginia.edu
|
f2ab771741155f1f626cdaac491444998d99371e
|
ffe350e7a2443d04521b9bdf80634beab06ff4e9
|
/ndmg/preproc/rescale_bvec.py
|
23be2121bb1e52bfff52855de14f4dc1c3317d01
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
davclark/ndmg
|
487af5f8799763144c3eefe9d1e505a5086fa751
|
2474f33ea589fd36d89e0515f5b25b718242300b
|
refs/heads/master
| 2021-01-13T03:45:32.342021
| 2016-12-16T22:44:35
| 2016-12-16T22:44:35
| 77,243,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ndmg/preproc/rescale_bvec.py
# Created by Greg Kiar on 2016-02-12.
# Email: gkiar@jhu.edu
import numpy as np
import os.path as op
def rescale_bvec(bvec, bvec_new):
    """
    Normalizes b-vectors to be of unit length for the non-zero b-values. If the
    b-value is 0, the vector is untouched.

    Positional Arguments:
            - bvec:
                    File name of the original b-vectors file
            - bvec_new:
                    File name of the new (normalized) b-vectors file. Must have
                    extension `.bvec`
    """
    bv1 = np.array(np.loadtxt(bvec))
    # Enforce proper dimensions: one gradient vector per row.
    bv1 = bv1.T if bv1.shape[0] == 3 else bv1

    # Normalize vectors whose norm is not (close to) zero; zero vectors mark
    # b=0 (non-diffusion-weighted) volumes and must stay untouched.
    bv2 = [b / np.linalg.norm(b) if not np.isclose(np.linalg.norm(b), 0)
           else b for b in bv1]

    # Validate explicitly rather than via `assert`, which is stripped under
    # `python -O` and would then silently skip the extension check.
    if op.splitext(bvec_new)[1] == '.bvec':
        np.savetxt(bvec_new, bv2)
    else:
        # print() call form works in both Python 2 and 3 (the original used
        # the Python-2-only `print` statement).
        print('Error: your new b-vector file must have extension .bvec to' +
              ' be compatible with the the pipeline.')
|
[
"gkiar07@gmail.com"
] |
gkiar07@gmail.com
|
d809f07ad8fe1a11816669658b638526b0eb97d9
|
ab72bf3ea0e15f8b30a88d20babf3de32e4f8aca
|
/octoprint_lui/test/util_firmware_tests.py
|
8843beac74bc2d2e67b159f86bdf878110964096
|
[] |
no_license
|
bmelim/OctoPrint-LUI
|
ca883174618693bf4885e839750834313fdd200d
|
fd4d6b1269204b7c0db35762d26ebb1b9d429dd3
|
refs/heads/master
| 2021-10-29T02:11:37.522090
| 2019-04-25T08:41:09
| 2019-04-25T08:41:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
from __future__ import absolute_import
import unittest
import os
from octoprint_lui.util.firmware import FirmwareUpdateUtility
class util_firmware_tests(unittest.TestCase):
    """Integration tests for FirmwareUpdateUtility.

    NOTE(review): these tests hit the live update endpoint and use a
    hard-coded per-user Windows data folder, so they only pass on the
    author's machine with network access -- not CI-safe as written.
    """

    # Hard-coded local OctoPrint data directory the utility stores files in.
    data_folder = r'C:\Users\erikh\AppData\Roaming\OctoPrint\data\lui'

    def get_object(self):
        # Fresh utility per test; no state shared between test methods.
        return FirmwareUpdateUtility(self.data_folder)

    def test_get_latest_version(self):
        # Assign
        c = self.get_object()
        # Act
        version_info = c.get_latest_version("Bolt")
        # Assert
        self.assertIsNotNone(version_info)
        # NOTE(review): pins the server's current version (2.6); this
        # assertion goes stale as soon as newer firmware is published.
        self.assertEqual(version_info["version"], 2.6)

    def test_download_firmware(self):
        # Assign
        c = self.get_object()
        version_info = c.get_latest_version("Bolt")
        # Act
        if version_info:
            path = c.download_firmware(version_info["url"])
            # Assert
            self.assertIsNotNone(path)
            if path:
                self.assertTrue(os.path.exists(path))
if __name__ == '__main__':
unittest.main()
|
[
"erikheidstra@live.nl"
] |
erikheidstra@live.nl
|
e53c11eecb38b9c7028997c4e7267979c021e357
|
d0b2b287f5295f93535bf75f562df4aaf16da7fa
|
/image_getter.py
|
6bf644fabcd4d3dbbc0c476358e43ae6ccfd17bf
|
[] |
no_license
|
PolyProgrammist/ircman
|
96e4d1cc433a0ec77630e037c11d276716f4ffbe
|
cbc2bb8b71a85a2e4ba7c8befad302ddf122b908
|
refs/heads/master
| 2020-04-10T02:30:09.380902
| 2018-12-16T05:30:58
| 2018-12-16T05:30:58
| 160,745,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
from download_file import download
from jpg2ascii import ascii_from_image
def get_correct_image_url(urls, file_name):
    """Return the first URL in *urls* that downloads and renders as ASCII art.

    :param urls: candidate image URLs, tried in order
    :param file_name: local path the image is saved to while probing
    :return: the first working URL, or None if none of them work
    """
    for url in urls:
        try:
            download(url, file_name)
            # Probe that the downloaded payload really is a decodable image.
            ascii_from_image(file_name, 50)
            return url
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the loop uninterruptible.
        except Exception:
            continue
    return None
|
[
"pechkin350@gmail.com"
] |
pechkin350@gmail.com
|
47edb1af1f66074b0d6d738f2559031ba8fcedec
|
22c45c7c5de76818378e562af0f1462586b8dd65
|
/catkin_ws/build/catkin_generated/installspace/_setup_util.py
|
c2b8637287ceef9551e8771ef5c9f95d7b19b8e2
|
[] |
no_license
|
dikayudha90/QMLLeaning
|
5133cc9267e26cee17b30bf6e659cc021ac3d30c
|
10c380528b720a003a47bd99685885d39ffeda86
|
refs/heads/master
| 2020-04-03T17:04:17.025097
| 2018-11-02T16:55:23
| 2018-11-02T16:55:23
| 155,430,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,460
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# File whose presence marks a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'

system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')

# subfolder of workspace prepended to CMAKE_PREFIX_PATH
# Maps each environment variable to the workspace subfolder(s) whose paths
# get prepended to it (macOS uses DYLD_LIBRARY_PATH, not LD_LIBRARY_PATH).
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': 'bin',
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code to reset environment variables
    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
    This does not cover modifications performed by environment hooks.
    '''
    lines = []
    # Work on a snapshot so removals for one variable do not affect the
    # workspace lookups made while rolling back the next variable.
    unmodified_environ = copy.copy(environ)
    for key in sorted(env_var_subfolders.keys()):
        subfolders = env_var_subfolders[key]
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        # None means the variable needed no change; skip emitting an export.
        value = _rollback_env_variable(unmodified_environ, key, subfolders)
        if value is not None:
            environ[key] = value
            lines.append(assignment(key, value))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines
def _rollback_env_variable(environ, name, subfolders):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.

    :param subfolders: list of str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable.
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    for subfolder in subfolders:
        if subfolder:
            # Strip one leading/trailing separator so os.path.join below
            # produces a canonical path to compare against.
            if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
                subfolder = subfolder[1:]
            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
                subfolder = subfolder[:-1]
        for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
            path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
            path_to_remove = None
            for env_path in env_paths:
                # Ignore a single trailing separator when comparing entries.
                env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
                if env_path_clean == path_to_find:
                    path_to_remove = env_path
                    break
            if path_to_remove:
                env_paths.remove(path_to_remove)
                value_modified = True
    new_value = os.pathsep.join(env_paths)
    # Return None when nothing was removed so the caller can skip the export.
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Based on CMAKE_PREFIX_PATH return all catkin workspaces.

    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
    '''
    # Split the prefix path into its non-empty components.
    prefix_value = environ.get('CMAKE_PREFIX_PATH', '')
    candidates = [p for p in prefix_value.split(os.pathsep) if p]

    def _is_workspace(p):
        # A workspace carries the catkin marker file; fuerte trees and
        # not-yet-existing paths are optionally accepted as well.
        if os.path.isfile(os.path.join(p, CATKIN_MARKER_FILE)):
            return True
        if include_fuerte and p.startswith('/opt/ros/fuerte'):
            return True
        return include_non_existing and not os.path.exists(p)

    return [p for p in candidates if _is_workspace(p)]
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for the all workspaces.
    '''
    lines = []
    lines.append(comment('prepend folders of workspaces to environment variables'))

    paths = [path for path in workspaces.split(os.pathsep) if path]

    # CMAKE_PREFIX_PATH first: workspaces themselves, with no subfolder.
    prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))

    # Remaining variables get workspace + per-variable subfolder entries.
    for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
        subfolder = env_var_subfolders[key]
        prefix = _prefix_env_variable(environ, key, paths, subfolder)
        lines.append(prepend(environ, key, prefix))
    return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
    """Render a shell statement assigning *value* to variable *key*."""
    if IS_WINDOWS:
        return 'set %s=%s' % (key, value)
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Render *msg* as a shell comment line for the target shell."""
    template = 'REM %s' if IS_WINDOWS else '# %s'
    return template % msg
def prepend(environ, key, prefix):
    """Render shell code prepending *prefix* to environment variable *key*."""
    # If the variable is currently unset or empty, a plain assignment suffices.
    if not environ.get(key):
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))

    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    # Generic hooks are .sh (.bat on Windows); shell-specific hooks use the
    # extension named by CATKIN_SHELL (bash/zsh/...), when set.
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # Reverse order so hooks from earlier (higher-priority) workspaces
    # replace same-named hooks from later ones.
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    # Generic hooks run before shell-specific ones.
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)

        # environment at generation time
        # Paths baked in by catkin when this file was generated.
        CMAKE_PREFIX_PATH = '/media/ayudha/workspace/workspace/catkin_ws/devel;/opt/ros/kinetic'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)

        environ = dict(os.environ)
        lines = []
        # --extend keeps variables from a previously sourced workspace.
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        # Emit the shell code; the setup.SHELL script eval's this output.
        print('\n'.join(lines))

        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
|
[
"andika.yudha@ukaea.uk"
] |
andika.yudha@ukaea.uk
|
0c150c9a284c5be68997505461cf2a4b03bcc857
|
d8001c18ecb699e7708a34f0f3bf278e154f6177
|
/program69.py
|
d8762b1647282f347942c5a205b4b703e71454a8
|
[] |
no_license
|
Juvar1/microstrip-calc-1
|
b4da8bf8a0f834e2483dd7154f87bcd412f021ca
|
0415fe9ca136a8dc6a3706be031606f8a3c4da90
|
refs/heads/master
| 2020-07-16T22:30:16.007492
| 2019-09-06T15:02:43
| 2019-09-06T15:02:43
| 205,882,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,469
|
py
|
#programs from late 60'
#https://apps.dtic.mil/dtic/tr/fulltext/u2/708720.pdf
#Green's function:
#https://apps.dtic.mil/dtic/tr/fulltext/u2/a117364.pdf
#IBM System 360 Scientific Routine Package
#http://www.ebyte.it/library/codesnippets/IBM_System360_SSP.html

import math

# Shared mutable state for the spectral-domain routines (tsum/fn/summ).
# DIST[1]/DIST[2] hold 101 sampled values each -- presumably the odd/even
# charge distributions read by tsum(); verify against how DIST is filled.
DIST=[0,[0]*101,[0]*101]
XINT=0 #global variables
D1=0
D2=0
D3=0
D4=0
N1=0
N2=0
N3=0
N4=0 #global variables
# Parameters for the coupled-strip eigenvalue program.
E1=16.0
WD=0.5
SD=0.2
I=1
D=0.3048 #d board thickness
U1= 4.7#Ur
F= 3000000#f frequency

#input data for MSTRIP2 program
WH1 = 0.1 # starting point
DELW = 0.2 # step size
NT = 20 # lines in result table
R = 0.0 # 0.0 = no upper ground plane
DIEK = 9.6 # Ur dielectric constant
SH1 = 0.4 # spacing/height ratio
AIR = 1 # 0 = single strip, 1 = coupled strips
#function subprogram
def gint(U, R, CO, BO, DIEK):
    """Green's-function integrand evaluated at U.

    R == 0 selects the no-upper-ground-plane form of the denominator;
    otherwise the cover at relative height R contributes sinh/cosh terms.
    """
    sinc_arg = CO * U
    ch = math.cosh(U)
    sh = math.sinh(U)
    cos_term = math.cos(BO * U)
    if R != 0:
        v = (R - 1.0) * U
        cos_term = cos_term * math.sinh(v)
        den = sh * math.cosh(v) + DIEK * ch * math.sinh(v)
    else:
        den = sh + DIEK * ch
    return math.sin(sinc_arg) / sinc_arg * sh / U * cos_term / den
#compute integral
# 32-point Gauss-Legendre node/weight pairs (half-interval form, IBM SSP
# DQG32 constants), iterated in the original accumulation order.
_DQG32_NODES = (
    (0.49863193092474078e0, 0.35093050047350483e-2),
    (0.49280575577263417e0, 0.8137197365452835e-2),
    (0.48238112779375322e0, 0.12696032654631030e-1),
    (0.46745303796886984e0, 0.17136931456510717e-1),
    (0.44816057788302606e0, 0.21417949011113340e-1),
    (0.42468380686628499e0, 0.25499029631188088e-1),
    (0.39724189798397120e0, 0.29342046739267774e-1),
    (0.36609105937014484e0, 0.32911111388180923e-1),
    (0.33152213346510760e0, 0.36172897054424253e-1),
    (0.29385787862038116e0, 0.39096947893535153e-1),
    (0.25344995446611470e0, 0.41655962113473378e-1),
    (0.21067563806531767e0, 0.43826046502201906e-1),
    (0.16593430114106382e0, 0.45586939347881942e-1),
    (0.11964368112606854e0, 0.46922199540402283e-1),
    (0.7223598079139825e-1, 0.47819360039637430e-1),
    (0.24153832843869158e-1, 0.48270044257363900e-1),
)


def dqg32(XL, XU, R, CO, BO, DIEK):
    """32-point Gauss quadrature of gint over [XL, XU] (IBM SSP DQG32)."""
    mid = 0.5 * (XU + XL)
    width = XU - XL
    total = 0.0
    for node, weight in _DQG32_NODES:
        offset = node * width
        total = total + weight * (gint(mid + offset, R, CO, BO, DIEK)
                                  + gint(mid - offset, R, CO, BO, DIEK))
    return width * total
# computes the sine and cosine integral
def sici(X):
    """Return {"si": Si(X), "ci": Ci(X)} via the IBM SSP rational fits.

    Two regimes: a polynomial fit in Y = (4-|X|)(4+|X|) for |X| <= 4, and
    an asymptotic auxiliary-function expansion for |X| > 4.
    """
    Z = abs(X)
    if Z - 4.0 <= 0:
        Y = (4.0 - Z) * (4.0 + Z)
        SI = X * (((((1.753141e-9 * Y + 1.568988e-7) * Y + 1.374168e-5) * Y + 6.939889e-4) *
                   Y + 1.964882e-2) * Y + 4.395509e-1)
        CI = ((5.772156e-1 + math.log(Z)) / Z - Z * (((((1.386985e-10 * Y + 1.584996e-8) *
              Y + 1.725752e-6) * Y + 1.185999e-4) * Y + 4.990920e-3) * Y + 1.315308e-1)) * Z
        return {"si": SI, "ci": CI}
    # Large-argument branch: Ci = f*sin - g*cos style auxiliary expansion.
    SI = math.sin(Z)
    Y = math.cos(Z)
    Z = 4.0 / Z
    U = ((((((((4.048069e-3 * Z - 2.279143e-2) * Z + 5.515070e-2) * Z - 7.261642e-2) *
        Z + 4.987716e-2) * Z - 3.332519e-3) * Z - 2.314617e-2) * Z - 1.134958e-5) *
        Z + 6.250011e-2) * Z + 2.583989e-10
    V = (((((((((-5.108699e-3 * Z + 2.819179e-2) * Z - 6.537283e-2) * Z +
        7.902034e-2) * Z - 4.400416e-2) * Z - 7.945556e-3) * Z + 2.601293e-2) * Z -
        3.764000e-4) * Z - 3.122418e-2) * Z - 6.646441e-7) * Z + 2.500000e-1
    CI = Z * (SI * V - Y * U)
    SI = -Z * (SI * U + Y * V) + 1.570796
    return {"si": SI, "ci": CI}
# To solve a system of simultaneous linear equations with symmetric
# coefficient matrix, upper triangular part of which is assumed to be
# stored columnwise.
# Port of IBM SSP subroutine GELS (Gaussian elimination with complete
# pivoting on the diagonal). A is 1-based packed upper-triangular storage;
# M is the order, N the number of right-hand sides, EPS the relative
# significance tolerance. Returns (R, IER): R holds the solution(s),
# IER = 0 ok, K-1 = possible significance loss at step K, -1 = failure.
def dgels(A,M,N,EPS):#,AUX):
    R=[1.0]*(M+1) # return values
    AUX=[0]*M # auxiliary storage array
    if(M <= 0):
        IER=-1
        return (R,IER)
    #search for greatest main diagonal element
    IER=0
    PIV=0.0
    L=0
    for K in range(1,M+1):
        L=L+K
        TB=abs(A[L])
        if (TB-PIV > 0):
            PIV=TB
            I=L
            J=K
    TOL=EPS*PIV
    #Main diagonal element A[I]=A[0.0] is first pivot element.
    #PIV kontains the absolute value of A[I].
    #Start elimination loop
    LST=0
    NM=N*M
    LEND=M-1
    for K in range(1,M+1):
        #test on usefulness of symmetric algorithm
        if (PIV <= 0):
            IER=-1
            return (R,IER)
        if (IER == 0):
            if (PIV-TOL <= 0):
                # Pivot smaller than tolerance: flag possible loss of
                # significance at elimination step K (warning, not fatal).
                IER=K-1
        LT=J-K
        LST=LST+K
        #Pivot row reduction and row interchange in right hand side R
        PIVI=1.0/A[I]
        for L in range(K,NM+1,M):
            LL=L+LT
            TB=PIVI*R[LL]
            R[LL]=R[L]
            R[L]=TB
        #is elimination terminated
        if (K-M < 0):
            # row and column interchange and pivot row reduction in matrix A.
            # Elements of pivot column are saved in auxiliary vector AUX.
            LR=int(LST+(LT*(K+J-1))/2)
            LL=LR
            L=LST
            for II in range(K,LEND+1):
                L=L+II
                LL=LL+1
                if (L-LR == 0):
                    A[LL]=A[LST]
                    TB=A[L]
                elif (L-LR > 0):
                    LL=L+LT
                    TB=A[LL]
                    A[LL]=A[L]
                elif (L-LR < 0):
                    TB=A[LL]
                    A[LL]=A[L]
                AUX[II]=TB
                A[L]=PIVI*TB
            #save column interchange information
            A[LST]=LT
            #element reduction and search for next pivot
            PIV=0.0
            LLST=LST
            LT=0
            for II in range(K,LEND+1):
                PIVI=-AUX[II]
                LL=LLST
                LT=LT+1
                for LLD in range(II,LEND+1):
                    LL=LL+LLD
                    L=LL+LT
                    A[L]=A[L]+PIVI*A[LL]
                LLST=LLST+II
                LR=LLST+LT
                TB=abs(A[LR])
                if (TB-PIV > 0):
                    PIV=TB
                    I=LR
                    J=II+1
                for LR in range(K,NM+1,M):
                    LL=LR+LT
                    R[LL]=R[LL]+PIVI*R[LR]
    #back substitution and back interchange
    if (LEND < 0):
        IER=-1
        return (R,IER)
    elif (LEND == 0):
        return (R,IER)
    else:
        II=M
        for I in range(2,M+1):
            LST=LST-II
            II=II-1
            # A[LST] stored the column-interchange offset; +0.5 rounds the
            # float before the int() truncation below.
            L=A[LST]+0.5
            for J in range(II,NM+1,M):
                TB=R[J]
                LL=J
                K=LST
                for LT in range(II,LEND+1):
                    LL=LL+1
                    K=K+LT
                    TB=TB-A[K]*R[LL]
                K=int(J+L)
                R[J]=R[K]
                R[K]=TB
        return (R,IER)
# generates X
def xgen(M, WH, SH1):
    """Build the 60-slot abscissa table for M substrips of total width WH.

    Slots 1..M hold the first strip's points; when SH1 (spacing/height
    ratio) is nonzero, slots M+1..3M-1 hold the coupled strip's points
    offset by SH1.
    """
    coords = [0] * 60
    step = WH / M
    for idx in range(M):
        coords[idx + 1] = idx * step
    if SH1 != 0.0:
        for jj in range(1, 2 * M):
            coords[jj + M] = jj * step + SH1
    return coords
# creates PHI with free air
def mphi(WH, M, S, X):
    """Free-space potential coefficients PHI for the substrip abscissae X.

    S == 0 restricts evaluation to the single-strip entries (1..M);
    otherwise coupled-strip entries up to 3M-1 are filled too.
    """
    phi = [0] * 60
    last = M if S == 0 else 3 * M - 1
    half_width = WH / (2.0 * M)
    wyp = 2.0 * 1.0  # twice the unit-normalized height
    for k in range(1, last + 1):
        right = X[k] + half_width
        left = X[k] - half_width
        phi[k] = ((left / (2.0 * half_width)) * math.log((left ** 2) / (left ** 2 + wyp ** 2))
                  - (right / (2.0 * half_width)) * math.log((right ** 2) / (right ** 2 + wyp ** 2))
                  + (wyp / half_width) * (math.atan(right / wyp) - math.atan(left / wyp)))
    return phi
# M = substrips count
# WH =
# SH1 = 0.4 spacing/height ratio
# DIEK = 9.6 Ur
# R = 0
# creates PHI with dielectric substrate other than free air
def mgreen(M,WH,SH1,DIEK,R,X):
    # PHI[k]: potential coefficient for abscissa X[k] over the dielectric.
    PHI=[0]*60
    CO=WH/M*0.5  # substrip half-width
    X1=5.0       # switch point between numeric head and analytic tail
    INT=2        # quadrature panels over [0, X1]
    H=X1/float(INT)
    # Single strip fills entries 1..M; coupled strips fill 1..3M-1.
    if (SH1 == 0.0):
        MA=M
    else:
        MA=3*M-1
    for MM in range(1,MA+1):
        BO=X[MM]
        YTOT=0.0
        XU=0.0
        XL=0.0
        # compute first integral
        # Head of the Green's-function integral via 32-point Gauss panels.
        for I in range(INT):
            XU+=H
            YTOT+=dqg32(XL,XU,R,CO,BO,DIEK)
            XL+=H
        AI1=YTOT
        #compute second integral
        # Tail [X1, inf) handled in closed form with cosine integrals.
        S1=(CO+BO)*X1
        S2=(BO-CO)*X1
        AI2A=math.sin(S1)/X1-(CO+BO)*sici(S1)["ci"]
        AI2B=math.sin(S2)/X1-(BO-CO)*sici(S2)["ci"]
        AI2=AI2A-AI2B
        PHI[MM]=4.0*(AI1+1.0/((1.0+DIEK)*CO*2.0)*AI2)
    return PHI
#creates A and B
def amat(S, M, PHI):
    """Assemble the even (A) and odd (B) system matrices from PHI.

    Packed upper-triangular symmetric storage, columnwise, 1-based. S
    couples in the mirror-strip contribution with + (even mode) and
    - (odd mode) signs.
    """
    even = [0] * 211
    odd = [0] * 211
    for row in range(1, M + 1):
        # Triangular offset from the rows above row-1 (zero for row <= 2).
        tri_low = (row - 2) * (row - 1) // 2 if row > 2 else 0
        for col in range(1, M + 2 - row):
            tri_col = col * (col + 1) // 2
            slot = tri_col + tri_low + (row - 1) * col
            mirror = 3 * M + 2 * (1 - col) - row
            even[slot] = PHI[row] + S * PHI[mirror]
            odd[slot] = PHI[row] - S * PHI[mirror]
    return [even, odd]
#calculate final output
def output(CAP1E, CAPKE, CAP1O, CAPKO, WH, AIR):
    """Derive and print impedance/velocity results from the capacitances.

    CAP1E/CAP1O are free-space even/odd capacitances, CAPKE/CAPKO the
    values with the dielectric present. AIR == 1 prints the coupled-strip
    (even + odd) report, otherwise the single-strip report.
    """
    light = 2.99792458
    eff_even = CAPKE / CAP1E
    root_even = math.sqrt(eff_even)
    z_even = 1.0e+4 / (light * CAP1E * root_even)
    v_even = (1.0 / root_even) * light
    if AIR != 1:
        print("W/H1=%.3f ZOE=%.3f VE=%.3f EFFE=%.3f" % (WH, z_even, v_even, eff_even))
        return
    eff_odd = CAPKO / CAP1O
    root_odd = math.sqrt(eff_odd)
    z_odd = 1.0e+4 / (light * CAP1O * root_odd)
    v_odd = (1.0 / root_odd) * light
    print("W/H1=%.3f ZOE=%.3f ZOO=%.3f VE=%.3f VO=%.3f EFFE=%.3f EFFO=%.3f" % (WH, z_even, z_odd, v_even, v_odd, eff_even, eff_odd))
    return
def mstrip2(WH1, DELW, NT, R, DIEK, SH1, AIR):
    """Microstrip impedance table: sweep W/H from WH1 in NT steps of DELW.

    R: upper ground plane parameter (0.0 = none); DIEK: substrate Er;
    SH1: spacing/height ratio; AIR: 0 = single strip, 1 = coupled strips.
    Prints one result line per W/H value via output().
    """
    M = 20        # substrips count
    N = 1         # number of right hand side vectors for DGELS function
    EPS = 1.0e-7  # loss of significance limit level for DGELS function
    for K in range(NT):  # NT = lines on the output file
        WH = WH1 + K * DELW  # starting point + K * step size
        X = xgen(M, WH, SH1)
        CAP1E = 0
        CAPKE = 0
        # BUG FIX: the original had `CAP10 = 0` (digit zero), so with
        # AIR == 0 the odd-mode capacitance CAP1O was never assigned and
        # the output() call below raised NameError.
        CAP1O = 0
        CAPKO = 0
        for IJ in range(2):
            if IJ == 0:
                ADIEK = 1.0   # permittivity of air
            if IJ == 1:
                ADIEK = DIEK  # substrate permittivity
            if R == 0.0 and ADIEK == 1.0:  # R = 0: no upper ground plane
                PHI = mphi(WH, M, AIR, X)
            else:
                PHI = mgreen(M, WH, SH1, ADIEK, R, X)
            # JJ = 0 solves the even-mode system, JJ = 1 (AIR == 1 only)
            # solves the odd-mode system.
            for JJ in range(AIR + 1):
                AB = amat(AIR, M, PHI)
                (V, IER) = dgels(AB[JJ], M, N, EPS)
                if IER != 0:
                    print("IER= %.0f IN SUBROUTINE DGELS, SO THE CHARGE DENSITY COULD NOT BE CALCULATED TO THE PRECISION OF %.13f DIGITS" % (IER, EPS))
                # Total charge = sum of the substrip charge densities.
                CAPSUM = 0.0
                for I in range(M):
                    CAPSUM += V[I + 1]
                CC = CAPSUM * 111.256  # scale factor carried over from the original program
                if JJ == 0 and IJ == 0:
                    CAP1E = CC
                if JJ == 0 and IJ == 1:
                    CAPKE = CC
                if JJ == 1 and IJ == 0:
                    CAP1O = CC
                if JJ == 1 and IJ == 1:
                    CAPKO = CC
        output(CAP1E, CAPKE, CAP1O, CAPKO, WH, AIR)
    return
#integration subroutine
def dqtfe(H,Y,Z,NDIM):
    # Trapezoidal running integral (port of IBM SSP QTFE): Z[i] receives the
    # integral of Y over the first i sample points with spacing H (1-based).
    SUM2=0.0
    if (NDIM-1 < 0):
        # No points at all: nothing to integrate.
        return Z
    elif (NDIM-1 > 0):
        HH=0.500*H
        for I in range(2,NDIM+1):
            SUM1=SUM2
            SUM2=SUM2+HH*(Y[I]+Y[I-1])
            Z[I-1]=SUM1
    # NOTE(review): executed for NDIM >= 1, so NDIM == 1 yields Z[1] = 0,
    # matching the Fortran QTFE original.
    Z[NDIM]=SUM2
    return Z
#fourier transform program for coupled strip
def tsum(XN,ARG1):
    global DIST,F,U1,E1,WD,SD,D,XINT,I
    # Fourier sum of the tabulated distribution DIST[I] over the strip
    # extent; I selects sine (1) or cosine (2) symmetry. Samples span from
    # the inner strip edge (SD/2) to the outer edge (SD/2 + WD) with
    # spacing 0.025.
    MS=(SD/2.0)/0.025+1
    MW=(SD/2.0+WD)/0.025
    S=0.0
    M=int(MS)-1
    while True:
        M=M+1
        SIGMA=DIST[I][M]
        ARG3=ARG1*M*0.025
        if (I == 1):
            C=math.sin(ARG3)
        if (I == 2):
            C=math.cos(ARG3)
        S=S+SIGMA*C
        if (M >= MW):
            break
    TSUM=S
    return TSUM
#integrand evaluation program for coupled strip
def fn(XN,SQ):
    global F,U1,E1,WD,SD,D,XINT,I
    # Spectral-domain integrand at harmonic index XN for the trial
    # eigenvalue SQ; the aperture factor A comes from the tabulated
    # distribution via tsum().
    if (XN == 0):
        FN=0.0
        return FN
    DL=D/30.0
    T=U1*E1-SQ
    Q=(SQ-1.0)/T
    R=(U1*E1-1.0)/T
    ARG1=0.2*math.pi*XN/WD
    ARG2=0.1*math.pi*XN*(SD/WD+1.0)
    DTDEF=math.tanh(ARG1)
    A=tsum(XN,ARG1)
    if (I == 1):
        E=math.sin(ARG2)
    if (I == 2):
        E=math.cos(ARG2)
    if (F == 0.0):
        # Static (zero-frequency) limit of the integrand.
        PART=Q*U1*DTDEF-1.0
        XNUM=1.0/XN*A*PART*E
        XDEN=R**2-E1/SQ*PART*(Q*1.0/DTDEF-1.0/E1)
    else:
        A1=(ARG1/(F*DL))**2
        A2=(2.0*math.pi)**2*T
        SIGN=1.0
        B12=(A2-A1)
        if (B12 < 0):
            SIGN=-1.0
        B1=math.sqrt(SIGN*B12)
        Z=math.tan(F*DL*B1)
        if (B12 < 0):
            # Evanescent case: trig function becomes hyperbolic.
            Z=math.tanh(F*DL*B1)
        B2=math.sqrt((2.0*math.pi)**2*(SQ-1.0)+A1)
        PART=SIGN*(Q*U1*Z+SIGN*B2/B1)
        XNUM=A*PART*B1*E
        XDEN=A1*R**2+SIGN*E1/SQ*B12*PART*(Q*1.0/Z-1.0/E1*B2/B1)
    if (XDEN == 0.0):
        # Diagnostic only; the division below would still raise.
        print("NUM=%.5f DEN=%.5f" % (XNUM,XDEN))
    FN=XNUM/XDEN
    return FN
#integrand evaluation program
def fn1(XN,SQ):
    global F,U1,E1,WD,SD,D,XINT,I
    # Variant of fn() that uses a closed-form aperture factor A (the
    # P1/P2/P3 terms) instead of the tabulated distribution from tsum().
    if (XN == 0):
        FN=0.0
        return FN
    DL=D/30.0
    T=U1*E1-SQ
    Q=(SQ-1.0)/T
    R=(U1*E1-1.0)/T
    ARG1=0.2*math.pi*XN/WD
    ARG3=0.1*math.pi*XN #*(SD/WD+1.0)
    DTDEF=math.tanh(ARG1)
    P3=193.5092066/XN**3
    P1=9.54929668/XN
    P2=60.79271019/XN**2
    A=1.0/XN*(P3*(P1-P3)*math.cos(ARG3)*(2.0-P2)*math.sin(ARG3))
    if (F == 0.0):
        # Static (zero-frequency) limit of the integrand.
        PART=Q*U1*DTDEF-1.0
        XNUM=1.0/XN*A*PART
        XDEN=R**2-E1/SQ*PART*(Q*1.0/DTDEF-1.0/E1)
    else:
        A1=(ARG1/(F*DL))**2
        A2=(2.0*math.pi)**2*T
        SIGN=1.0
        B12=(A2-A1)
        if (B12 < 0):
            SIGN=-1.0
        B1=math.sqrt(SIGN*B12)
        Z=math.tan(F*DL*B1)
        if (B12 < 0):
            # Evanescent case: trig function becomes hyperbolic.
            Z=math.tanh(F*DL*B1)
        B2=math.sqrt((2.0*math.pi)**2*(SQ-1.0)+A1)
        PART=SIGN*(Q*U1*Z+SIGN*B2/B1)
        XNUM=A*PART*B1
        XDEN=A1*R**2+SIGN*E1/SQ*B12*PART*(Q*1.0/Z-1.0/E1*B2/B1)
    if (XDEN == 0.0):
        # Diagnostic only; the division below would still raise.
        print("NUM=%.5f DEN=%.5f" % (XNUM,XDEN))
    FN=XNUM/XDEN
    return FN
#summation program
def summ(SQ):
    global F,U1,E1,WD,SD,D,XINT,I
    global D1,D2,D3,D4,N1,N2,N3,N4
    # Integral of fn over harmonics 0..100, split into four segments with
    # step sizes D1..D4 and point counts N1..N4 (set by generalProgram).
    # NOTE(review): FF and Z alias the SAME list object here. dqtfe only
    # writes each slot after it has been read, so results are unaffected,
    # but the aliasing is fragile -- two separate lists would be safer.
    FF=Z=[0]*801
    XB=0.0
    for J in range(1,N1+1):
        XN=XB+(J-1)*D1
        FF[J]=fn(XN,SQ)
    Z=dqtfe(D1,FF,Z,N1)
    S1=Z[N1]
    XB=25.0
    for J in range(1,N2+1):
        XN=XB+(J-1)*D2
        FF[J]=fn(XN,SQ)
    Z=dqtfe(D2,FF,Z,N2)
    S2=Z[N2]
    XB=50.0
    for J in range(1,N3+1):
        XN=XB+(J-1)*D3
        FF[J]=fn(XN,SQ)
    Z=dqtfe(D3,FF,Z,N3)
    S3=Z[N3]
    XB=75.0
    for J in range(1,N4+1):
        XN=XB+(J-1)*D4
        FF[J]=fn(XN,SQ)
    Z=dqtfe(D4,FF,Z,N4)
    S4=Z[N4]
    # Total is also published through the XINT global for the caller.
    XINT=S1+S2+S3+S4
    SUM=XINT
    return SUM
#this subroutine solves transcendental equation of one unknown
#by half interval search method
def trans(A,B,EPS):
    """Solve summ(SQ) = 0 on [A, B] by half-interval (bisection) search.

    Returns (ROOT, IER): IER == 0 on success, IER == 1 when summ(A) and
    summ(B) have the same sign (no root bracketed; ROOT is the dummy 1).
    """
    IER = 0
    FA = summ(A)
    FB = summ(B)
    if FA * FB > 0:
        # Endpoints do not bracket a sign change.
        IER = 1
        print("IER=%f" % IER)
        return (1, IER)
    # Either endpoint may already be a root within tolerance.
    if abs(FA) <= EPS:
        return (A, IER)
    if abs(FB) <= EPS:
        return (B, IER)
    for IK in range(1, 41):
        X = (A + B) / 2.0
        FX = summ(X)
        if abs(FX) <= EPS:
            return (X, IER)
        # Keep the half-interval that still brackets the sign change.
        if FA * FX < 0:
            B = X
            FB = FX
        else:
            A = X
            FA = FX
    # BUG FIX: the original fell off the loop and implicitly returned None,
    # which crashed the caller's `(ROOT, IER) = trans(...)` unpacking.
    # Return the best midpoint estimate after 40 bisections instead.
    return (X, IER)
#general program
def generalProgram():
    """Driver: bisect for the effective-index root PSI between 1 and E1.

    Tries the sub-intervals (1, (E1+1)/2) and ((E1+1)/2, E1) in turn,
    nudging the endpoints by 0.01 to stay off the interval edges, and
    prints the first root found by trans() along with the integral value.
    """
    global F,U1,E1,WD,SD,D,XINT,I
    global D1,D2,D3,D4,N1,N2,N3,N4
    # Grid sizes for the four integration panels used by summ().
    N1=601
    N2=301
    N3=201
    N4=101
    for JP in range(1,3):
        # Candidate bracketing points (index 0 is an unused placeholder).
        TRY=[0,1.0,((E1+1.0)/2.0),E1]
        # Panel steps: 25-wide panels with N1..N4 points each.
        D1=25.0/600.0
        D2=25.0/300.0
        D3=25.0/200.0
        D4=25.0/100.0
        for LL in range(1,5):
            if (LL+1 > 3):
                # Both sub-intervals failed to bracket a root.
                print("ROOT TROUBLE")
                return
            AB=TRY[LL]+.01
            AE=TRY[LL+1]-.01
            EPS=.00001
            (ROOT,IER)=trans(AB,AE,EPS)
            SQ=ROOT
            if (IER == 0):
                break
        print("PSI=%.5f VALUE OF INTEGRAL=%.5f" % (SQ, XINT))
#run programs
mstrip2(WH1,DELW,NT,R,DIEK,SH1,AIR)
generalProgram()
|
[
"noreply@github.com"
] |
Juvar1.noreply@github.com
|
f3cacfe1a5ef7e12b283dc374f680866fd74265a
|
25a77144c1f08c28cc03074a3b163294ac6319d1
|
/myvenv/bin/rst2pseudoxml.py
|
2812616640a85ba167892c06e18e8fcb1061a4bf
|
[] |
no_license
|
silvablack/kivy-googlemaps
|
e0c6acaa8ace9487589db5acf79824fe2cf934a7
|
758606b0d1110d8538f9e28a03177d2319f17a5e
|
refs/heads/master
| 2021-01-01T15:33:08.903444
| 2017-07-18T20:11:23
| 2017-07-18T20:11:23
| 97,639,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
#!/media/paulosilva/DATA/dev/python/kivy/iapy/myvenv/bin/python3.5
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
    # Best-effort: honour the user's locale for I/O encodings; docutils
    # front ends traditionally swallow any failure here.
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes). ' + default_description)

# Parse sys.argv and run the docutils publisher with default reader/writer.
publish_cmdline(description=description)
|
[
"paulosilvadev3@gmail.com"
] |
paulosilvadev3@gmail.com
|
84b9021d08a9de3bff0f7452db343d6f9eff2737
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractFeelinthedarkWordpressCom.py
|
9249f38f20a8d6c421a2fdeb188b451d178b04c1
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
def extractFeelinthedarkWordpressCom(item):
    '''
    Parser for 'feelinthedark.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items carrying no chapter/volume numbering, and preview posts.
    has_numbering = bool(chp or vol)
    if not has_numbering or "preview" in item['title'].lower():
        return None
    # (tag to look for, release name, translation type)
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    # First tag that appears on the item wins.
    for tagname, name, tl_type in (t for t in tagmap if t[0] in item['tags']):
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
4c37f8756787f66c89714abf199eafe4472ea8da
|
a3cbbc30ffa09b1261cba23b39c7b89610ea0882
|
/代码/BinaryTree/isPostArray.py
|
d02be8bc7fefb6bec4bc52ad7b569cb7b6c06366
|
[] |
no_license
|
SemieZX/InterviewBook
|
239594b94c20f8b5c4b7f55c0f2101f387f28472
|
e9feda57305ce4b4614ec5c8ab339c41de95a40c
|
refs/heads/master
| 2020-03-23T07:19:55.594395
| 2018-07-24T04:07:27
| 2018-07-24T04:07:27
| 141,264,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
#判断数组是不是搜索二叉树的后序遍历结果
def isPostArray(arr):
    """Return True if arr is a valid post-order traversal of some BST."""
    # Idiom fix: `is None` instead of `== None`.  An empty or missing
    # array is not a valid traversal.
    if arr is None or len(arr) == 0:
        return False
    return isPost(arr, 0, len(arr) - 1)
def isPost(arr, start, end):
    """Check whether arr[start:end+1] is a BST post-order traversal.

    The root is arr[end]; all smaller values must form a contiguous
    prefix (left subtree) and all values >= the root a contiguous suffix
    (right subtree), recursively.
    """
    if start == end:
        return True
    last_small = -1   # last index with arr[i] < root
    first_big = end   # first index with arr[i] >= root
    for idx in range(start, end):
        if arr[idx] < arr[end]:
            last_small = idx
        elif first_big == end:
            first_big = idx
    # One side is empty: drop the root and validate the remaining subtree.
    if last_small == -1 or first_big == end:
        return isPost(arr, start, end - 1)
    # The small prefix and big suffix must be adjacent.
    if first_big - last_small != 1:
        return False
    return isPost(arr, start, last_small) and isPost(arr, first_big, end - 1)
# 通过arr 重构二叉树
class Node:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
def posArrayToBST(posArr):
    """Rebuild the BST whose post-order traversal is posArr; return root."""
    # Idiom fix: `is None` instead of `== None`; an empty traversal has
    # no tree, so return None rather than recursing into an empty range.
    if posArr is None or len(posArr) == 0:
        return None
    return posToBST(posArr, 0, len(posArr) - 1)
def posToBST(posArr, start, end):
    """Recursively rebuild the BST for posArr[start:end+1] (root at end).

    Returns the subtree's root Node, or None for an empty range.
    """
    if start > end:
        # BUG FIX: the original returned True here, which attached the
        # boolean True as a child Node.  An empty range has no subtree.
        return None
    head = Node(posArr[end])
    less = -1   # last index of the left-subtree (values < root)
    more = end  # first index of the right-subtree (values >= root)
    for i in range(start, end):
        if posArr[i] < posArr[end]:
            less = i
        else:
            more = i if more == end else more
    head.left = posToBST(posArr, start, less)
    head.right = posToBST(posArr, more, end - 1)
    return head
|
[
"zx18846031359@aliyun.com"
] |
zx18846031359@aliyun.com
|
3a435ff99ea88c70ffbd1a4bd2d9f55e2dda7251
|
a2f399e2d37cc0db6fe5224658c74273cf93a329
|
/L2/file_read/file_read1.py
|
4934796114fdb5d07b51c1fb48ca42779f105a2c
|
[] |
no_license
|
LuLuuD/Data_Analysis_with_Python
|
aa282cc192060f899b60525dc0e8b60fb1104f7a
|
d6ed51ab91a945710b5d58932e492f7a5205ebab
|
refs/heads/master
| 2021-05-17T00:06:49.229306
| 2020-03-27T09:26:17
| 2020-03-27T09:26:17
| 250,528,827
| 1
| 0
| null | 2020-03-27T12:29:37
| 2020-03-27T12:29:36
| null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
# Read the imagenet class-ID listing.
# BUG FIX: the original opened the file twice and never closed either
# handle; context managers guarantee the handles are released.
with open('imagenet_class.csv') as f:
    text = f.read()
#print(text)

# Read the same file again, line by line.
with open('imagenet_class.csv') as f:
    lines = f.readlines()
#print(lines)
print(len(lines))
print(lines[0])

# Write a file (the original closed this one; `with` keeps it symmetric).
with open('temp.txt', 'w') as f:
    f.write('hello world!')
|
[
"noreply@github.com"
] |
LuLuuD.noreply@github.com
|
a75b0a6d1618d4a26675106f1fa18f43587b5694
|
0df028c081ede62e6ac89cac305a778d0cc69906
|
/Week_08/146.py
|
0170797cac0777e91175035a0a18302fe9ceefa6
|
[] |
no_license
|
szwxj/algorithm014-algorithm014
|
583b86f0ce37292829bef2efa38c11379f16da22
|
ba2a51c9122765f696e62a22244db7e57eeb25d1
|
refs/heads/master
| 2023-01-07T04:58:41.319823
| 2020-10-28T05:01:12
| 2020-10-28T05:01:12
| 287,697,125
| 0
| 0
| null | 2020-08-15T07:12:20
| 2020-08-15T07:12:20
| null |
UTF-8
|
Python
| false
| false
| 2,436
|
py
|
class ListNode:
    """Doubly linked list node holding a (key, value) pair."""

    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value
        # Links are wired up by the owning list.
        self.prev = None
        self.next = None
class LRUCache:
    """Least-recently-used cache: a dict for O(1) lookup plus a doubly
    linked list ordered least-recent (after head) to most-recent (before
    tail)."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.hashmap = {}
        # Sentinels: real entries live strictly between head and tail.
        self.head = ListNode()
        self.tail = ListNode()
        self.head.next = self.tail
        self.tail.prev = self.head

    def move_node_to_tail(self, key: int) -> None:
        """Relink key's node just before the tail (most recently used).

        Callers (get/put) guarantee the key is present.
        """
        node = self.hashmap[key]
        # Splice the node out of its current position...
        node.prev.next = node.next
        node.next.prev = node.prev
        # ...and back in right before the tail sentinel.
        last = self.tail.prev
        node.prev = last
        node.next = self.tail
        last.next = node
        self.tail.prev = node

    def get(self, key: int) -> int:
        """Return key's value (refreshing its recency), or -1 if absent."""
        if key not in self.hashmap:
            return -1
        self.move_node_to_tail(key)
        return self.hashmap[key].value

    def put(self, key: int, value: int) -> None:
        """Insert or update key; evict the least-recent entry when full."""
        if key in self.hashmap:
            self.move_node_to_tail(key)
            self.hashmap[key].value = value
            return
        if len(self.hashmap) == self.capacity:
            # Evict the node right after head (least recently used).
            oldest = self.head.next
            self.hashmap.pop(oldest.key)
            self.head.next = oldest.next
            oldest.next.prev = self.head
        node = ListNode(key, value)
        self.hashmap[key] = node
        # Append as most recently used.
        node.prev = self.tail.prev
        node.next = self.tail
        self.tail.prev.next = node
        self.tail.prev = node
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
# Demo: exercise the cache with the classic LeetCode 146 example sequence.
cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
print(cache.get(1))       # returns 1
cache.put(3, 3)           # evicts key 2
print(cache.get(2))       # returns -1 (not found)
cache.put(4, 4)           # evicts key 1
print(cache.get(1))       # returns -1 (not found)
print(cache.get(3))       # returns 3
print(cache.get(4))       # returns 4
|
[
"13501586491@139.com"
] |
13501586491@139.com
|
3fc9e53a229181c9eb2abbbc34e1a1cf657fd95d
|
01ef56390e9687385df29aede327e6e8ce0c5434
|
/jxty/__init__.py
|
7ca58a6e54312e5540426ede46fbb0138c8f8266
|
[] |
no_license
|
DitS-03/jxty
|
f8bcb565ddeace6f0dc8cebfccbc9da11fe8c84e
|
b4282d9afcb5c3702efa8b5a41da7fbed7bbe355
|
refs/heads/master
| 2023-04-21T01:25:14.604281
| 2021-05-05T12:52:35
| 2021-05-05T12:52:35
| 364,571,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
from .jxty import load, dump
|
[
"dits.3rd@gmail.com"
] |
dits.3rd@gmail.com
|
c25cdc7238dd7e4c54ac9483b61d768d5355593a
|
42c6356db85ac5bb83b3434a4a445b92736eb9c9
|
/offload.py
|
0562d398a63b3ae4604704a8c30350559eed0922
|
[
"Apache-2.0"
] |
permissive
|
atolab/eclipse-fog05-ros2-demo
|
a2671072c694588285a3574dc4d24dba20cab11c
|
4414e5eb8d3d98f4dadeca8873bea158adcaa3ca
|
refs/heads/master
| 2022-12-04T01:49:08.302497
| 2020-07-24T10:53:57
| 2020-07-24T10:53:57
| 281,626,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
from fog05 import FIMAPI
import uuid
import json
import sys
import os
import time
def main(ip):
    """Offload every FDU instance from the fog05 FIM at `ip`, then remove
    all networks.  Exits the process when finished (status 0) or when no
    nodes are registered (status -1)."""
    a = FIMAPI(ip)
    nodes = a.node.list()
    if len(nodes) == 0:
        print('No nodes')
        # Idiom fix: sys.exit instead of the interactive-only exit().
        sys.exit(-1)
    print('Nodes:')
    for n in nodes:
        print('UUID: {}'.format(n))
        discs = a.fdu.list()
        for d_id in discs:
            info = a.fdu.instance_list(d_id)
            print('info : {}'.format(info))
            if n in info:
                time.sleep(1)
                i_ids = info[n]
                # Terminate every running instance before offloading.
                for iid in i_ids:
                    print('Terminating iid : {}'.format(iid))
                    a.fdu.terminate(iid)
                a.fdu.offload(d_id)
    # Tear down any leftover networks.
    nets = a.network.list()
    if nets:
        print('networks : {}'.format(nets))
        for n in nets:
            net_uuid = n['uuid']
            print('net_id : {}'.format(net_uuid))
            a.network.remove_network(net_uuid)
    sys.exit(0)
if __name__ == '__main__':
    # Require the FIM/zenoh endpoint as the single CLI argument.
    if len(sys.argv) < 2:
        print('[Usage] {} <zenoh ip:port>'.format(
            sys.argv[0]))
        exit(0)
    main(sys.argv[1])
|
[
"gabriele.baldoni@gmail.com"
] |
gabriele.baldoni@gmail.com
|
4ae4beed0304871a2f9f5c63092192fe5c9b6ca9
|
540afd391a9806c84e9715e9fabbbee34c95bb00
|
/ptah/cmsapp/actions.py
|
8a52aa38f39a65c1ab19d632e3d9d21c5a976ecd
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
mcdonc/ptah
|
c0e496741b88af7229904137d843982cb30da886
|
b54083b4ba358f70cf7ce473133f1b45d071a10f
|
refs/heads/master
| 2021-01-18T10:48:11.862294
| 2011-10-18T23:06:07
| 2011-10-18T23:06:07
| 2,554,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
""" default actions """
import ptah, ptah.cmsapp
ptah.cmsapp.uiaction(
ptah.cms.IContent, **{'id': 'view',
'title': 'View',
'action': '',
'permission': ptah.cms.View,
'sortWeight': 0.5})
ptah.cmsapp.uiaction(
ptah.cms.IContent, **{'id': 'edit',
'title': 'Edit',
'action': 'edit.html',
'permission': ptah.cms.ModifyContent,
'sortWeight': 0.6})
ptah.cmsapp.uiaction(
ptah.cms.IContainer, **{'id': 'adding',
'title': 'Add content',
'action': '+/',
'permission': ptah.cms.AddContent,
'sortWeight': 5.0})
ptah.cmsapp.uiaction(
ptah.ILocalRolesAware, **{'id': 'sharing',
'title': 'Sharing',
'action': 'sharing.html',
'permission': ptah.cms.ShareContent,
'sortWeight': 10.0})
|
[
"fafhrd91@gmail.com"
] |
fafhrd91@gmail.com
|
38ee8d5857ce1363921fa6e4f9f533c95fc72ee9
|
e1ac46d306eddc0929544690a68dbe4f5931c4ce
|
/build/lib/japTrans/transCat.py
|
ddccb65bd48a53610fcaaea548bfef51965e81f3
|
[] |
no_license
|
mayukhg/mytranscat
|
f51da6446fe298a39de8bd8f3c9e648aebad205b
|
0be5405f6cdad1a731e74a21730fcb90f940672f
|
refs/heads/master
| 2021-01-21T21:29:18.887925
| 2017-06-20T05:38:55
| 2017-06-20T05:38:55
| 94,853,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
# Imports the Google Cloud client library
from google.cloud import translate
def run_quickstart():
    """Translate a fixed sample string to Japanese via the Google Cloud
    Translation API and print the result."""
    # Instantiates a client (uses ambient Google Cloud credentials).
    translate_client = translate.Client()
    # The text to translate
    text = 'My Test'
    # The target language (ISO 639-1 code for Japanese)
    target = 'ja'
    # Translates input text to Japanese
    translation = translate_client.translate(
        text, target_language=target)
    # Print the translated text
    trans_txt = str(translation['translatedText'])  # NOTE(review): unused; kept for parity with the commented-out return below
    print('Translated text')
    print(translation['translatedText'])
    #return trans_txt
#return trans_txt
#run_quickstart()
|
[
"mayukh.ghosh@pb.com"
] |
mayukh.ghosh@pb.com
|
b44e169f60258d75c52f9ffc53104d7a7b59c90e
|
3260f6b7594417813e8a78ec746e380353d7dcb4
|
/ex18/ex18.py
|
53bb656d374d585fdda65b055405ea3e534dd926
|
[] |
no_license
|
Drewleks/learn-python-the-hard-way
|
62f317c1b71ce79d981c8de24d9bcad91ae8e878
|
2074727244d1490aaa5b8c62b0bc1414687e5286
|
refs/heads/master
| 2022-12-04T12:42:14.045555
| 2020-08-24T18:08:03
| 2020-08-24T18:08:03
| 278,914,292
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
# похоже на сценарии с argv
def print_two(*args):
    """Unpack exactly two positional args (argv-style) and print them."""
    arg1, arg2 = args
    print("arg1: {}, arg2: {}".format(arg1, arg2))
# ок, здесь вместо *args мы делаем следующее
def print_two_again(arg1, arg2):
    """Same output as print_two, but with named parameters, no *args."""
    message = "arg1: {}, arg2: {}".format(arg1, arg2)
    print(message)
# принимает только один аргумент
def print_one(arg1):
    """Print a single labelled argument."""
    print("arg1: {}".format(arg1))
# не принимает аргументов
def print_none():
    """Take no arguments; print a fixed message."""
    message = "А я ничего не получаю."
    print(message)
print_two("Михаил", "Райтман")
print_two_again("Михаил", "Райтман")
print_one("Первый!")
print_none()
|
[
"drew.leks@gmail.com"
] |
drew.leks@gmail.com
|
25b68d91f3d56fc02c96b75755170471e8c38473
|
4d3651a1448848cd991a9e84acb23cb567d4b016
|
/mh_code/report_gen.py
|
6bc13ae8d4b81606ba53943594a1884a0b598032
|
[] |
no_license
|
magentalab/personalization-models
|
c3bbb1e8885a2be52bec6b5eef404197c841aa9d
|
a6180a50bbd47068b9046c322d5cdfaaf00cf32d
|
refs/heads/master
| 2021-01-25T12:31:54.424168
| 2018-08-28T21:01:01
| 2018-08-28T21:01:01
| 123,478,636
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
from datetime import datetime, timedelta
from reports import reports, now
import omniture
import boto3
def lambda_handler(event, context):
    """AWS Lambda entry point: queue each configured Omniture report and
    record its report id and timestamp in the DynamoDB `reports` table."""
    # initialize dynamodb tables
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('reports')
    # log in and get report suite
    # SECURITY: credentials are hard-coded; move them to environment
    # variables or AWS Secrets Manager and rotate this shared secret.
    un = "APIteam:T-Mobile USA"
    sec = "936bf8cd2f1964e19e87ad81d7e04c32"
    analytics = omniture.authenticate(un, sec)
    tmobusprod = analytics.suites["tmobusprod"]
    # create reports and write each result to DynamoDB
    for i in reports:
        try:
            # BUG FIX: `report.async()` is a SyntaxError on Python 3.7+
            # because `async` became a reserved keyword; fetch the method
            # by name instead.
            report = getattr(tmobusprod.jsonReport(i[1]), 'async')()
            # Poll until the queued report is ready.
            while report.check() == False:
                report.check()
            else:
                table.put_item(
                    Item={
                        'title': i[0],
                        'report_id': getattr(report, "id"),
                        'datetime': str(now)
                    }
                )
        except KeyError:
            continue
|
[
"noreply@github.com"
] |
magentalab.noreply@github.com
|
c2246cf1ebc461f0682810d48c462cee0b921e2d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_confounding.py
|
8581f71a9ed68951c64d85a8220fd9717c6a7b51
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
from xai.brain.wordbase.verbs._confound import _CONFOUND
#class header
class _CONFOUNDING(_CONFOUND, ):
    """Word entry for the verb form "confounding".

    Inherits behaviour from _CONFOUND and overrides only the identifying
    metadata set in __init__.
    """
    def __init__(self,):
        _CONFOUND.__init__(self)
        self.name = "CONFOUNDING"  # surface form
        self.specie = 'verbs'      # word class
        self.basic = "confound"    # lemma / base form
        self.jsondata = {}         # extra data; empty for this entry
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
5f0169bcf640b58c1523063dc47c0a2f4e9f9f7e
|
7832e7dc8f1583471af9c08806ce7f1117cd228a
|
/aliyun-python-sdk-acs/aliyunsdkacs/request/v20150101/PutVersionErrorMappingRequest.py
|
dd766d6417263d42e4c5842c78c5c286848841d3
|
[
"Apache-2.0"
] |
permissive
|
dianplus/aliyun-openapi-python-sdk
|
d6494850ddf0e66aaf04607322f353df32959725
|
6edf1ed02994245dae1d1b89edc6cce7caa51622
|
refs/heads/master
| 2023-04-08T11:35:36.216404
| 2017-11-02T12:01:15
| 2017-11-02T12:01:15
| 109,257,597
| 0
| 0
|
NOASSERTION
| 2023-03-23T17:59:30
| 2017-11-02T11:44:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,131
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class PutVersionErrorMappingRequest(RoaRequest):
    """ROA-style request for Acs PutVersionErrorMapping (API 2015-01-01).

    PUT /[ProductName]/[VersionName]?ErrorMapping

    BUG FIX: the generated original declared methods such as
    `get_Content-Type`, which is a SyntaxError ('-' is not valid in a
    Python identifier), and left `get_BodyContent`/`set_BodyContent` with
    empty bodies (also SyntaxErrors).  Method names are de-hyphenated; the
    hyphenated names survive only as HTTP header keys.
    """

    def __init__(self):
        RoaRequest.__init__(self, 'Acs', '2015-01-01', 'PutVersionErrorMapping')
        self.set_uri_pattern('/[ProductName]/[VersionName]?ErrorMapping')
        self.set_method('PUT')

    def get_Accept(self):
        return self.get_headers().get('Accept')

    def set_Accept(self, Accept):
        self.add_header('Accept', Accept)

    def get_ContentType(self):
        return self.get_headers().get('Content-Type')

    def set_ContentType(self, ContentType):
        self.add_header('Content-Type', ContentType)

    def get_ContentLength(self):
        return self.get_headers().get('Content-Length')

    def set_ContentLength(self, ContentLength):
        self.add_header('Content-Length', ContentLength)

    def get_ContentMD5(self):
        return self.get_headers().get('Content-MD5')

    def set_ContentMD5(self, ContentMD5):
        self.add_header('Content-MD5', ContentMD5)

    def get_ProductName(self):
        return self.get_path_params().get('ProductName')

    def set_ProductName(self, ProductName):
        self.add_path_param('ProductName', ProductName)

    def get_VersionName(self):
        return self.get_path_params().get('VersionName')

    def set_VersionName(self, VersionName):
        self.add_path_param('VersionName', VersionName)

    # NOTE(review): assumes RoaRequest exposes get_content/set_content for
    # the raw request body, as in aliyunsdkcore — confirm.
    def get_BodyContent(self):
        return self.get_content()

    def set_BodyContent(self, BodyContent):
        self.set_content(BodyContent)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
222a80bc4ebc18b5dbf21ff24c91774fece862ad
|
0e79d785137f091668f25a48e42ba0e3a924988a
|
/chat/migrations/0001_initial.py
|
44cece06a69ce9282a9ef5bb8ef4a9f89f963dd8
|
[] |
no_license
|
OnkeTshaka/realtimeChat
|
da554145abcf366331303c2cc42a4941ffcb6730
|
92a178618973291992f02e3e585347c40e7e0c9b
|
refs/heads/master
| 2023-06-30T12:22:47.772635
| 2021-08-11T09:10:02
| 2021-08-11T09:10:02
| 389,781,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
# Generated by Django 3.2.5 on 2021-07-21 20:41
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the chat app: Message and Room tables.

    Auto-generated by Django 3.2.5; field order is part of migration
    state, so the operations are left exactly as generated.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.TextField(max_length=10000)),
                # NOTE(review): datetime.datetime.now is naive local time;
                # django.utils.timezone.now is the usual choice — confirm.
                ('date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('user', models.CharField(max_length=100)),
                ('room', models.CharField(max_length=10000)),
            ],
        ),
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
    ]
|
[
"onktshaka200@gmail.com"
] |
onktshaka200@gmail.com
|
62158c7dbf0255cd6a0cf4b8b25a85f8ec363ecb
|
d0b9f796da6487906c18971b3d58b8aa36e05f7b
|
/meiduo_mall/meiduo_mall/apps/oauth/urls.py
|
4c8165508caef27f9c0521108059bd4f56491e41
|
[] |
no_license
|
kristenin/meiduo_25
|
5144eed885f01147a93f8b8ff18d5358fbd73148
|
bfbe43eea2466cb4b6dbc26472c9b46e0203b494
|
refs/heads/master
| 2020-05-16T09:37:09.650841
| 2019-05-19T07:48:22
| 2019-05-19T07:48:22
| 182,954,747
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # URL that returns the QQ login page address
    url(r'^qq/authorization/$', views.OAuthURLView.as_view()),
    # Callback handling after a successful QQ login
    url(r'^oauth_callback/$', views.OAuthUserView.as_view()),
    # URL that returns the Weibo login page address
    url(r'^weibo/authorization/$', views.OAuthWeiboURLView.as_view()),
    # Callback handling after a successful Weibo (Sina) login
    url(r'^sina_callback/$', views.OAuthWeiboUserView.as_view()),
]
|
[
"827406976@qq.com"
] |
827406976@qq.com
|
4c47a9e5a2e7028068843b8cc248f08e68a14e61
|
18535460ab8ecb84e2861b76e28bc10aa11fce1a
|
/config.py
|
d23275afcb5c737c0f578129d7b4c8fe0ad4997d
|
[] |
no_license
|
jongmok1031/sba-api-3
|
dab4ba1faaa240ee7b69e894f296d37dfdb09e03
|
91d030854a525c2ff376d67d0990c03be0304199
|
refs/heads/master
| 2022-12-28T19:10:34.547187
| 2020-10-19T00:52:13
| 2020-10-19T00:52:13
| 301,314,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import mysql.connector
basedir = os.path.dirname(os.path.abspath(__file__))

# MariaDB connection settings.
# SECURITY NOTE(review): credentials are hard-coded; load them from
# environment variables or a config file kept out of version control.
db = {
    'user' : 'root',
    'password' : 'root',
    'host' : 'localhost',
    'port' : '3306',
    'database' : 'mariadb'
}

# Module-level handle so the `finally` block below can close it safely.
mysql_con = None
#------------------------------------------------------#
def query_executor(cursor):
    """Run a SELECT over the whole `food` table on the given cursor."""
    cursor.execute("select * from food")
#------------------------------------------------------#
if __name__ == "__main__":
print('test')
try:
mysql_con = mysql.connector.connect(host='localhost', port='3306', database='mariadb', user='root', password='root')
mysql_cursor = mysql_con.cursor(dictionary=True)
query_executor(mysql_cursor)
for row in mysql_cursor:
print('price is: '+str(row['price']))
mysql_cursor.close()
except Exception as e:
print(e.message)
finally:
if mysql_con is not None:
mysql_con.close()
|
[
"jongmok1031@gmail.com"
] |
jongmok1031@gmail.com
|
b45aa3b369e51d3af7bdd0e497a40f1b327873a8
|
04b86e00e6c097f3d29fe1ae133416c77388c4db
|
/pyfinex/v2/positions.py
|
2d6a7cc3d04b9ce253c73524b4be925792d58c9d
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ddxv/pyfinex
|
299747fbfe800d34bb059cdbb9828234e83a46ec
|
2e299c24a38a3299a6feb625d2b3c0be135dcd72
|
refs/heads/master
| 2020-03-27T13:03:23.268218
| 2018-08-16T13:23:44
| 2018-08-16T13:23:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
from pyfinex.api import request
def active(key, secret_key, **params):
    """ Get active positions
    Docs: https://bitfinex.readme.io/v2/reference#rest-auth-positions
    """
    # Authenticated v2 POST; extra keyword args pass through as the body.
    return request(
        authenticate=True,
        key=key,
        secret_key=secret_key,
        version=2,
        endpoint='auth/r/positions',
        method='POST',
        body_params=params,
    )
|
[
"michel.z.gaston@gmail.com"
] |
michel.z.gaston@gmail.com
|
0d9d664b991f9c98271dd520854461b29bb2ccba
|
45681de2f46bb8712ab65588fc0f8dece7ef97b2
|
/bipolar/scan_net.py
|
aac6adc0984fbba388834d40994109ac58d55542
|
[] |
no_license
|
cleverhandle1/bipolar
|
3db51315c574367df383d27d24f07d1c645236ca
|
36b66a7b299ebbc875c8d9263e98a70548b1a1fe
|
refs/heads/master
| 2020-04-05T18:14:38.197680
| 2019-02-12T06:23:51
| 2019-02-12T06:23:51
| 157,093,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,316
|
py
|
#!/usr/bin/env python
import bipolar
from celery import group
from time import sleep
from elasticsearch import helpers, Elasticsearch
import sys
import json
# Expand the CIDR given on the command line into individual addresses.
ip_net = sys.argv[1]
ips = bipolar.net_explode(ip_net)

# Fan one nmap scan task per address out to the 'scan' Celery queue.
my_group = group([bipolar.scan_nmap.s(ip) for ip in ips])
group_results = my_group.apply_async(queue='scan')

# Print the task id of each child task (results are not awaited here).
for child in group_results.children:
    print(child.as_tuple()[0][0])
#group_results = my_group.apply_async()
#while not group_results.ready():
# print('waiting for jobs to complete')
# sleep(10)
#group_results = group_results.get()
#
#scan_data = {}
#for results in group_results:
# ip = results['scan'].keys()[0]
# scan_data[ip] = results['scan'][ip]
#
#output = []
#for ip in scan_data.keys():
# open_ports = []
# if 'tcp' in scan_data[ip].keys():
# for port in scan_data[ip]['tcp'].keys():
# state = scan_data[ip]['tcp'][port]['state']
# if state == 'open':
# open_ports.append(int(port))
# output.append(json.dumps({'ip': ip, 'open_ports':open_ports}))
#
#try:
# es = Elasticsearch(timeout=999999)
# helpers.bulk(es, output, index='fnscan', doc_type="doc")
#except Exception as e:
# print(e)
# pass
#
#with open('data/output/scan_output.log', 'w') as f:
# for out in output:
# f.write(out + '\n')
#
|
[
"cleverhandle1@protonmail.com"
] |
cleverhandle1@protonmail.com
|
0370b6175b59970d5d82aa30238319974ee6a517
|
593df604930c84405fa36f1cd7a12279426ecb7c
|
/users/forms.py
|
a1b267cd075f2987e7f544c40d47d24e9b63f07f
|
[] |
no_license
|
rainy0824/blog_project
|
f0c55b6af4e623ab6f36eb3d9cf703c8b74161a9
|
01cc90e3c4901e0208813ff397fee0ff494a16d0
|
refs/heads/master
| 2021-01-21T14:57:15.305895
| 2017-07-02T00:30:34
| 2017-07-02T00:30:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from django.contrib.auth.forms import UserCreationForm
from users.models import User
# Registration form for the custom user model
class RegisterForm(UserCreationForm):
    """Registration form bound to the project's custom User model,
    exposing only username and email (passwords come from the base form).
    """
    class Meta(UserCreationForm.Meta):
        model=User
        fields = ("username","email",)
|
[
"564312974@qq.com"
] |
564312974@qq.com
|
422219e68c8fc6b1d9c7665884f7854a5781f89c
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/394/usersdata/348/75042/submittedfiles/ex11.py
|
a86725ac7a49dce4e69e343fad26c23422956c61
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
# -*- coding: utf-8 -*-
# Read two dates (day, month, year) and report which is the later one.
dia1 = int(input( 'informe dia 1: '))
mes1 = int(input( 'informe mes 1: '))
ano1 = int(input( 'informe ano 1: '))
dia2 = int(input( 'informe dia 2: '))
mes2 = int(input( 'informe mes 2: '))
ano2 = int(input( 'informe ano 2: '))
# BUG FIX: the original elif-cascade compared months/days without first
# requiring the higher-order fields to be equal (e.g. 2021-01 vs 2020-05
# wrongly printed DATA 2 because mes1 < mes2).  Comparing
# (year, month, day) tuples orders the dates correctly.
data1 = (ano1, mes1, dia1)
data2 = (ano2, mes2, dia2)
if data1 == data2:
    print ('IGUAIS')
elif data1 < data2:
    print ('DATA 2')
else:
    print ('DATA 1')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
b99914fd42cf43830dfecd852de8946174cad379
|
2bdb23e8bebe307039d11519826e05d4744304c8
|
/Agent5/main_agent_Agent5_rewards.py
|
7145b73631b8a0d03d220a6fb40eb072dccb6196
|
[
"MIT"
] |
permissive
|
schigeru/Bachelorarbeit_Code
|
608b2f9f97a319d6fd4de36e0e244d4d7c148c4a
|
261b2552221f768e7022abc60a4e5a7d2fedbbae
|
refs/heads/main
| 2023-04-10T10:12:20.440503
| 2021-04-29T14:42:03
| 2021-04-29T14:42:03
| 362,824,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
#!/usr/bin/env python3
import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines import PPO2
from gym_Agent4_rewards import PandaRobotGymEnv
import rospy
import numpy as np
import os
def main():
    """Load a trained PPO2 reaching policy and run it on the Panda robot
    environment for 500 deterministic steps."""
    robot = PandaRobotGymEnv()
    # stable-baselines policies expect a vectorized env wrapper.
    # NOTE(review): the lambda closes over `robot`; DummyVecEnv calls it
    # during construction, before the name is rebound, so this works.
    robot = DummyVecEnv([lambda: robot])
    model = PPO2.load("/home/valentin/BA_Logs/Agent4/reaching_policy/1mio_timesteps", env=robot)
    obs = robot.reset()
    for i in range(0, 500):
        if i == 499:
            print("Last Step")
        # Deterministic rollout: always take the policy's best action.
        action, _ = model.predict(obs, deterministic=True)
        obs, reward, done, info = robot.step(action)
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
schigeru.noreply@github.com
|
d5393221a951177318a182ebfd77d15eda641d19
|
a6fc32dcff876ea2d080aa7c83da413880b51917
|
/test/server.py
|
3691039f24edf19a07d259363bfc395c7618766c
|
[] |
no_license
|
penkich/esp32
|
f6d396d11780e0ff753644b73f2336a9c121c779
|
82aa0e6e3ed3b2b619cdc3d9271bd9063284c881
|
refs/heads/master
| 2021-01-21T17:02:09.332036
| 2017-05-21T01:31:14
| 2017-05-21T01:31:14
| 91,924,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,466
|
py
|
#
# socket を使った簡単なテスト 2017-05-20
# サーバー、クライアントとも、sta_ifでWiFiに接続しておく
# 時間経過により、サーバ、クライアント双方のNeopixelの色が変わる
# タイマーのオーバーフロー問題ありそう
#
from machine import Pin
from neopixel import NeoPixel
import socket
import time
def callback(p):
    """Pin-IRQ handler: reset the elapsed-time origin to now.

    (Original author's note, translated: "this approach is probably not
    good" — sleeping and printing inside an IRQ handler is fragile on
    MicroPython; micropython.schedule would be safer.)
    """
    global start
    start = time.ticks_ms()
    print(p)
    time.sleep(0.5)  # crude debounce for the push button
p5 = Pin(5, Pin.IN, Pin.PULL_UP) # push button that resets the timer
p5.irq(trigger=Pin.IRQ_FALLING, handler=callback)

pin = Pin(4,Pin.OUT) # NeoPixel data line
np = NeoPixel(pin, 1) # a single NeoPixel is attached

# Listen on all interfaces; port choice is arbitrary.
port = 6809
addr = socket.getaddrinfo('0.0.0.0',port)[0][-1]
s = socket.socket()
s.bind(addr)
s.listen(1)

# Elapsed-time origin; reset by the button IRQ above.
start = time.ticks_ms()
def led(t):
    """Set the NeoPixel colour from elapsed milliseconds t.

    Green below 10 min, shifting toward red as time passes, off past the
    final threshold.  (600000 ms = 10 min; the jump to 6000000 between
    the last two bands looks inconsistent — confirm intended thresholds.)
    """
    if t < 600000:
        rgb = [0,100,0]
    elif t < 1200000:
        rgb = [30,80,0]
    elif t < 1800000:
        rgb = [50,50,0]
    elif t < 6000000:
        rgb = [80,0,0]
    else:
        rgb = [0,0,0]
    np[0] = rgb
    np.write()
# Serve forever: on each client connection, send the elapsed milliseconds
# since `start` and refresh the local NeoPixel colour.
while True:
    cl,addr = s.accept()
    # cl_file = cl.makefile('rwb',0)
    delta = time.ticks_diff(time.ticks_ms(), start)
    print('client connected from',addr)
    cl.send(bytes(str(delta),'utf8'))
    print(str(delta))
    led(delta) # only updates when a client connects
    cl.close()
|
[
"noreply@github.com"
] |
penkich.noreply@github.com
|
256120e63c4d5d9ea63f3c11da220556fdff2d99
|
146bf9e65cd26b1f672323e95e9ad063eed7fcb8
|
/polympics_server/models/awards.py
|
5d153073881a1d15234e2804f0e9f2ea5bf67e3a
|
[] |
no_license
|
polympics/server
|
e9ad31c672b28d60f252acc24be9545aac0f76c6
|
1e12ebd512641f8dcabb8730ea786ec235a4726a
|
refs/heads/main
| 2023-06-16T22:43:18.321235
| 2021-07-13T12:27:49
| 2021-07-13T12:27:49
| 345,300,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
"""Models for giving awards to players and their teams."""
from typing import Any
import peewee
from . import accounts
from .database import BaseModel, db
from .teams import Team
class Award(BaseModel):
    """An award given to a team."""

    title = peewee.CharField(max_length=32)       # display name
    image_url = peewee.CharField(max_length=512)  # badge image location
    # Nullable: an award may exist before being assigned to a team.
    team = peewee.ForeignKeyField(Team, null=True, backref='awards')

    def as_dict(self) -> dict[str, Any]:
        """Get the award as a dict to be returned as JSON.

        NOTE(review): `team` is not serialised here — confirm callers
        expect to attach team data separately.
        """
        return {
            'id': self.id,
            'title': self.title,
            'image_url': self.image_url
        }
class Awardee(BaseModel):
    """A player who received an award.

    Can be multiple per award.
    """

    # Deleting the award or the account removes this link row as well.
    award = peewee.ForeignKeyField(Award, on_delete='CASCADE')
    account = peewee.ForeignKeyField(accounts.Account, on_delete='CASCADE')
db.create_tables([Award, Awardee])
|
[
"artemisdev21@gmail.com"
] |
artemisdev21@gmail.com
|
c79c02c034f54342d85d430c85b53196edaad645
|
0a82cbec2555404a8db0a0b55c774d939384d528
|
/website/tempCodeRunnerFile.py
|
797fe6f6506614b2590dbce9e37162192933296f
|
[] |
no_license
|
shubham405/notes_app
|
1ddba7ce59a27bfceb99ecdd5e16095483bf9125
|
b2d7eb1742382c30c19c86c3685bfea3aa0b6646
|
refs/heads/main
| 2023-04-15T05:57:36.595228
| 2021-05-06T15:55:30
| 2021-05-06T15:55:30
| 363,679,107
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
from flask_login import login_user,login_required,logout_user,current_user
|
[
"noreply@github.com"
] |
shubham405.noreply@github.com
|
47e6bf3e7e36708ee37f165e578633ede9fa3751
|
07e7444952a97b97212f30d892298efda9b67dd9
|
/precioussecret/client/tests/test_views.py
|
9b29a65171e92682a49dfc5d12cffa5e2ee67f06
|
[] |
no_license
|
lucekdudek/precioussecret
|
453a06a1b85939b221e02b1c78235cc9d4d44f2c
|
846a15e1f4f42ae1acd0042ec0f304ce0b4f1665
|
refs/heads/master
| 2021-09-27T19:43:35.189468
| 2020-04-09T11:52:45
| 2020-04-09T11:52:45
| 252,746,277
| 0
| 0
| null | 2021-09-22T18:50:49
| 2020-04-03T13:50:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,128
|
py
|
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.models import User
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import RequestFactory
from django.test import TestCase
from rest_framework import status
from precioussecret.client.views import AddSecretView
from precioussecret.client.views import SecretDetailsView
class AddSecretViewTest(TestCase):
    """Test module for add secret view.
    """

    def setUp(self):
        # Real user record; the password matters only for authentication.
        self.user = User.objects.create_user(
            username='gollum',
            email='gollum@ring.lord',
            password='myprecioussss'
        )
        self.factory = RequestFactory()

    def test_post_anonymous_user(self):
        # Anonymous POSTs must be rejected with 403 Forbidden.
        request = self.factory.post("N/A")
        request.user = AnonymousUser()
        response = AddSecretView.as_view()(request)
        self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)

    def test_post_authenticated_user(self):
        # Authenticated POSTs succeed with 200 OK.
        request = self.factory.post("N/A")
        request.user = self.user
        response = AddSecretView.as_view()(request)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
class SecretDetailsViewTest(TestCase):
    """Tests for SecretDetailsView's template context.

    The view reads 'access_name' and 'access_code' out of the session and
    exposes 'access_url' and 'access_code' in its template context.
    """

    def setUp(self):
        self.factory = RequestFactory()
        # NOTE(review): SessionMiddleware() with no get_response argument is
        # deprecated/removed in newer Django releases — confirm the target
        # Django version before upgrading.
        self.session_middleware = SessionMiddleware()

    def __fill_request_with_session(self, request, access_name=None, access_code=None):
        # Attach a session to the bare RequestFactory request, then store
        # only the keys that were actually supplied.
        self.session_middleware.process_request(request)
        if access_name:
            request.session['access_name'] = access_name
        if access_code:
            request.session['access_code'] = access_code
        request.session.save()

    def test_get_context_data_empty_session(self):
        # No session data at all: both context keys must be absent/None.
        request = self.factory.get("N/A")
        self.__fill_request_with_session(
            request=request
        )
        response = SecretDetailsView.as_view()(request)
        self.assertIsNone(response.context_data.get('access_url'))
        self.assertIsNone(response.context_data.get('access_code'))

    def test_get_context_data_half_empty_session(self):
        # Only the code is stored: the URL must stay unset.
        access_code_sample = 'DKERLA'
        request = self.factory.get("N/A")
        self.__fill_request_with_session(
            request=request,
            access_code=access_code_sample
        )
        response = SecretDetailsView.as_view()(request)
        self.assertIsNone(response.context_data.get('access_url'))
        self.assertEqual(access_code_sample, response.context_data.get('access_code'))

    def test_get_context_data_complete_session(self):
        access_code_name = 'sa-mp-le'
        access_code_sample = 'SAMPLE'
        request = self.factory.get("N/A")
        self.__fill_request_with_session(
            request=request,
            access_code=access_code_sample,
            access_name=access_code_name
        )
        response = SecretDetailsView.as_view()(request)
        # The access name must appear somewhere inside the generated URL.
        self.assertIn(access_code_name, response.context_data.get('access_url'))
        self.assertEqual(access_code_sample, response.context_data.get('access_code'))
|
[
"lucekdudek@gmail.com"
] |
lucekdudek@gmail.com
|
417b4a042215ae3af7738115158fe0e36a4ae88c
|
00c9f5101f326a8eb374fba78c3f3c4589937bb4
|
/I2provisioner.py
|
deff7a86273ea348f6af658b7785401f546ab7a4
|
[] |
no_license
|
w4rc0n/Icinga-2-Provisioner
|
72b7e5f1b266c06c9d842c4d69539881f0ef27f1
|
a017a61c87512a0d8785b1c0e4b3a4125419b73d
|
refs/heads/main
| 2023-01-23T19:37:20.179179
| 2020-11-26T07:47:36
| 2020-11-26T07:47:36
| 315,475,864
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,897
|
py
|
#!/usr/bin/env python3
import socket
import requests
import argparse
import subprocess

# Adjust these variables to your environment. This variable should be your CA master node, as well as your configuration master.
master = 'https://yourmaster.com'
masterip = 'ip address of the above host'
# If you have a second master set up in an HA cluster, provide those details below like the above. If you do not have a second master, leave them as is.
master2 = ''
masterip2 = ''
# Your icinga web 2 login information
webuser = 'web_user'
webpass = 'web_password'

# Take in positional command line args
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--domain', action='store', dest='domain', nargs='*', type=str, required=True, help='Enter host name to be provisioned into icinga2')
parser.add_argument('-k', '--key', action='store', dest='key', nargs='*', type=str, required=True, help='Enter API key for desired host template')
args = parser.parse_args()

# Take those args and put them in variables
domain = args.domain
key = args.key
# nargs='*' makes args.domain/args.key lists (normally of length 1); join
# them back into plain strings for URL building below.
keystring = ''.join(key)
domainstring = ''.join(domain)

# Icinga Director self-service registration endpoint on the master.
api_endpoint = master + '/director/self-service/register-host?'
'''
Get IP of the given host.
This opens a socket to cloudflare DNS server but doesnt send any data, its just to find the IP the server uses to call out to the world, the one we want to provision.
I would advise that you change this to query a server of your own.
'''
result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
result.connect(("1.1.1.1", 53))
result = result.getsockname()[0]

# Form the data to be POST'd
data = {'address': result}
# Headers that the API requires
headers = {'Accept': 'application/json'}
# Form the URL from the 3 arguments provided by the command line, and by this loop
url = str(api_endpoint +'name=' + domainstring + '&key=' + keystring)
# POST to the API, provisioning the host
requests.post(url=url, data=data, headers=headers)

# Form ticket URL from item and ticket API URL
ticketurl = master + '/director/host/ticket?name=' + domainstring
# GET for the icinga ticket (basic auth with the web credentials above)
response = requests.get(url=ticketurl, headers={'Accept': 'application/json'}, auth=(webuser, webpass))
# Capture content of the GET request; the body is a JSON string, so strip
# the surrounding quotes and trailing newline to get the raw ticket value.
ticket = response.text.replace('"','')
ticket = ticket.rstrip()
# Hold on to your pants, we're generating a big ass shell script
with open ('icinga2kickstart.sh', 'w+') as rsh:
rsh.write('''\
#!/bin/bash
# This generates and signs your required certificates. Please do not
# forget to install the Icinga 2 package and your desired monitoring
# plugins first.
# Config from Director
ICINGA2_NODENAME=''
ICINGA2_CA_TICKET=''
ICINGA2_PARENT_ZONE='master'
ICINGA2_PARENT_ENDPOINTS=()
ICINGA2_CA_NODE=''
ICINGA2_GLOBAL_ZONES=('director-global')
# Internal defaults
: "${ICINGA2_OSFAMILY:=}"
: "${ICINGA2_HOSTNAME:="$(hostname -f)"}"
: "${ICINGA2_NODENAME:="${ICINGA2_HOSTNAME}"}"
: "${ICINGA2_CA_NODE:=}"
: "${ICINGA2_CA_PORT:=5665}"
: "${ICINGA2_CA_TICKET:=}"
: "${ICINGA2_PARENT_ZONE:=master}"
: "${ICINGA2_PARENT_ENDPOINTS:=()}"
: "${ICINGA2_GLOBAL_ZONES:=director-global}"
: "${ICINGA2_DRYRUN:=}"
: "${ICINGA2_UPDATE_CONFIG:=}"
# Helper functions
fail() {
echo "ERROR: $1" >&2
exit 1
}
warn() {
echo "WARNING: $1" >&2
}
info() {
echo "INFO: $1" >&2
}
check_command() {
command -v "$@" &>/dev/null
}
install_config() {
if [ -e "$1" ] && [ ! -e "${1}.orig" ]; then
info "Creating a backup at ${1}.orig"
cp "$1" "${1}.orig"
fi
echo "Writing config to ${1}"
echo "$2" > "${1}"
}
[ "$BASH_VERSION" ] || fail "This is a Bash script"
errors=
for key in NODENAME CA_NODE CA_PORT CA_TICKET PARENT_ZONE PARENT_ENDPOINTS; do
var="ICINGA2_${key}"
if [ -z "${!var}" ]; then
warn "The variable $var needs to be configured!"
errors+=1
fi
done
[ -z "$errors" ] || exit 1
# Detect osfamily
if [ -n "$ICINGA2_OSFAMILY" ]; then
info "Assuming supplied osfamily $ICINGA2_OSFAMILY"
elif check_command rpm && ! check_command dpkg; then
info "This should be a RedHat system"
if [ -e /etc/sysconfig/icinga2 ]; then
# shellcheck disable=SC1091
. /etc/sysconfig/icinga2
fi
ICINGA2_OSFAMILY=redhat
elif check_command dpkg; then
info "This should be a Debian system"
if [ -e /etc/default/icinga2 ]; then
# shellcheck disable=SC1091
. /etc/default/icinga2
fi
ICINGA2_OSFAMILY=debian
else
fail "Could not determine your os type!"
fi
# internal defaults
: "${ICINGA2_CONFIG_FILE:=/etc/icinga2/icinga2.conf}"
: "${ICINGA2_CONFIGDIR:="$(dirname "$ICINGA2_CONFIG_FILE")"}"
: "${ICINGA2_DATADIR:=/var/lib/icinga2}"
: "${ICINGA2_SSLDIR_OLD:="${ICINGA2_CONFIGDIR}"/pki}"
: "${ICINGA2_SSLDIR_NEW:="${ICINGA2_DATADIR}"/certs}"
: "${ICINGA2_SSLDIR:=}"
: "${ICINGA2_BIN:=icinga2}"
case "$ICINGA2_OSFAMILY" in
debian)
: "${ICINGA2_USER:=nagios}"
: "${ICINGA2_GROUP:=nagios}"
;;
redhat)
: "${ICINGA2_USER:=icinga}"
: "${ICINGA2_GROUP:=icinga}"
;;
*)
fail "Unknown osfamily '$ICINGA2_OSFAMILY'!"
;;
esac
icinga_version() {
"$ICINGA2_BIN" --version 2>/dev/null | grep -oP 'version: [rv]\K\d+\.\d+\.\d+[^\)]*'
}
icinga_major() {
icinga_version | grep -oP '^\d+\.\d+'
}
# only can do float like versions (1.x)
version_compare() {
# args: v1 op v2
return "$(awk "BEGIN {print !($1 $2 $3)}")"
}
# Make sure icinga2 is installed and running
echo -n "check: icinga2 installed - "
if version=$(icinga_version); then
echo "OK: $version"
else
fail "You need to install icinga2!"
fi
if [ -z "${ICINGA2_SSLDIR}" ]; then
if [ -f "${ICINGA2_SSLDIR_OLD}/${ICINGA2_NODENAME}.crt" ]; then
info "Using old SSL directory: ${ICINGA2_SSLDIR_OLD}"
info "Because you already have a certificate in ${ICINGA2_SSLDIR_OLD}/${ICINGA2_NODENAME}.crt"
ICINGA2_SSLDIR="${ICINGA2_SSLDIR_OLD}"
elif version_compare "$(icinga_major)" ">=" 2.8 ; then
info "Using new SSL directory: ${ICINGA2_SSLDIR_NEW}"
ICINGA2_SSLDIR="${ICINGA2_SSLDIR_NEW}"
else
info "Using old SSL directory: ${ICINGA2_SSLDIR_OLD}"
ICINGA2_SSLDIR="${ICINGA2_SSLDIR_OLD}"
fi
fi
if [ ! -d "$ICINGA2_SSLDIR" ]; then
mkdir "$ICINGA2_SSLDIR"
chown "$ICINGA2_USER.$ICINGA2_GROUP" "$ICINGA2_SSLDIR"
fi
if [ -f "${ICINGA2_SSLDIR}/${ICINGA2_NODENAME}.crt" ]; then
warn "ERROR: a certificate for '${ICINGA2_NODENAME}' already exists"
warn "Please remove ${ICINGA2_SSLDIR}/${ICINGA2_NODENAME}.??? in case you want a"
warn "new certificate to be generated and signed by ${ICINGA2_CA_NODE}"
if [ -z "${ICINGA2_UPDATE_CONFIG}" ] && [ -z "${ICINGA2_DRYRUN}" ]; then
warn "Aborting here, you can can call the script like this to just update config:"
info " ICINGA2_UPDATE_CONFIG=1 $0"
exit 1
fi
elif [ -z "${ICINGA2_DRYRUN}" ]; then
if ! "$ICINGA2_BIN" pki new-cert --cn "${ICINGA2_NODENAME}" \\
--cert "${ICINGA2_SSLDIR}/${ICINGA2_NODENAME}.crt" \\
--csr "${ICINGA2_SSLDIR}/${ICINGA2_NODENAME}.csr" \\
--key "${ICINGA2_SSLDIR}/${ICINGA2_NODENAME}.key"
then fail "Could not create self signed certificate!"
fi
if ! "$ICINGA2_BIN" pki save-cert \\
--host "${ICINGA2_CA_NODE}" \\
--port "${ICINGA2_CA_PORT}" \\
--key "${ICINGA2_SSLDIR}/${ICINGA2_NODENAME}.key" \\
--trustedcert "${ICINGA2_SSLDIR}/trusted-master.crt"
then fail "Could not retrieve trusted certificate from host ${ICINGA2_CA_NODE}"
fi
if ! "$ICINGA2_BIN" pki request \\
--host "${ICINGA2_CA_NODE}" \\
--port "${ICINGA2_CA_PORT}" \\
--ticket "${ICINGA2_CA_TICKET}" \\
--key "${ICINGA2_SSLDIR}/${ICINGA2_NODENAME}.key" \\
--cert "${ICINGA2_SSLDIR}/${ICINGA2_NODENAME}.crt" \\
--trustedcert "${ICINGA2_SSLDIR}/trusted-master.crt" \\
--ca "${ICINGA2_SSLDIR}/ca.crt"
then "Could not retrieve final certificate from host ${ICINGA2_CA_NODE}"
fi
else
info "Would create certificates under ${ICINGA2_SSLDIR}, but in dry-run!"
fi
# Prepare Config Files
content_config=$(cat << EOF
/** Icinga 2 Config - proposed by Icinga Director */
include "constants.conf"
$([ "${ICINGA2_HOSTNAME}" != "${ICINGA2_NODENAME}" ] || echo '// ')const NodeName = "${ICINGA2_NODENAME}"
include "zones.conf"
include "features-enabled/*.conf"
include <itl>
include <plugins>
include <plugins-contrib>
include <manubulon>
include <windows-plugins>
include <nscp>
EOF
)
endpoint_list=''
for item in "${ICINGA2_PARENT_ENDPOINTS[@]}"; do
endpoint=$(echo "$item" | cut -d, -f1)
endpoint_list+="\\"${endpoint}\\", "
done
content_zones=$(cat << EOF
/** Icinga 2 Config - proposed by Icinga Director */
object Endpoint "${ICINGA2_NODENAME}" {}
object Zone "${ICINGA2_NODENAME}" {
parent = "${ICINGA2_PARENT_ZONE}"
endpoints = [ "${ICINGA2_NODENAME}" ]
}
object Zone "${ICINGA2_PARENT_ZONE}" {
endpoints = [ ${endpoint_list%, } ]
}
EOF
)
for item in "${ICINGA2_PARENT_ENDPOINTS[@]}"; do
endpoint=$(echo "$item" | cut -d, -f1)
host=$(echo "$item" | cut -s -d, -f2)
content_zones+=$(cat << EOF
object Endpoint "${endpoint}" {
$([ -n "$host" ] && echo " host = \\"${host}\\"" || echo " //host = \\"${endpoint}\\"")
}
EOF
)
done
for zone in "${ICINGA2_GLOBAL_ZONES[@]}"; do
content_zones+=$(cat << EOF
object Zone "${zone}" {
global = true
}
EOF
)
done
content_api="/** Icinga 2 Config - proposed by Icinga Director */
object ApiListener \\"api\\" {"
if [ "${ICINGA2_SSLDIR}" = "${ICINGA2_SSLDIR_OLD}" ]; then
content_api+="
cert_path = SysconfDir + \\"/icinga2/pki/${ICINGA2_NODENAME}.crt\\"
key_path = SysconfDir + \\"/icinga2/pki/${ICINGA2_NODENAME}.key\\"
ca_path = SysconfDir + \\"/icinga2/pki/ca.crt\\"
"
fi
content_api+="
accept_commands = true
accept_config = true
}
"
if [ -z "${ICINGA2_DRYRUN}" ]; then
install_config "$ICINGA2_CONFIGDIR"/icinga2.conf "$content_config"
install_config "$ICINGA2_CONFIGDIR"/zones.conf "$content_zones"
install_config "$ICINGA2_CONFIGDIR"/features-available/api.conf "$content_api"
"$ICINGA2_BIN" feature enable api
"$ICINGA2_BIN" daemon -C
echo "Please restart icinga2:"
echo " service icinga2 restart"
else
output_code() {
sed 's/^/ /m' <<<"$1"
}
echo "### $ICINGA2_CONFIGDIR"/icinga2.conf
echo
output_code "$content_config"
echo
echo "### $ICINGA2_CONFIGDIR"/zones.conf
echo
output_code "$content_zones"
echo
echo "### $ICINGA2_CONFIGDIR"/features-available/api.conf
echo
output_code "$content_api"
fi
systemctl restart icinga2
''')
# Build the endpoint-list fragment for an optional second (HA) master;
# empty when no second master is configured.
if master2 != '' and masterip2 != '':
    master2string = ' ' + master2 + ',' + masterip2
else:
    master2string = ''

# Strings substituted into the generated kickstart script.
replacementCA = 'ICINGA2_CA_NODE=' + "'" + master + "'"
replacementmaster = 'ICINGA2_PARENT_ENDPOINTS=(' + master + ',' + masterip + master2string + ')'
replacementname = 'ICINGA2_NODENAME=' + "'" + domainstring + "'"
# BUG FIX: the original substituted `domainstring` here, so the kickstart
# script never received the CA ticket fetched from the Director API above.
# The ticket value must be used for ICINGA2_CA_TICKET.
replacementticket = 'ICINGA2_CA_TICKET=' + "'" + ticket + "'"

# BUG FIX: use '|' as the sed delimiter throughout. `master` is a URL that
# contains '/', which would prematurely terminate an s/// expression.
# Set master(s)
sedcmd = "s|ICINGA2_PARENT_ENDPOINTS=()|" + replacementmaster + '|g'
subprocess.call(["sed", "-i", sedcmd, "icinga2kickstart.sh"])
# Set CA node variable
sedcmd = "s|ICINGA2_CA_NODE=''|" + replacementCA + '|g'
subprocess.call(["sed", "-i", sedcmd, "icinga2kickstart.sh"])
# Set the node name
sedcmd = "s|ICINGA2_NODENAME=''|" + replacementname + '|g'
subprocess.call(["sed", "-i", sedcmd, "icinga2kickstart.sh"])
# Set ticket
sedcmd = "s|ICINGA2_CA_TICKET=''|" + replacementticket + '|g'
subprocess.call(["sed", "-i", sedcmd, "icinga2kickstart.sh"])

# Execute icinga2kickstart.sh script on the new agent
subprocess.call(["bash", "icinga2kickstart.sh"])

# Deploy the new icinga configuration via the Director API
deployurl = master + '/director/config/deploy'
requests.post(url=deployurl, headers=headers, auth=(webuser, webpass))

# Cleanup the generated script
subprocess.call(["rm", "icinga2kickstart.sh"])
|
[
"noreply@github.com"
] |
w4rc0n.noreply@github.com
|
14a522c0b0e3b4d419eb126c4e740041354edf1b
|
1dede328b31eef387e130a468a4bd928a2ad55d8
|
/day19.py
|
6f667756d2fbf76b2833c25eaf5a5eb651e7d230
|
[] |
no_license
|
Kurocon/AdventOfCode2017
|
3b21f4241c0dd0c702eb2bc31684d215bb9c3150
|
af85df23cac2036fd1f425204bf9f29f5a53aebd
|
refs/heads/master
| 2021-03-16T05:18:57.787010
| 2017-12-25T11:34:58
| 2017-12-25T11:34:58
| 112,751,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
# Advent of Code 2017 day 19: follow a routing diagram built from '|', '-',
# '+' and letters, collecting the letters and counting the steps taken.
with open('day19.in') as f:
    inp = [[y for y in x.replace("\n", "")] for x in f.readlines()]

# The entry point is the single '|' on the top row.
start_index = (0, 0)
for i in range(len(inp[0])):
    if inp[0][i] == "|":
        start_index = (0, i)
        break

direction = "down"           # current travel direction
current_index = start_index  # (row, col) position in the grid
collected = []               # letters picked up along the path
stop = False
stepcount = 0

# Direction name -> (row delta, col delta).
dtx = {
    'up': (-1, 0),
    'down': (1, 0),
    'left': (0, -1),
    'right': (0, 1),
}
# Opposite of each direction, used so a corner never turns straight back.
opp = {
    'up': 'down',
    'down': 'up',
    'left': 'right',
    'right': 'left',
}

def get(index):
    # Character at a (row, col) position in the grid.
    return inp[index[0]][index[1]]

while not stop:
    cur = get(current_index)
    print(current_index, ": '", cur, "'")
    if cur not in "|-+":
        if cur == " ":
            # Walked off the end of the path: terminate.
            print("Stepped out of line on ", current_index)
            stop = True
        else:
            # A letter: collect it, then treat the cell as plain track so
            # the movement code below keeps going in the same direction.
            collected.append(cur)
            if direction in ['up', 'down']:
                cur = "|"
            else:
                cur = "-"
    if cur == "|" or cur == "-":
        # Straight track: continue in the current direction.
        current_index = (current_index[0]+dtx[direction][0], current_index[1]+dtx[direction][1])
    elif cur == "+":
        # Corner: turn into whichever non-backwards neighbour is not blank.
        other_dirs = filter(lambda x: x != opp[direction], ["up", "down", "left", "right"])
        new_dir = direction
        for d in other_dirs:
            try:
                if get((current_index[0]+dtx[d][0], current_index[1]+dtx[d][1])) != " ":
                    new_dir = d
                    break
            except IndexError:
                # Neighbour lies outside the grid; skip it.
                continue
        direction = new_dir
        current_index = (current_index[0]+dtx[direction][0], current_index[1]+dtx[direction][1])
    stepcount += 1

# The last iteration stepped onto the blank cell that ended the walk,
# which does not count as a step on the path.
stepcount -= 1
print("Collected letters: {}".format("".join(collected)))
print("Stepped {} steps".format(stepcount))
|
[
"kevin@kevinalberts.nl"
] |
kevin@kevinalberts.nl
|
4406e498e53d36d972223ba8851239a9ce4817d0
|
ed77872e3a680eb78554d1316f11de4169777d00
|
/build.py
|
281e0c21a1e58b20c1ca68e6598efd7a472dac75
|
[] |
no_license
|
Karthiga1/CICD_Jenkins
|
70fd547591c766933baacb70fe39e40625b6dd73
|
da3e7c8eacdd3041ff98be543d4a22246ecf6e85
|
refs/heads/main
| 2023-02-01T21:47:02.970737
| 2020-12-18T04:06:35
| 2020-12-18T04:06:35
| 322,308,370
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
import json
import os
import shutil
import zipfile
# Read the stage definitions from Build.json, write each stage's content to
# its own JSON file under builds/, then zip the whole directory.
#
# The original wrapped this in try/finally and closed `f`/`json_file` by
# hand — a NameError waited in the finally block whenever open() failed,
# and the handles were closed twice. Context managers do the cleanup.
with open('Build.json') as f:
    data = json.loads(f.read())

for stage_key in data:
    stage_name = data[stage_key]['Name']
    stage_content = data[stage_key]['Content']
    # Create the output directory lazily on the first stage.
    if not os.path.exists('builds'):
        os.makedirs('builds')
    filepath = os.path.join("builds", stage_name)
    with open(filepath, 'w') as json_file:
        json.dump(stage_content, json_file)

shutil.make_archive('builds', 'zip', 'builds')
|
[
"noreply@github.com"
] |
Karthiga1.noreply@github.com
|
29ad22edf618900af13dc410769ed8873085e808
|
9ae27c38e583982174e07dc1b19507f1d96e4fa2
|
/cyber_security/util/httpTools.py
|
1b58a632a6e1d668d20b278bb25a5ba31a5d39b6
|
[] |
no_license
|
lyx199504/cyber-security-backend
|
e38c2e690c3ed3d4b8f2c88f4b7b4ab0b18476c3
|
dbe93143dda36759a0ae4ecbd5c322696f7c15f7
|
refs/heads/master
| 2023-01-19T01:51:40.655030
| 2020-11-22T12:22:42
| 2020-11-22T12:22:42
| 312,642,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/11/5 11:59
# @Author : LYX-夜光
from django.http import JsonResponse
class RestResponse:
    """Factory for uniform JSON API responses.

    Every payload has the shape {'code': ..., 'msg': ..., 'data': ...}.
    """

    SUCCESS = 200       # success
    AUTH_ERROR = 401    # authentication error
    USER_ERROR = 402    # user-operation error
    FRONT_ERROR = 411   # front-end operation error
    SERVER_ERROR = 500  # server-side error

    # The original used mutable default arguments (data={}); a None
    # sentinel avoids sharing one dict object across calls.
    @staticmethod
    def success(msg='', data=None):
        """Wrap a successful result."""
        return JsonResponse({'code': RestResponse.SUCCESS, 'msg': msg, 'data': {} if data is None else data})

    @staticmethod
    def failure(code, msg='', data=None):
        """Wrap a failure with an arbitrary error code."""
        return JsonResponse({'code': code, 'msg': msg, 'data': {} if data is None else data})

    @staticmethod
    def authFail():
        """Authentication failure (message text intentionally unchanged)."""
        return JsonResponse({'code': RestResponse.AUTH_ERROR, 'msg': '用户认证失败,请重新登录!', 'data': {}})

    @staticmethod
    def userFail(msg='', data=None):
        """User-operation failure."""
        return JsonResponse({'code': RestResponse.USER_ERROR, 'msg': msg, 'data': {} if data is None else data})

    @staticmethod
    def frontFail(msg='', data=None):
        """Front-end operation failure."""
        return JsonResponse({'code': RestResponse.FRONT_ERROR, 'msg': msg, 'data': {} if data is None else data})

    @staticmethod
    def serverFail():
        """Generic server-side failure (message text intentionally unchanged)."""
        return JsonResponse({'code': RestResponse.SERVER_ERROR, 'msg': '服务器出错!', 'data': {}})
|
[
"765020742@qq.com"
] |
765020742@qq.com
|
cd84845dcf66c5636243e40470165c0fecc97a32
|
ed8e1eeda5f5fc5dd9436a90439ae1ef3e69b98c
|
/s3_manifest.py
|
18c19ef77fe12255e996d28adf56d9b46ae67894
|
[
"MIT"
] |
permissive
|
vile8/S3-Version-Utilities
|
64417da9ce350110ecf5194f92d510ddd195cb3d
|
a888a9d5f2d5f8199f26df3f43f5966e8764b879
|
refs/heads/master
| 2021-01-01T18:30:02.055773
| 2017-07-26T16:33:49
| 2017-07-26T16:33:49
| 98,346,809
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,835
|
py
|
import os
import sys
import getopt
import boto
import uuid
import time
import tarfile
import shutil
import math
import csv
import traceback
from filechunkio import FileChunkIO
from datetime import datetime, timedelta
from os.path import splitext, join
from os import walk
from os import path
from pprint import pprint
# Write one CSV row (key, version_id) per *current* object version in the
# configured bucket. Aborts the whole run on any listing error so a partial
# (corrupt) manifest is never silently produced.  (Python 2 / boto.)
def getFilesFromBucket(fWriter):
    global manBucket, breakManRun
    #alternative method: explicit credentials instead of the environment/
    #boto config that connect_s3() uses by default.
    #access_key = '<YOUR_KEY>'
    #secret_key = '<YOUR_SECRET_KEY>'
    #conn = boto.connect_s3(
    #    aws_access_key_id = access_key,
    #    aws_secret_access_key = secret_key
    #    )
    conn = boto.connect_s3()
    #Connect to our desired Bucket
    s3 = conn.get_bucket(manBucket)
    #Fetch individual objects from the bucket, "folders", files etc...
    #We are after current versions here.
    print "Bucket key | Version ID"
    for manKey in s3.list_versions():
        try:
            #little elaborate, last minute... just a means to avoid trying to process delete_marker entries
            canProcessRecord = True
            if(hasattr(manKey, 'DeleteMarker')):
                canProcessRecord = False
            if(hasattr(manKey, 'delete_marker')):
                if(manKey.delete_marker == True):
                    canProcessRecord = False
            if(manKey.is_latest and canProcessRecord == True):
                # breakManRun is set by a previous failure; dump the key
                # that follows it and stop instead of writing more rows.
                if breakManRun:
                    debugPrintKey(manKey)
                    sys.exit(1)
                print str(manKey.path) + manKey.name + " | " + str(manKey.version_id)
                # path may be None for top-level keys; only prefix it when set.
                if manKey.path is None:
                    fullKey = manKey.name
                else:
                    fullKey = manKey.path + manKey.name
                fWriter.writerow([fullKey,str(manKey.version_id)])
        except:
            print "If we hit an exception we should close and remove the manifest as it will likely be corrupt or at least incomplete."
            print "Something wrong with: " + manKey.name
            print "Unexpected Error: ", sys.exc_info()[0]
            traceback.print_exc(file=sys.stdout)
            debugPrintKey(manKey)
            breakManRun = True
            sys.exit(1)
def debugPrintKey(keyObj):
    """Dump every attribute of *keyObj* — debugging aid for boto key objects."""
    attributes = vars(keyObj)
    pprint(attributes)
#Desired Action: [write-manifest, download-by-manifest]
manAction = "write-manifest"
#Bucket Name (filled in from -b)
manBucket = ""
#Manifest Name (filled in from -m)
manFile = ""
#Global for managing manifest list
manList = []
#Flag to handle connection and listing exceptions to avoid writing or reading bad manifests
breakManRun = False
# Parse the command line into the module-level settings.
def main(argv):
    """Fill manFile/manBucket from -m/-b options; exit with usage on error."""
    global manFile, manBucket
    try:
        opts, args = getopt.getopt(argv, "hm:b:")
    except getopt.GetoptError:
        printHelp()
        sys.exit(2)
    for flag, value in opts:
        if flag in ("-h","help","--help"):
            printHelp()
            sys.exit(2)
        if flag == '-b':
            manBucket = value
        elif flag == '-m':
            manFile = value
    # All three settings are mandatory; bail out with usage otherwise.
    for required in (manFile, manAction, manBucket):
        if not required:
            printHelp()
            sys.exit(2)
# Usage text shown for -h or on any argument error.
def printHelp():
    """Print the command-line usage string."""
    print('s3_manifest.py -m <manifest output filename> -b <AWS bucket name>\n')
# Run usage handling / option parsing on main program init.
if __name__ == "__main__":
    main(sys.argv[1:])
    if manAction == "write-manifest":
        #open csvWriter ('wb' is the Python 2 csv convention)
        with open(manFile, 'wb') as csvFH:
            csvWriter = csv.writer(csvFH, delimiter=',', quotechar='\\', quoting=csv.QUOTE_MINIMAL)
            #create csv from current bucket
            getFilesFromBucket(csvWriter)

#Set the current date for use in writing manifest files
# NOTE(review): checkdateCurr is assigned but never read in this file —
# presumably used by a companion download script; confirm before removing.
checkdateCurr = datetime.now()
|
[
"jmcdermott@queuesoftware.com"
] |
jmcdermott@queuesoftware.com
|
25e4af19a5867e04f0a5781eb7c32f8b7ea09009
|
bdb4bf882ec27984cc8682747843f1ff5c928652
|
/Tkinter Graph (using mouseclicks)
|
ea2502b1b7c46503914d6128cd8d6b17a63988cc
|
[] |
no_license
|
arslanamir8/pervysage
|
016aaf4ceeb2d25a354ac92039c65329221a57bf
|
2a0001ffd438d6d58cd069565d46daa8540be91c
|
refs/heads/master
| 2022-01-16T12:52:53.193943
| 2022-01-10T01:38:16
| 2022-01-10T01:38:16
| 208,152,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,026
|
#!/bin/python3
from tkinter import *
#varaibles and data
radius = 20      # node circle radius, pixels
nodecount = []   # one entry per drawn node; its length is the next node id
coords = {}      # node id -> (x, y) canvas centre of the node
graph = {}       # adjacency list: node id -> list of neighbour ids
start = None     # canvas coords where an edge drag began (None = no drag)
node1 = None     # node the edge drag started on
node2 = None     # node the edge drag ended on
# Mouse handler: create a new numbered node at the click position.
def DrawNode(event):
    """Draw a circle + id label where the user clicked and register the node."""
    cx, cy = event.x, event.y
    canvas.create_oval(cx - radius, cy - radius, cx + radius, cy + radius, outline="#000", width=1)
    node_id = len(nodecount)
    canvas.create_text(cx, cy, font="Times 30 bold", text=node_id)
    nodecount.append(node_id)
    # After the append, node_id == len(nodecount) - 1.
    coords[node_id] = (cx, cy)
    graph[node_id] = []
def line_click(event):
    """Record which node (if any) a left-button edge drag started on."""
    global start, node1
    for node_id, (cx, cy) in coords.items():
        inside_x = cx - radius < event.x < cx + radius
        inside_y = cy - radius < event.y < cy + radius
        if inside_x and inside_y:
            start = (event.x, event.y)
            node1 = node_id
def line_release(event):
    """Finish an edge drag: if released over a node, draw the line and
    record the edge in both adjacency lists, then reset the drag state."""
    global start, node2, node1
    if start:
        # Find the node under the release point (if any).
        for k, v in coords.items():
            if coords[k][0] - radius < event.x < coords[k][0] + radius and coords[k][1] - radius < event.y < coords[k][1] + radius:
                x = start[0]
                y = start[1]
                event.widget.create_line(x, y, event.x, event.y)
                node2 = k
                # Undirected edge: record it on both endpoints.
                graph[node1].append(node2)
                graph[node2].append(node1)
        # Drag finished either way — clear the in-progress state.
        start, node1 , node2 = None, None, None
def save():
    """Write the adjacency list to coords.text, one 'id:[neighbours]' line per node.

    Uses a context manager instead of the original explicit open()/close()
    pair, so the file handle is released even if a write raises.
    """
    with open('coords.text', 'w') as f:
        for node_id, neighbours in graph.items():
            f.write(str(node_id) + ':' + str(neighbours) + '\n')
# widgets
root = Tk()
root.title('Graph')

# menu
menubar = Menu(root)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label='Save', command=save)
# BUG FIX: the original passed command=root.quit(), which *called* quit()
# while the menu was being built (making mainloop exit immediately) and
# registered its None return value as the callback. Pass the bound method
# itself so Exit actually quits when clicked.
filemenu.add_command(label='Exit', command=root.quit)
menubar.add_cascade(label='File', menu=filemenu)
root.config(menu=menubar)

canvas = Canvas(root, width=600, height=700)
canvas.grid(row=0, column=0)
canvas.create_image(300, 350)

# event bindings: middle-click adds a node, left drag connects two nodes
canvas.bind('<Button-2>', DrawNode)
canvas.bind("<Button-1>", line_click)
canvas.bind("<ButtonRelease-1>", line_release)

# main loop
root.mainloop()
|
[
"noreply@github.com"
] |
arslanamir8.noreply@github.com
|
|
264323a3ccc30d0b73c69303417ef144f9025d54
|
a8b17b17f9b2a640013064c50e1cebc27a7a68de
|
/unsupervised-learning-in-python/03-decorrelating-your-data-and-dimension-reduction/03-the-first-principal-component.py
|
647a417a9bf9c989e58abcbb84d033e610d11b34
|
[] |
no_license
|
JohnnyFang/datacamp
|
20eae09752521f14006cb3fda600b10bd7b12398
|
0fa8fa7682c23b0eb07bd03e4b75f5b77aeafa75
|
refs/heads/master
| 2020-04-18T00:27:37.358176
| 2020-02-04T20:54:19
| 2020-02-04T20:54:19
| 167,078,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
"""
The first principal component
The first principal component of the data is the direction in which the data varies the most. In this exercise, your job is to use PCA to find the first principal component of the length and width measurements of the grain samples, and represent it as an arrow on the scatter plot.
The array grains gives the length and width of the grain samples. PyPlot (plt) and PCA have already been imported for you.
Instructions
100 XP
Make a scatter plot of the grain measurements. This has been done for you.
Create a PCA instance called model.
Fit the model to the grains data.
Extract the coordinates of the mean of the data using the .mean_ attribute of model.
Get the first principal component of model using the .components_[0,:] attribute.
Plot the first principal component as an arrow on the scatter plot, using the plt.arrow() function. You have to specify the first two arguments - mean[0] and mean[1].
"""
# Make a scatter plot of the untransformed points
plt.scatter(grains[:,0], grains[:,1])
# Create a PCA instance: model
model = PCA()
# Fit model to points
model.fit(grains)
# Get the mean of the grain samples: mean
mean = model.mean_
# Get the first principal component: first_pc
first_pc = model.components_[0,:]
# Plot first_pc as an arrow, starting at mean
plt.arrow(mean[0], mean[1], first_pc[0], first_pc[1], color='red', width=0.01)
# Keep axes on same scale
plt.axis('equal')
plt.show()
|
[
"fangdejavu@gmail.com"
] |
fangdejavu@gmail.com
|
bcc8798fb5df258fb2fb24949bb32f5ced90b7d0
|
755fd91e945dbcd2f8f6004b41ac72fe671c90e4
|
/djangodecoy/djangodecoy/urls.py
|
5920011cecdf441a334aebb4c5a42750a119996a
|
[] |
no_license
|
Kurolox/DjangoDecoy
|
dea9f6b34af9d59ad6d0add902a0e443672da29f
|
ff6d2e8e52b6aa0b52e26ccce9e61e7f7df066cf
|
refs/heads/master
| 2020-03-21T15:43:55.642987
| 2018-06-26T12:05:25
| 2018-06-26T12:05:25
| 138,729,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
"""djangodecoy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from boards import views

urlpatterns = [
    path('', views.home),             # site root -> boards home view
    path('admin/', admin.site.urls),  # Django admin interface
]
|
[
"kurolox@gmail.com"
] |
kurolox@gmail.com
|
c779f0537fab2734252534db980e9b435b635669
|
7d615915304bdeebfd1047dfe40807b877305121
|
/appblog/config.py
|
6b5d4a95810579c57385a31bacc5bba365c74d31
|
[] |
no_license
|
Jaouadeddadsi/My-blog-post
|
1650bed499c064682431aa47952b4e570bcb8f3c
|
a89a27935943602d0e29a34eebb5cafb5ec55a18
|
refs/heads/master
| 2020-06-01T02:32:01.251560
| 2019-06-12T15:39:46
| 2019-06-12T15:39:46
| 190,598,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
import os
class Config:
    """Flask application configuration.

    Secrets and the database URI come from environment variables so they
    are never committed to source control.
    """

    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
    # Gmail SMTP with STARTTLS on port 587; credentials via environment.
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('EMAIL_USER')
    MAIL_PASSWORD = os.environ.get('EMAIL_PASS')
|
[
"jaouadeddadsi2016@gmail.com"
] |
jaouadeddadsi2016@gmail.com
|
1bea469ce41628300baec647daf2fd0474bdd419
|
cd29cff9794f8993cb65b7b558f7fe57d5a751e0
|
/teacherdirectory/views/__init__.py
|
ca7e5cf153938695013dabf19b19ab4ec891c354
|
[] |
no_license
|
hamade31/Tech-Test
|
e974913ec00462bedd921b182ad0ac6feb86e4cc
|
43e8c77796402baeb43b302e0cffb87f0b5a7d9d
|
refs/heads/main
| 2023-03-24T09:49:39.765469
| 2021-03-13T15:46:18
| 2021-03-13T15:46:18
| 347,280,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
from .teacher_profile_views import *
|
[
"phillipsheadset@gmail.com"
] |
phillipsheadset@gmail.com
|
2b36ac1a0cd628cdd8c6be5c39b7366eef994ca5
|
ab32f2a609249ec2b475a29b9479d444aa44b366
|
/Fase3Django/Fase3Django/asgi.py
|
9a36a5803f2aca56fd159a53feed9da94c72c147
|
[] |
no_license
|
EETS7/FinalDjango
|
bf787959518f4d99a6f99a8f38feef0ecbf04b51
|
10cb8bde440cf830eee8a05cc7424b6c2fb6c327
|
refs/heads/master
| 2023-05-04T13:39:56.293102
| 2021-05-16T03:40:27
| 2021-05-16T03:40:27
| 367,778,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for Fase3Django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application

# Point Django at the project settings before the ASGI callable is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Fase3Django.settings')
application = get_asgi_application()
|
[
"elmertriquiz@gmail.com"
] |
elmertriquiz@gmail.com
|
477902b7aded01142a3a70e8952435c226af8fde
|
c3228cb4024a24a1fddcc895effd18708ef31056
|
/Search Engine/Search_Engine_python/routes.py
|
75ebc899bb9716fa3ac656343e2c42f112f0509d
|
[] |
no_license
|
ibtissammoufid/bulletins-search-engine
|
f4c848177670b2f8105263a54a8db7dfc7f43e57
|
d7e51d11859874603133550558c6ac3753181367
|
refs/heads/master
| 2022-11-19T12:00:42.899632
| 2020-06-30T17:36:22
| 2020-06-30T17:36:22
| 271,137,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,381
|
py
|
from flask import render_template, url_for, flash, redirect, request
from Search_Engine_python import app, db, bcrypt
from Search_Engine_python.forms import RegistrationForm, LoginForm
from Search_Engine_python.models import User, Post
from flask_login import login_user, current_user, logout_user, login_required
from elasticsearch import Elasticsearch
import os

# NOTE(review): hard-coded developer path; consider moving this to config
# or an environment variable.
os.chdir("C:\\Users\\hp\\Desktop\\Search Engine\\")

# BUG FIX: the original built a client for 127.0.0.1:9200 and immediately
# discarded it by rebinding `es = Elasticsearch(timeout=30)` (which fell
# back to the default host). One client now carries both the address and
# the timeout.
es = Elasticsearch('127.0.0.1', port=9200, timeout=30)
@app.route('/')
@app.route("/home")
def home():
    """Render the landing page."""
    return render_template('index.html')
@app.route("/search")
def search():
return render_template('search.html')
@app.route('/results', methods=['GET', 'POST'])
def request_search():
    """Run a phrase query against the 'bulletins' index and render the hits."""
    # NOTE(review): request.form only carries POST data, but GET is allowed
    # on this route — a plain GET with no form body would raise here.
    # Confirm whether GET support is intentional.
    search_term = request.form["input"]
    # Phrase match on document content, with <b>-tagged highlight fragments.
    res = es.search(
        index='bulletins',
        body={
            "query": {"match_phrase": {"content": search_term}},
            "highlight": {"pre_tags": ["<b>"], "post_tags": ["</b>"], "fields": {"content": {}}}})
    # Stash the search term so the template can echo it back.
    res['ST'] = search_term
    for hit in res['hits']['hits']:
        # Join highlight fragments (all but the first) into a display summary.
        hit['good_summary'] = '….'.join(hit['highlight']['content'][1:])
    return render_template('results.html', res=res)
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created! You are now able to log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Log-in page: authenticate the submitted credentials against the stored bcrypt hash."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        credentials_ok = account and bcrypt.check_password_hash(
            account.password, form.password.data)
        if credentials_ok:
            login_user(account, remember=form.remember.data)
            # Honour a ?next= redirect target when one was supplied.
            next_page = request.args.get('next')
            if next_page:
                return redirect(next_page)
            return redirect(url_for('search'))
        flash('Login Unsuccessful. Please check email and password', 'danger')
    return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
    # End the flask_login session and return to the public home page.
    logout_user()
    return redirect(url_for('home'))
@app.route("/saved")
@login_required
def saved():
    """List the current user's saved posts.

    ``@login_required`` added for consistency with ``newpost()``: the query
    below dereferences ``current_user``, which for unauthenticated visitors is
    an anonymous user rather than a ``User`` row, so the filter would fail or
    silently match nothing.
    """
    posts = Post.query.filter_by(author=current_user)
    return render_template('saved.html', posts=posts)
@app.route("/results/<post_name>/<post_path>/add", methods=['GET', 'POST'])
@login_required
def newpost(post_name, post_path):
    # Save a search result (title + path) as a Post owned by the current user.
    post = Post(title=post_name, content=post_path, author=current_user)
    db.session.add(post)
    db.session.commit()
    posts = Post.query.all()
    # NOTE(review): this sweep deletes every post (any user's) whose content
    # equals the literal string 'f' — presumably cleaning up placeholder
    # entries, but the magic value and the full-table scan after every insert
    # should be confirmed/reworked.
    for post in posts :
        if post.content == 'f':
            db.session.delete(post)
            db.session.commit()
    flash('Your post has been created!', 'success')
    return redirect(url_for('saved'))
|
[
"58593102+ibtissammoufid@users.noreply.github.com"
] |
58593102+ibtissammoufid@users.noreply.github.com
|
2e3f80dea7d5403facd335224553023a85a1f183
|
d5039d4b39e9411444ef6282fe384d72a305cc1d
|
/catkin_ws/build/robotiq/robotiq_ft_sensor/catkin_generated/pkg.installspace.context.pc.py
|
6573ae7dc301cf32da9f8a8f15bb440ace50f93b
|
[] |
no_license
|
iltertaha/pickplace
|
0daa8f0d3dba6e3c3717bcbd178916beaf46556e
|
f0cabcb3b99af0c472a8741f2a6e2cc067322f6e
|
refs/heads/master
| 2022-06-17T22:34:57.666166
| 2020-05-20T13:50:12
| 2020-05-20T13:50:12
| 265,561,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
# CMake passes list values as semicolon-joined strings; split them back into
# Python lists, yielding [] when the string is empty.
_INCLUDE_DIRS = "/home/burak/catkin_ws/install/include"
_LIBRARIES = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = _INCLUDE_DIRS.split(';') if _INCLUDE_DIRS != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = _LIBRARIES.split(';') if _LIBRARIES != "" else []
PROJECT_NAME = "robotiq_ft_sensor"
PROJECT_SPACE_DIR = "/home/burak/catkin_ws/install"
PROJECT_VERSION = "1.0.0"
|
[
"ilteraktolga@gmail.com"
] |
ilteraktolga@gmail.com
|
493640c7a6caa30620ca5d466c147cac7cb2940d
|
85ccd32aa73eecf274a937f1fc3b6f4d484b77da
|
/test cases/python/2 extmodule/subinst/submod/printer.py
|
2a4a61bc995b3277b8870e7d3a99f41f41ee8ac7
|
[
"Apache-2.0"
] |
permissive
|
mesonbuild/meson
|
48321cf4235dfcc0194fed90ff43a57367592bf7
|
cf5adf0c646474f0259d123fad60ca5ed38ec891
|
refs/heads/master
| 2023-09-01T05:58:50.807952
| 2023-03-17T20:27:37
| 2023-08-31T11:52:41
| 19,784,232
| 5,122
| 1,848
|
Apache-2.0
| 2023-09-14T15:47:23
| 2014-05-14T15:08:16
|
Python
|
UTF-8
|
Python
| false
| false
| 48
|
py
|
#!/usr/bin/env python3
# Minimal script: prints the submodule identifier (used as a fixture in the
# meson extmodule test case).
print('subinst.submod')
|
[
"eschwartz@archlinux.org"
] |
eschwartz@archlinux.org
|
bfb6d5bffe2d3f4f810f2427e58fbae8e8fbcb6d
|
5b44a2a1ee9eabf95f8052d5bbce498c79e37204
|
/src/Expartmanager/utils.py
|
287740ad720e2437e9acec28351d25a9488444d5
|
[] |
no_license
|
Shamsulhaq/Emanager
|
0eafb7c7009c953c851c461959d80c13df203ef7
|
2174c99904327bb8cbfe75e2cd061160a58080be
|
refs/heads/master
| 2021-02-27T00:27:49.956787
| 2020-03-07T03:52:30
| 2020-03-07T03:52:30
| 245,563,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
import random
import string
from django.utils.text import slugify
from io import BytesIO
from django.http import HttpResponse
from django.template.loader import get_template
# from xhtml2pdf import pisa
'''
random_string_generator is located here:
http://joincfe.com/blog/random-string-generator-in-python/
'''
def random_string_generator(size=8, chars=string.ascii_lowercase + string.digits):
    """Return a random string of ``size`` characters drawn from ``chars``."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def unique_order_id_generator(instance):
    """Return an upper-case random order_id not yet present on *instance*'s model.

    Keeps drawing candidates until one is free; intended for a Django model
    with an ``order_id`` field.
    """
    Klass = instance.__class__
    while True:
        candidate = random_string_generator().upper()
        if not Klass.objects.filter(order_id=candidate).exists():
            return candidate
def unique_slug_generator(instance, new_slug=None):
    """Return a slug for *instance* that is unique within its model's table.

    Assumes the instance's model has ``slug`` and ``title`` fields, where
    ``title`` may be either a plain attribute or a callable. On a collision a
    short random suffix is appended and the check is retried recursively.
    """
    if new_slug is not None:
        slug = new_slug
    else:
        # ``title`` may be a method or a plain field. The original code used a
        # bare ``except:`` to distinguish the two, which also swallowed any
        # unrelated error raised while slugifying; test callability instead.
        title = instance.title() if callable(instance.title) else instance.title
        slug = slugify(title, allow_unicode=True)
    Klass = instance.__class__
    qs_exists = Klass.objects.filter(slug=slug).exists()
    if qs_exists:
        if not slug:
            # Empty slug (e.g. the title was all punctuation): fall back to a
            # purely random one.
            new_slug = "fastpick-{randstr}".format(
                randstr=random_string_generator(size=4))
        else:
            new_slug = "{slug}-{randstr}".format(
                slug=slug,
                randstr=random_string_generator(size=4))
        return unique_slug_generator(instance, new_slug=new_slug)
    return slug
# PDF Marker
#
# def render_to_pdf(template_src, context_dict={}):
# template = get_template(template_src)
# html = template.render(context_dict)
# result = BytesIO()
# pdf = pisa.pisaDocument(BytesIO(html.encode("utf-8")), result, encoding='utf-8')
#
# if not pdf.err:
# return HttpResponse(result.getvalue(), content_type='application/pdf')
# return None
# pisaStatus = pisa.CreatePDF(
# StringIO(sourceHtml.encode('utf-8')),
# dest=resultFile,
# encoding='utf-8')
|
[
"bmshamsulhaq65@gmail.com"
] |
bmshamsulhaq65@gmail.com
|
42e2322a8dffcca4ea82471172f3c53b55d3c365
|
dbf8d687413e6cc6bbcea45b89aaa258ae1bd08f
|
/Nasa4plot.py
|
ca4445542cd8488e614a2123de0ea1ad1aa8fcf9
|
[] |
no_license
|
mhr75/Nasa_Space_Apps_Challenge_2020_Covid_19
|
082728086a43c861aadbfbd83bffe38536d0d6ea
|
1d5352c3562fdcba61e9c9a33815bd517a510c2e
|
refs/heads/master
| 2022-10-24T08:29:12.390366
| 2020-06-12T07:14:53
| 2020-06-12T07:14:53
| 268,289,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,297
|
py
|
import numpy as np
from netCDF4 import Dataset
#import matplotlib.pyplot as plt
#import cartopy.crs as ccrs
#import metpy
# Monthly-mean MERRA-2 aerosol diagnostics file (April 2020).
my_example_nc_file = 'MERRA2_400.tavgM_2d_aer_Nx.202004.nc4'
fh = Dataset(my_example_nc_file, mode='r')
#data = Dataset('MERRA2_400.tavgM_2d_aer_Nx.202004.nc4', mode='r')
print(fh)
# Coordinate arrays from the netCDF file.
lons = fh.variables['lon'][:]
lats = fh.variables['lat'][:]
# NOTE(review): 'tmax' actually holds the *time* coordinate, not a
# temperature field — it is later plotted with m.pcolor and titled
# "Maximum Temperature", which looks wrong; confirm which variable was
# intended (e.g. a 2-D field like T2M in the commented-out code below).
tmax = fh.variables['time'][:]
tmax_units = fh.variables['time'].units
# lons = data.variables['lon'][:]
# lats = data.variables['lat'][:]
# T2M = data.variables['T2M'][:,:,:]
#
# T2M = T2M[0,:,:]
# Set the figure size, projection, and extent
# fig = plt.figure(figsize=(8,4))
# ax = plt.axes(projection=ccrs.Robinson())
# ax.set_global()
# ax.coastlines(resolution="110m",linewidth=1)
# ax.gridlines(linestyle='--',color='black')
# Set contour levels, then draw the plot and a colorbar
# clevs = np.arange(230,311,5)
# plt.contourf(lons, lats, T2M, clevs, transform=ccrs.PlateCarree(),cmap=plt.cm.jet)
# plt.title('MERRA-2 Air Temperature at 2m, January 2010', size=14)
# cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
# cb.set_label('K',size=12,rotation=0,labelpad=15)
# cb.ax.tick_params(labelsize=10)
# Save the plot as a PNG image
# fig.savefig('MERRA2_t2m.png', format='png', dpi=360)
#
fh.close()
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Get some parameters for the Stereographic Projection
lon_0 = lons.mean()
lat_0 = lats.mean()
m = Basemap(width=5000000,height=3500000,
            resolution='l',projection='stere',\
            lat_ts=40,lat_0=lat_0,lon_0=lon_0)
# Because our lon and lat variables are 1D,
# use meshgrid to create 2D arrays
# Not necessary if coordinates are already in 2D arrays.
lon, lat = np.meshgrid(lons, lats)
xi, yi = m(lon, lat)
# Plot Data
cs = m.pcolor(xi,yi,np.squeeze(tmax))
# Add Grid Lines
m.drawparallels(np.arange(-80., 81., 10.), labels=[1,0,0,0], fontsize=10)
m.drawmeridians(np.arange(-180., 181., 10.), labels=[0,0,0,1], fontsize=10)
# Add Coastlines, States, and Country Boundaries
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# Add Colorbar
cbar = m.colorbar(cs, location='bottom', pad="10%")
cbar.set_label(tmax_units)
# Add Title
plt.title('DJF Maximum Temperature')
# NOTE(review): plt.show() returns None, so this prints "None" after the
# figure window closes — probably meant to be a bare plt.show().
print(plt.show())
|
[
"49345529+mhr75@users.noreply.github.com"
] |
49345529+mhr75@users.noreply.github.com
|
1003d98d67650cebb39eb7e2386aa52b1c084c11
|
73b0a65a41ee148c65440881336c9173b22fd767
|
/Latest_THIS/ADDRESS_EXTRACT.py
|
a8898b16ce2cbb24996f9a75c8443ad7aed3cae8
|
[] |
no_license
|
Senthuran100/PYTHON_PROJECTS
|
2cb05e2a6d576764b1ac1db196a908317b9faeec
|
06d9f9434c914e670f2098205afe92f003506c04
|
refs/heads/master
| 2021-07-14T23:34:14.083645
| 2017-10-16T11:07:28
| 2017-10-16T11:07:28
| 107,116,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
import pyap
# Sample text containing one embedded US postal address.
test_address = """
Lorem ipsum
225 E. John Carpenter Freeway,
Suite 1500 Irving, Texas 75062
Dorem sit amet
"""
addresses = pyap.parse(test_address, country='US')
for match in addresses:
    # Print each address as matched, then its parsed components.
    print(match)
    print(match.as_dict())
|
[
"32807418+Senthuran100@users.noreply.github.com"
] |
32807418+Senthuran100@users.noreply.github.com
|
22f6a4dedfe49666dd117f26ae66383804b5578a
|
b7ad4f4071890b7352923f0c1d446e10db7d991c
|
/wowit/wowit/doctype/items/items.py
|
c334951bb78025802a115d3dbe61b2a744a7f772
|
[
"MIT"
] |
permissive
|
ShahzadNaser/wowit
|
e38a40e1731d6fc25e11be8063ea81000af06116
|
a50ace8f6d7cc6da470f13d2c04d18d02907430e
|
refs/heads/master
| 2022-12-08T22:42:42.001294
| 2020-08-12T01:18:32
| 2020-08-12T01:18:32
| 285,718,643
| 0
| 0
|
NOASSERTION
| 2020-08-29T10:30:39
| 2020-08-07T02:31:54
|
Python
|
UTF-8
|
Python
| false
| false
| 257
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Shahzad Naser and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Items(Document):
    """Frappe DocType controller for the ``Items`` doctype.

    No custom server-side behavior; everything is inherited from
    ``frappe.model.document.Document``.
    """
    pass
|
[
"shahzadnaser1122@gmail.com"
] |
shahzadnaser1122@gmail.com
|
0e9c4018769c664f8c1bca77db85d0714e23b5f7
|
4ca8bf159fd5decfe0bde30fded5a3a7c1624ce7
|
/server.py
|
d46c3eebba25c671e67a056baadc298ce21cb1ad
|
[] |
no_license
|
ivanfoo/wilson
|
c432f9f5c3e0efb476927fe63ac3391df44cfa8f
|
4327e84bb437c4d684685f5f3d73b9a21126870e
|
refs/heads/master
| 2020-12-05T01:45:40.844720
| 2016-09-10T17:42:45
| 2016-09-10T17:42:45
| 67,885,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33
|
py
|
from wilson import app
# Entry point: start the application server with its default settings
# (presumably Flask's development server — confirm in the wilson package).
app.run()
|
[
"sanfru.ivan@gmail.com"
] |
sanfru.ivan@gmail.com
|
4b35a2175c927750d9f11295397fddafd1d5a618
|
21dd1f04868e3e5ee84285b9e7a38fb29f346910
|
/clases/mostrarArchivo.py
|
9845e77cd3c78929ff54e9b482a4e99d768b1f56
|
[] |
no_license
|
melany202/programaci-n-2020
|
b8428b3978dddd1326178127df8d46cbffd8686f
|
fa45a2dec278e80faeab41eecededced9a530784
|
refs/heads/master
| 2023-01-03T09:29:21.623268
| 2020-10-28T18:54:54
| 2020-10-28T18:54:54
| 283,563,749
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
# Open the paragraph file and read all lines eagerly.
archivo=open('párrafo.txt',encoding='UTF-8')
print(archivo)
parrafo=archivo.readlines()
archivo.close()
print(parrafo)
listaRenglones=[]
# NOTE(review): this second open uses 'parrafo.txt' (no accent) while the one
# above used 'párrafo.txt' — confirm whether both files exist or one name is a
# typo. Re-read line by line, printing each line and collecting it.
with open ('parrafo.txt',encoding='UTF-8') as parrafo:
    for line in parrafo:
        print(line)
        listaRenglones.append(line)
|
[
"melanysuarez307@gmail.com"
] |
melanysuarez307@gmail.com
|
753db623e90e5f9aa6a358e9fe53709fe0cf8b24
|
d2b3108ebc4e413f643b906b1411049317191280
|
/models/item.py
|
224ebd95e775faea47093027f62c7ec3c38d3923
|
[] |
no_license
|
rhwgf34/code
|
056640fa705a3c36539b9069594e9129af9ced28
|
49c196b2b97183adba0ebc3db61f9ad952c2f49b
|
refs/heads/master
| 2021-07-15T08:11:03.586738
| 2017-10-20T12:38:06
| 2017-10-20T12:38:06
| 107,606,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
import sqlite3
from db import db
class ItemModel(db.Model):
    """Flask-SQLAlchemy model for a store item: name, price, and owning store."""
    __tablename__ = 'items'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    price = db.Column(db.Float(precision=2))
    # Each item belongs to one store via the stores.id foreign key.
    store_id = db.Column(db.Integer, db.ForeignKey('stores.id'))
    store = db.relationship('StoreModel')
    def __init__(self, name, price, store_id):
        self.name = name
        self.price = price
        self.store_id = store_id
    def json(self):
        # Serializable view for the API layer.
        # NOTE(review): id and store_id are omitted — confirm that is intended.
        return {'name': self.name, 'price': self.price}
    @classmethod
    def find_by_name(cls, name):
        # First row matching the (non-unique) name column, or None.
        return cls.query.filter_by(name=name).first()
    def save_to_db(self):
        # Insert-or-update: the session add handles both cases.
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self):
        db.session.delete(self)
        db.session.commit()
|
[
"mjuz1qt93@gmail.com"
] |
mjuz1qt93@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.