blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
61b940807ba953d8d1281bb51f0dd723c7a377c4 | 8eba953d39769dd1e9b83a48beef3942180553b2 | /myblog/blog/templatetags/new_highlight.py | 616983a527dde7d0f9871806c0f2439070760ff1 | [] | no_license | chenyufei91/blog | b016e83a474109587df663b0794949686c2001e6 | 1fbed7795bbbc4a4971842b9675393b3b4d3dc15 | refs/heads/master | 2021-01-02T08:30:16.066951 | 2017-09-12T12:04:26 | 2017-09-12T12:04:26 | 99,013,043 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,021 | py | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django import template
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from haystack.utils import importlib
# Registry exposing this module's tags/filters to Django's template engine.
register = template.Library()
class HighlightNode(template.Node):
    """Template node that renders a text block with query terms highlighted.

    All optional tag arguments are resolved as template variables at render
    time; the actual highlighting is delegated to a Highlighter class, either
    the project default or one named by ``settings.HAYSTACK_CUSTOM_HIGHLIGHTER``.
    """

    # Optional tag arguments, each stored as a template.Variable (or None).
    OPTION_NAMES = ('html_tag', 'css_class', 'max_length', 'start_head', 'filter_mark_down')

    def __init__(self, text_block, query, html_tag=None, css_class=None, max_length=None, start_head=None,
                 filter_mark_down=None):
        self.text_block = template.Variable(text_block)
        self.query = template.Variable(query)
        supplied = {
            'html_tag': html_tag,
            'css_class': css_class,
            'max_length': max_length,
            'start_head': start_head,
            'filter_mark_down': filter_mark_down,
        }
        # Wrap every supplied option in a template.Variable; leave absent ones as None.
        for option_name in self.OPTION_NAMES:
            raw_value = supplied[option_name]
            setattr(self, option_name,
                    template.Variable(raw_value) if raw_value is not None else None)

    def render(self, context):
        text_block = self.text_block.resolve(context)
        query = self.query.resolve(context)

        # Resolve only the options that were actually given on the tag.
        kwargs = {}
        for option_name in self.OPTION_NAMES:
            variable = getattr(self, option_name)
            if variable is not None:
                kwargs[option_name] = variable.resolve(context)

        # Handle a user-defined highlighting function.
        custom_path = getattr(settings, 'HAYSTACK_CUSTOM_HIGHLIGHTER', None)
        if custom_path:
            # Import the custom highlighter class from its dotted path.
            highlighter_path, _, highlighter_classname = custom_path.rpartition('.')
            try:
                highlighter_module = importlib.import_module(highlighter_path)
                highlighter_class = getattr(highlighter_module, highlighter_classname)
            except (ImportError, AttributeError) as e:
                raise ImproperlyConfigured(
                    "The highlighter '%s' could not be imported: %s" % (settings.HAYSTACK_CUSTOM_HIGHLIGHTER, e))
        else:
            from .highlighting import Highlighter
            highlighter_class = Highlighter

        highlighter = highlighter_class(query, **kwargs)
        return highlighter.highlight(text_block)
@register.tag
def new_highlight(parser, token):
    """
    Takes a block of text and highlights words from a provided query within that
    block of text. Optionally accepts arguments to provide the HTML tag to wrap
    highlighted word in, a CSS class to use with the tag and a maximum length of
    the blurb in characters.
    Syntax::
        {% highlight <text_block> with <query> [css_class "class_name"] [html_tag "span"] [max_length 200] %}
    Example::
        # Highlight summary with default behavior.
        {% highlight result.summary with request.query %}
        # Highlight summary but wrap highlighted words with a div and the
        # following CSS class.
        {% highlight result.summary with request.query html_tag "div" css_class "highlight_me_please" %}
        # Highlight summary but only show 40 characters.
        {% highlight result.summary with request.query max_length 40 %}
    """
    bits = token.split_contents()
    tag_name = bits[0]

    # Everything after the tag name must come in pairs, so the total is even.
    if len(bits) % 2 != 0:
        raise template.TemplateSyntaxError(u"'%s' tag requires valid pairings arguments." % tag_name)

    text_block = bits[1]
    if len(bits) < 4:
        raise template.TemplateSyntaxError(u"'%s' tag requires an object and a query provided by 'with'." % tag_name)
    if bits[2] != 'with':
        raise template.TemplateSyntaxError(u"'%s' tag's second argument should be 'with'." % tag_name)
    query = bits[3]

    # Consume the remaining tokens as (option, value) pairs; unknown names are
    # silently skipped, exactly as before.
    recognised = ('css_class', 'html_tag', 'max_length', 'start_head', 'filter_mark_down')
    kwargs = {}
    option_bits = iter(bits[4:])
    for option in option_bits:
        if option in recognised:
            kwargs[option] = six.next(option_bits)

    return HighlightNode(text_block, query, **kwargs)
| [
"418587175@qq.com"
] | 418587175@qq.com |
310f57f68e84aa364cd0a8bff37692ad665e16c1 | a0752885a2cb58a9651370d67644a61c485c8b69 | /app.py | 69385c4382f02ecd44f1c1451a81a60a45104218 | [] | no_license | Omar-Al-Azzawi/kuva-gallery | 84aa2acb6741c6411bf15805d9405e73ce32c2cc | ca73821e95cfb37b2c2da74db9f8f8431991b018 | refs/heads/main | 2023-04-09T08:44:04.668547 | 2021-04-30T17:54:27 | 2021-04-30T17:54:27 | 363,218,683 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | import os
from flask import Flask, request, redirect, render_template, send_from_directory
from werkzeug.utils import secure_filename
app = Flask(__name__)
# Absolute path of the directory containing this module; uploads live under it.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
# Lower-case image extensions the uploader is meant to accept.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
@app.route("/")
def index():
    """Render the gallery page listing every file in the images directory."""
    gallery_files = os.listdir('./images')
    return render_template("index.html", images=gallery_files)
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
@app.route("/upload", methods=["GET","POST"])
def upload_file():
    """Show the upload form (GET) or save the submitted files (POST).

    Security fix: the client-supplied filename is passed through
    ``werkzeug.utils.secure_filename`` (already imported by this module but
    previously unused) before it is joined into a filesystem path, so a
    crafted name such as ``../../evil.py`` cannot escape the images directory.
    """
    if request.method == "GET":
        return render_template('upload.html')

    target = os.path.join(APP_ROOT, 'images/')
    if not os.path.isdir(target):
        os.mkdir(target)

    for file in request.files.getlist("file"):
        # Sanitise the untrusted name before building the destination path.
        filename = secure_filename(file.filename)
        destination = os.path.join(target, filename)
        file.save(destination)

    return render_template("uploaded.html")
@app.route('/upload/<filename>')
def send_image(filename):
    """Serve one uploaded image by name from the images directory."""
    image_directory = "images"
    return send_from_directory(image_directory, filename)
def send_image_for_filter(image):
    """Render the single-image filter page (helper; no route decorator here)."""
    context = {'image': image}
    return render_template('filter.html', **context)
@app.route("/filters")
def filter():
    # NOTE(review): shadows the built-in ``filter``. Kept as-is because Flask
    # derives the endpoint name from the function name, so renaming it would
    # break any url_for('filter') call.
    return render_template('filters.html')
@app.url_defaults
def hashed_url_for_static_file(endpoint, values):
    """Append a cache-busting query parameter to generated static-file URLs.

    Runs for every url_for() call; for static endpoints it adds a parameter
    (``h``) whose value is the file's modification time, so browsers refetch
    an asset after it changes on disk.
    """
    if 'static' == endpoint or endpoint.endswith('.static'):
        filename = values.get('filename')
        if filename:
            # Work out which blueprint (if any) owns the static folder.
            if '.' in endpoint: # has higher priority
                blueprint = endpoint.rsplit('.', 1)[0]
            else:
                blueprint = request.blueprint # can be None too
            if blueprint:
                static_folder = app.blueprints[blueprint].static_folder
            else:
                static_folder = app.static_folder
            # Pick a query-parameter name that does not collide with existing values.
            param_name = 'h'
            while param_name in values:
                param_name = '_' + param_name
            values[param_name] = static_file_hash(os.path.join(static_folder, filename))
def static_file_hash(filename):
    """Return the file's last-modification time, truncated to whole seconds."""
    modified_at = os.stat(filename).st_mtime
    return int(modified_at)
if __name__ == "__main__":
app.run(port=5000) | [
"alazzawiomar01@gmai.com"
] | alazzawiomar01@gmai.com |
a313555b0019f0f153a4c1bb8ea702d099dea1cc | a2d117137df3e0c269081294bfada1955a439b65 | /TrainingSuite/migrations/0003_auto_20160319_1543.py | 53b02e82ab749e7ea3e78887ca68ff2727c8f978 | [] | no_license | ros-viper/training_suite | 05ee80b4d6788af355c06c40f0cf25a250692d3f | ee2deb9ae3c4322d8f93ab58f3f95c61800cada6 | refs/heads/master | 2020-03-29T04:21:57.515964 | 2018-09-20T00:44:33 | 2018-09-20T00:44:33 | 149,528,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-19 15:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional one-to-one ``user`` link (to AUTH_USER_MODEL) on both
    the Student and Trainer models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('TrainingSuite', '0002_auto_20160318_1102'),
    ]

    operations = [
        migrations.AddField(
            model_name='student',
            name='user',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='trainer',
            name='user',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"rostyslav.rzhenetskyy@gmail.com"
] | rostyslav.rzhenetskyy@gmail.com |
2028b80a4b6d97ce46899f4a0460a1a563aa6c5c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02767/s175337385.py | 603d5a2e1d8cc05d8e395e38bf7392094f4f30ff | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | N = int(input())
# Read the data points (the count N on the previous input line is unused here).
X = [int(token) for token in input().split()]
# Try every candidate p in [1, 100] and keep the smallest total squared cost.
ans = min(sum((p - x) ** 2 for x in X) for p in range(1, 101))
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a11ab50f121f06a787e4f3eca2800a50201185cd | 58c07f23c63bfb0ac69c7aa7de03973f1b1b4234 | /tests/celery/test_periodic.py | d62362f9f4c18f737df3b3792eed390ded46d6b2 | [
"MIT"
] | permissive | zconnect-iot/zconnect-django | 4d32e7535bebce4e94d1eb1a532d7e3b82326ca7 | 5c569f54f100e23d72e2ac4de795739ea461a431 | refs/heads/master | 2020-03-21T15:35:35.361709 | 2018-07-05T11:00:25 | 2018-07-05T11:00:25 | 138,721,705 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,733 | py | import contextlib
from datetime import datetime
from unittest.mock import Mock, patch
from freezegun import freeze_time
import pytest
from zconnect.tasks import (
remove_old_periodic_data, trigger_scheduled_events, trigger_update_strategy)
from zconnect.testutils.factories import (
DeviceSensorFactory, SensorTypeFactory, TimeSeriesDataFactory)
from zconnect.zc_timeseries.models import TimeSeriesData
def add_periodic_datums(data, device, product):
    """ Given a periodic document and a data point in the form
    ```
    {
        "temp": 23.0,
        "light": 1000,
        "ts": "2010-01-01T19:37:00Z"
    }
    ```
    create, for every non-``ts`` key, a sensor type on *product*, a device
    sensor on *device*, and one timeseries datum at timestamp ``ts``.

    Args:
        data (dict) - the data points to enter ('ts' plus one entry per sensor)
        device - device the sensors/readings are attached to (factory model)
        product - product the sensor types are attached to (factory model)
    """
    data = data.copy()
    ts = data.pop('ts')
    for k,v in data.items():
        sensor_type = SensorTypeFactory(
            sensor_name=k,
            unit="",
            product=product
        )
        device_sensor = DeviceSensorFactory(
            device=device,
            resolution=120,
            sensor_type=sensor_type,
        )
        TimeSeriesDataFactory(
            ts=ts,
            value=v,
            sensor=device_sensor,
        )
@contextlib.contextmanager
def patch_sender():
    """Patch zconnect.tasks.get_sender to return a Mock, and yield that Mock."""
    mock_client = Mock()
    patcher = patch("zconnect.tasks.get_sender", return_value=mock_client)
    with patcher:
        yield mock_client
@pytest.mark.usefixtures("set_event_def")
class TestPeriodicTriggerDay:
    """Scheduled trigger gated on both seconds-into-day and a day value."""

    event_def = {
        "enabled": True,
        "ref": "temp:max",
        "condition": "time==7200&&day==2",
        "actions":{
            "alarm": False
        },
        "scheduled": True,
    }

    # day==2 matches the True cases below: 2020-01-01 and 2020-07-01 are both
    # Wednesdays, while 2020-01-02 (Thursday) is False — i.e. 'day' is a
    # weekday index with Monday == 0.
    @pytest.mark.parametrize("time,expect_call", [
        ("2020-01-01T02:00:00Z", True),
        ("2020-01-01T02:00:01Z", True), # because of rounding
        ("2020-01-01T02:01:00Z", True), # True with new redis event system
        ("2020-01-01T01:00:00Z", False),
        ("2020-01-02T02:00:00Z", False),
        ("2020-07-01T02:00:00Z", True),
    ])
    @pytest.mark.usefixtures("first_event_evaluation_datetime_min")
    def test_periodic(self, time, expect_call, fakedevice, fakeproduct):
        """ test some combinations of periodic triggers"""
        # Freeze the clock, run the scheduled-event task, and check whether the
        # (mocked) device client was asked to act.
        with patch_sender() as fake_client:
            with freeze_time(time):
                trigger_scheduled_events()
        assert fake_client.as_device.called == expect_call
@pytest.mark.usefixtures("set_event_def")
class TestPeriodicTriggerTemp:
    """Scheduled trigger that also depends on a timeseries value (temp)."""

    event_def = {
        "enabled": True,
        "ref": "temp:max",
        "condition": "time==7200&&temp>30",
        "actions":{
            "alarm": False
        },
        "scheduled": True,
    }

    @pytest.mark.parametrize("temp,expect_call", [
        (32, True),
        (29, False),
    ])
    @pytest.mark.usefixtures('first_event_evaluation_datetime_min')
    def test_periodic_conditions(self, temp, expect_call, fakedevice, fakeproduct):
        """ test some periodic conditions with additional context requirements """
        # Seed a temperature reading at exactly the trigger time (02:00 == 7200s).
        context = {
            "temp": temp,
            "ts": datetime(2020,1,1,2,0,0)
        }
        add_periodic_datums(context, fakedevice, fakeproduct)
        with patch_sender() as fake_client:
            with freeze_time("2020-01-01T02:00:00Z"):
                trigger_scheduled_events()
        # Only the temp > 30 case should fire the action.
        assert fake_client.as_device.called == expect_call
@pytest.mark.usefixtures("set_event_def")
class TestPeriodicTriggerDay2:
    """Same time/day condition as above, plus a redis de-duplication check."""

    event_def = {
        "enabled": True,
        "ref": "temp:max",
        "condition": "time==7200&&day==2",
        "actions":{
            "alarm": False
        },
        "scheduled": True,
    }

    @pytest.mark.parametrize("time,expect_call", [
        ("2020-01-01T02:00:00Z", True),
        ("2020-01-01T02:00:01Z", True), # because of rounding
        ("2020-01-01T02:01:00Z", True), # True with redis events since it hasn't been previously evaluated.
        ("2020-01-01T01:00:00Z", False),
        ("2020-01-02T02:00:00Z", False),
        ("2020-07-01T02:00:00Z", True),
    ])
    @pytest.mark.usefixtures('first_event_evaluation_datetime_min')
    def test_periodic(self, time, expect_call):
        """ test some combinations of periodic triggers"""
        with patch_sender() as fake_client:
            with freeze_time(time):
                trigger_scheduled_events()
        assert fake_client.as_device.called == expect_call

    @pytest.mark.usefixtures('first_event_evaluation_datetime_min')
    def test_redis_functionality(self):
        """The stored last-evaluation time prevents a second firing."""
        with patch_sender() as fake_client:
            with freeze_time("2020-01-01T02:00:00Z"):
                trigger_scheduled_events()
        assert fake_client.as_device.called == True
        # Now try again, redis should have saved the evaluation time, so it
        # shouldn't run it again.
        with patch_sender() as fake_client:
            with freeze_time("2020-01-01T02:05:00Z"):
                trigger_scheduled_events()
        # The condition has been evaluated since the time was greater than the
        # condition time, don't reevaluate.
        assert fake_client.as_device.called == False
@pytest.mark.usefixtures("set_event_def")
class TestDevicePeriodicTriggerDay1AndTime:
    """Combined time==3600 && day==1 condition around the day boundary."""

    event_def = {
        "enabled": True,
        "ref": "timeandday",
        "condition": "time==3600&&day==1",
        "actions":{
            "alarm": False
        },
        "scheduled": True,
    }

    @pytest.mark.usefixtures('first_event_evaluation_datetime_min')
    def test_redis_time_day_functionality(self, fakedevice, fakeproduct):
        """ Make sure that time and day triggers are working correctly."""
        # First test an event definition: time==3600&&day==1
        # If this is evaluated on day one before 3600 it should be false
        # If this is then evaluated on day one after 3600 it should be true
        # (even though the day is the same as the last evaluation)
        with patch_sender() as fake_client:
            with freeze_time("2019-12-31T00:30:00Z"):
                trigger_scheduled_events()
        assert fake_client.as_device.called == False
        with patch_sender() as fake_client:
            with freeze_time("2019-12-31T01:30:00Z"):
                trigger_scheduled_events()
        # Should be true since this was last evaluated before the time matched.
        assert fake_client.as_device.called == True
@pytest.mark.usefixtures("set_event_def")
class TestDeviceFieldEventDef:
    """Triggers whose condition reads a device field (``device:online``)."""

    event_def = {
        "enabled": True,
        "ref": "devicefield",
        "condition": "device:online==False",
        "actions":{
            "alarm": False
        },
        "scheduled": True,
    }

    @pytest.mark.usefixtures('first_event_evaluation_datetime_min')
    def test_event_triggered_from_device_field(self, fakedevice, fakeproduct):
        """ Make sure that a condition which depends on a device field, in this
        case `online`, triggers event defs. """
        with patch_sender() as fake_client:
            trigger_scheduled_events()
        assert fake_client.as_device.called == True

    @pytest.mark.usefixtures('first_event_evaluation_datetime_min')
    def test_event_not_triggered_twice(self, fakedevice, fakeproduct):
        """ Make sure that a device field trigger only fires once """
        with patch_sender() as fake_client:
            trigger_scheduled_events()
            assert fake_client.as_device.call_count == 1
            # Second run must not fire the action again.
            trigger_scheduled_events()
            assert fake_client.as_device.call_count == 1
@pytest.mark.usefixtures("set_event_def")
class TestPeriodicTriggerDay1:
    """Day-only condition: fires once when the day first matches, not again."""

    event_def = {
        "enabled": True,
        "ref": "dayonly",
        "condition": "day==1",
        "actions": {
            "alarm": False
        },
        "scheduled": True,
    }

    @pytest.mark.usefixtures('first_event_evaluation_datetime_min')
    def test_redis_day_functionality(self, fakedevice, fakeproduct):
        """ Need to confirm exactly what this should do."""
        with patch_sender() as fake_client:
            with freeze_time("2019-12-30T23:30:00Z"):
                trigger_scheduled_events()
        # day == 0. So this should be false.
        assert fake_client.as_device.called == False
        # day == 1. So this should be True
        with patch_sender() as fake_client:
            with freeze_time("2019-12-31T00:30:00Z"):
                trigger_scheduled_events()
        assert fake_client.as_device.called == True
        with patch_sender() as fake_client:
            with freeze_time("2019-12-31T01:30:00Z"):
                trigger_scheduled_events()
        # Last time the condition was evaluated it was true so don't fire again
        assert fake_client.as_device.called == False
@pytest.mark.usefixtures("fakedevice")
class TestRemoveOldTimeSeriesData():
    """The cleanup task deletes old timeseries rows and keeps recent ones."""

    def test_removes_old(self, fakedevice):
        # A datum created with a 2015 clock counts as "old" and is removed.
        with freeze_time("2015-01-01T00:00:00"):
            TimeSeriesDataFactory(ts=datetime.utcnow())
        before = TimeSeriesData.objects.count()
        remove_old_periodic_data()
        after = TimeSeriesData.objects.count()
        assert after == before - 1

    def test_leaves_new(self, fakedevice):
        # A datum stamped "now" must survive the cleanup.
        TimeSeriesDataFactory(ts=datetime.utcnow())
        before = TimeSeriesData.objects.count()
        remove_old_periodic_data()
        after = TimeSeriesData.objects.count()
        assert after == before
@pytest.mark.usefixtures("fake_device_update_status")
class TestUpdateTask():
    """The periodic update task dispatches to apply_update_strategy."""

    def test_update_is_called(self):
        with patch('zconnect.tasks.apply_update_strategy') as patched:
            with patch_sender():
                trigger_update_strategy()
        assert patched.called
| [
"boulton@zoetrope.io"
] | boulton@zoetrope.io |
4d6cd1027ba3345911bdaf11bec3f493ce34647d | ebaa487f4fde26c83ce35e304cd2d7c33a76e2a8 | /csv_to_db.py | f0253c6c432c2b417b6ecd829fefd71c67856cbd | [] | no_license | vlampietti/finderscope-evantey | 40f82e4c0b4958f6b6e3952cbe1d8bfb7a8dce98 | 3125e3bb50bb2c15e658c82a4f22484fb97802a9 | refs/heads/master | 2021-07-06T05:06:22.597818 | 2017-07-19T19:25:04 | 2017-07-19T19:25:04 | 105,554,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | import os
from config import basedir
from finderscope import app
from finderscope.models import *
import pandas as pd
def load_files():
    """Load the fake person_course / person_course_day CSV fixtures into the DB."""
    # Every user record is attached to a single placeholder course.
    sample_course = Course(title="a_sample_course")
    db.session.add(sample_course)
    db.session.commit()
    course_id = sample_course.id

    # Per-user aggregate activity records.
    for chunk in pd.read_csv('data/fake_di_data_person_course.csv', chunksize=10000):
        for _, row in chunk.iterrows():
            user_record = User(user_id=row.user_id,
                               username=row.username,
                               ndays_act=row.ndays_act,
                               nevents=row.nevents,
                               nplay_video=row.nplay_video,
                               nchapters=row.nchapters,
                               nprogcheck=row.nprogcheck,
                               nproblem_check=row.nproblem_check,
                               nshow_answer=row.nshow_answer,
                               nvideo=row.nvideo,
                               nvideos_unique_viewed=row.nvideos_unique_viewed,
                               nvideos_total_watched=row.nvideos_total_watched,
                               sum_dt=row.sum_dt,
                               course_id=course_id)
            db.session.add(user_record)
            db.session.commit()  # one commit per row, as in the original loader

    # Per-user per-day activity records.
    for chunk in pd.read_csv('data/fake_di_data_person_course_day.csv', chunksize=10000):
        for _, row in chunk.iterrows():
            day_record = UserDay(
                username = row.username,
                date = row.date,
                nevents = row.nevents,
                nvideo = row.nvideo,
                nvideos_watched_sec = row.nvideos_watched_sec,
                nproblems_answered = row.nproblems_answered,
                nproblems_attempted = row.nproblems_attempted,
                sum_dt = row.sum_dt)
            db.session.add(day_record)
            db.session.commit()
if __name__ == '__main__':
    # Point SQLAlchemy at the local SQLite file, create all tables, then load the CSVs.
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'app.db')
    db.create_all()
    load_files()
| [
"evantey14@gmail.com"
] | evantey14@gmail.com |
c5153f275342c54b4f05c24d5b6f29c3ee31f56e | b2a3f09c0cc32b8bda9234b5f3b1074e79e8fc7a | /pySDC/implementations/transfer_classes/TransferMesh_NoCoarse.py | e606661a27c09325b28f69cfb6430971250cf41d | [
"BSD-2-Clause"
] | permissive | gdmcbain/pySDC | 5f3d683b5980ebda65449bf8652497568e955cb6 | 493b73b76c32562dbdd98c80a7935943c0fa8e11 | refs/heads/master | 2023-04-18T00:11:45.500910 | 2021-05-03T15:05:01 | 2021-05-03T15:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py |
import scipy.sparse as sp
from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
class mesh_to_mesh(space_transfer):
    """
    Custom base_transfer class, implements Transfer.py

    No-coarsening variant: restrict and prolong below do not build any
    interpolation matrices; they simply copy the data into a new mesh of the
    same type (identity transfer between levels of equal resolution).
    """

    def __init__(self, fine_prob, coarse_prob, params):
        """
        Initialization routine

        Args:
            fine_prob: fine problem
            coarse_prob: coarse problem
            params: parameters for the transfer operators
        """
        # invoke super initialization
        super(mesh_to_mesh, self).__init__(fine_prob, coarse_prob, params)

    def restrict(self, F):
        """
        Restriction implementation (identity: returns a copy of F)

        Args:
            F: the fine level data (easier to access than via the fine attribute)
        """
        if isinstance(F, mesh):
            G = mesh(F)
        elif isinstance(F, imex_mesh):
            G = imex_mesh(F)
        else:
            raise TransferError('Unknown data type, got %s' % type(F))
        return G

    def prolong(self, G):
        """
        Prolongation implementation (identity: returns a copy of G)

        Args:
            G: the coarse level data (easier to access than via the coarse attribute)
        """
        if isinstance(G, mesh):
            F = mesh(G)
        elif isinstance(G, imex_mesh):
            F = imex_mesh(G)
        else:
            raise TransferError('Unknown data type, got %s' % type(G))
        return F
| [
"r.speck@fz-juelich.de"
] | r.speck@fz-juelich.de |
8726aad8b9ee42024c92a015ec47d895be38a8af | a728fe7ee13800053dfa229e9fbc4770072de3cd | /polling_stations/apps/data_collection/management/commands/import_leeds.py | 8a268d1a49f89f95d6637253547e4015f0da8556 | [] | no_license | amelvin/UK-Polling-Stations | 03cc27c5d6d191458b7c159650179b2e01568464 | a5ce2cf57f732720af9d0e18b66635879c127f7b | refs/heads/master | 2021-01-12T01:15:46.442982 | 2016-12-21T11:22:53 | 2016-12-21T11:22:53 | 78,362,111 | 1 | 0 | null | 2017-01-08T18:26:53 | 2017-01-08T18:26:53 | null | UTF-8 | Python | false | false | 2,122 | py | from data_collection.management.commands import BaseMorphApiImporter
class Command(BaseMorphApiImporter):
    """Import Leeds (E08000035) polling districts and stations from the
    wdiv-scrapers morph.io scraper output."""
    srid = 4326
    districts_srid = 4326
    council_id = 'E08000035'
    elections = []
    scraper_name = 'wdiv-scrapers/DC-PollingStations-Leeds'
    geom_type = 'geojson'
    # Districts served by more than one polling station; filled in pre_import().
    split_districts = set()

    def pre_import(self):
        self.find_split_districts()

    def find_split_districts(self):
        """Identify districts mapped to more than one polling station.

        A district is 'split' when two station records share the same
        POLLING_DI but carry different OBJECTIDs. This groups the distinct
        OBJECTIDs per district in a single O(n) pass, replacing the previous
        all-pairs O(n^2) comparison; the resulting set is identical.
        """
        object_ids_by_district = {}
        for station in self.get_stations():
            district_id = station['POLLING_DI']
            object_ids_by_district.setdefault(district_id, set()).add(station['OBJECTID'])
        for district_id, object_ids in object_ids_by_district.items():
            if len(object_ids) > 1:
                self.split_districts.add(district_id)

    def district_record_to_dict(self, record):
        """Map one scraped district record to the importer's district dict."""
        poly = self.extract_geometry(record, self.geom_type, self.get_srid('districts'))
        return {
            'internal_council_id': record['POLLING_DI'],
            'name': '%s - %s' % (record['WARD'], record['POLLING_DI']),
            'area': poly
        }

    def station_record_to_dict(self, record):
        """Map one scraped station record to station dict(s).

        Returns None for split districts, a single dict for a plain district
        id, or a list of dicts when POLLING_DI packs several ids joined by '-'.
        """
        # Stations in split districts cannot be assigned unambiguously.
        if record['POLLING_DI'] in self.split_districts:
            return None
        location = self.extract_geometry(record, self.geom_type, self.get_srid('stations'))
        internal_ids = record['POLLING_DI'].split("-")
        if len(internal_ids) == 1:
            return {
                'internal_council_id': record['POLLING_DI'],
                'postcode': '',
                'address': record['POLLING_ST'],
                'location': location,
                'polling_district_id': record['POLLING_DI']
            }
        # One physical station serving several districts: one record per id.
        return [
            {
                'internal_council_id': district_id,
                'postcode': '',
                'address': record['POLLING_ST'],
                'location': location,
                'polling_district_id': district_id
            }
            for district_id in internal_ids
        ]
| [
"hishivshah@gmail.com"
] | hishivshah@gmail.com |
e07ba57b62f3de2f75247e36a9b41e606b23c21d | b6a37b551243c74c68d01205e87af5a89adc0ca9 | /model/train.py | 2e27b70c570b5c3faf2af301b04aa62c85a538ac | [
"MIT"
] | permissive | yhy-2000/Socialformer | 77a438c95b2405cf3b17023364ec46f0cb10af90 | 09d3be336cf78346d31a2ba6dd1460db18d076de | refs/heads/main | 2023-08-29T03:02:19.033728 | 2021-10-22T11:49:31 | 2021-10-22T11:49:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,531 | py | import time
import argparse
import pickle
import random
import numpy as np
import torch
import logging
import torch.nn.utils as utils
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from model import BertForSearch
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer, BertModel
# from Trec_Metrics import Metrics
# from pair_dataset import PairDataset
from point_dataset_all import PointDataset
from list_dataset_all import ListDataset
from tqdm import tqdm
import os
from evaluate import evaluator, evaluator_trec
# ---- Command-line arguments ------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--is_training",action="store_true")
parser.add_argument("--per_gpu_batch_size",
                    default=25,
                    type=int,
                    help="The batch size.")
parser.add_argument("--per_gpu_test_batch_size",
                    default=64,
                    type=int,
                    help="The batch size.")
parser.add_argument("--learning_rate",
                    default=1e-5,
                    type=float,
                    help="The initial learning rate for Adam.")
parser.add_argument("--task",
                    default="msmarco-doc",
                    type=str,
                    help="Task")
parser.add_argument("--epochs",
                    default=2,
                    type=int,
                    help="Total number of training epochs to perform.")
parser.add_argument("--max_docs",
                    default=8,
                    type=int,
                    help="Max number of documents per query.")
parser.add_argument("--max_groups",
                    default=16,
                    type=int,
                    help="Max number of subgraphs.")
parser.add_argument("--max_psglen",
                    default=128,
                    type=int,
                    help="Max number of passage length.")
parser.add_argument("--sub_graph",
                    default='sub_graph1',
                    type=str,
                    help="graph partition method.")
parser.add_argument("--aggregator",
                    default='max',
                    type=str,
                    help="pooling method.")
parser.add_argument("--save_path",
                    default="./model/",
                    type=str,
                    help="The path to save model.")
parser.add_argument("--msmarco_score_file_path",
                    type=str,
                    help="The path to save score file.")
parser.add_argument("--log_path",
                    default="./log/",
                    type=str,
                    help="The path to save log.")
parser.add_argument("--train_file",
                    type=str)
parser.add_argument("--dev_file",
                    type=str)
parser.add_argument("--dev_id_file",
                    type=str)
parser.add_argument("--bert_model",
                    type=str)
parser.add_argument("--dataset_script_dir",
                    type=str,
                    help="-.")
parser.add_argument("--dataset_cache_dir",
                    type=str,
                    help="-.")
parser.add_argument("--msmarco_dev_qrel_path",
                    type=str,
                    help="The path of relevance file.")
# NOTE(review): evaluate() formats `args.id` in its log lines, but no --id
# argument is registered here — reaching those lines raises AttributeError.
args = parser.parse_args()

# Scale the per-GPU batch sizes by the number of visible GPUs (DataParallel).
args.batch_size = args.per_gpu_batch_size * torch.cuda.device_count()
args.test_batch_size = args.per_gpu_test_batch_size * torch.cuda.device_count()
result_path = "./output/" + args.task + "/"
# args.save_path += BertForSearch.__name__ + "." + args.task
score_file_prefix = result_path + BertForSearch.__name__ + "." + args.task
# args.log_path += BertForSearch.__name__ + "." + args.task + ".log"
# args.msmarco_score_file_path = score_file_prefix + "." + args.msmarco_score_file_path
# Append-mode run log shared by the whole script (closed at the end of fit()).
logger = open(args.log_path, "a")
device = torch.device("cuda:0")
print(args)
logger.write("\n")
logger.write(str(args)+'\n')
# Training data is a directory of shard files; dev data is a single file.
train_dir = args.train_file
fns = [os.path.join(train_dir, fn) for fn in os.listdir(train_dir)]
train_data = fns
dev_data = args.dev_file
tokenizer = BertTokenizer.from_pretrained(args.bert_model)
def set_seed(seed=0):
    """Seed every RNG in use (python, hashing, numpy, torch CPU and GPU)."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    # Seed torch on the CPU and on every visible GPU.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # some cudnn methods can be random even after fixing the seed
    # unless you tell it to be deterministic
    torch.backends.cudnn.deterministic = True
def train_model():
    """Build the BERT ranking model, wrap it for multi-GPU, and train it."""
    bert_model = BertModel.from_pretrained(args.bert_model)
    # NOTE(review): the "+ 16" extra groups and "+ 5" extra positions look like
    # reserved slots for special tokens/padding — confirm against BertForSearch.
    model = BertForSearch(bert_model, args.max_docs, args.max_groups + 16, args.max_psglen+5)
    model = model.to(device)
    model = torch.nn.DataParallel(model)
    fit(model, train_data, dev_data)
def train_step(model, train_data):
    """Move one batch onto the target device and return the model's training loss."""
    with torch.no_grad():
        for name in train_data:
            train_data[name] = train_data[name].to(device)
    return model.forward(train_data, is_training=True, pooling=args.aggregator)
def fit(model, X_train, X_test):
    """Train the model on X_train, evaluating on X_test periodically.

    Args:
        model: (DataParallel-wrapped) BertForSearch instance.
        X_train: list of training shard file paths.
        X_test: dev data file path, forwarded to evaluate().
    """
    train_dataset = ListDataset(X_train, args.sub_graph, args.max_groups, args.max_psglen, tokenizer, args.dataset_script_dir, args.dataset_cache_dir)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8)
    optimizer = AdamW(model.parameters(), lr=args.learning_rate)
    t_total = int(len(train_dataset) * args.epochs // args.batch_size)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0 * int(t_total), num_training_steps=t_total)
    one_epoch_step = len(train_dataset) // args.batch_size
    # Evaluate roughly every 20% of an epoch (guard against tiny datasets).
    eval_every = max(1, one_epoch_step // 5)
    best_result = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    for epoch in range(args.epochs):
        print("\nEpoch ", epoch + 1, "/", args.epochs)
        logger.write("Epoch " + str(epoch + 1) + "/" + str(args.epochs) + "\n")
        avg_loss = 0
        model.train()
        epoch_iterator = tqdm(train_dataloader)
        for i, training_data in enumerate(epoch_iterator):
            loss = train_step(model, training_data)
            loss = loss.mean()
            loss.backward()
            utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()
            scheduler.step()
            model.zero_grad()
            # Mirror the scheduler's current LR back into args for the progress bar.
            for param_group in optimizer.param_groups:
                args.learning_rate = param_group['lr']
            epoch_iterator.set_postfix(lr=args.learning_rate, loss=loss.item())
            if i == 1:
                print("start training")
            if i % 100 == 0:
                print(i, loss.item())
            # BUG FIX: the original guard `i > 0 and i == (one_epoch_step//5) == 0`
            # chains to `i == X and X == 0`, which together with `i > 0` can never
            # be true, so mid-epoch evaluation never ran. Use modular arithmetic
            # to evaluate every 20% of the epoch as intended.
            if i > 0 and i % eval_every == 0:
                best_result = evaluate(model, X_test, best_result)
                model.train()
            avg_loss += loss.item()
        cnt = len(train_dataset) // args.batch_size + 1
        tqdm.write("Average loss:{:.6f} ".format(avg_loss / cnt))
    logger.close()
def evaluate(model, X_test, best_result, is_test=False):
    """Score the dev set, write a run file, and track/checkpoint the best result.

    Args:
        model: ranking model to evaluate.
        X_test: dev data file path (forwarded to predict()).
        best_result: best metric tuple seen so far.
        is_test (bool): True for final testing (log only, no checkpointing).

    Returns:
        The (possibly updated) best_result.
    """
    y_pred = predict(model, X_test)
    # print(y_pred)
    qid_pid_list = []
    with open(args.dev_id_file, 'r') as dif:
        for line in dif:
            qid, docid = line.strip().split()
            qid_pid_list.append([qid, docid])
    # print(len(y_pred))
    # print(len(qid_pid_list))
    # Write one "qid \t docid \t score" line per dev pair.
    fw = open(args.msmarco_score_file_path, 'w')
    # NOTE(review): the loop variable shadows the y_pred list; after this loop
    # y_pred refers to the last score, not the list of scores.
    for i, (qd, y_pred) in enumerate(zip(qid_pid_list, y_pred)):
        qid, pid = qd
        fw.write(qid + "\t" + pid + "\t" + str(y_pred) + "\n")
    fw.close()
    if args.task == "msmarco":
        myevaluator = evaluator(args.msmarco_dev_qrel_path, args.msmarco_score_file_path)
    elif args.task == "trecdl":
        myevaluator = evaluator_trec(args.msmarco_dev_qrel_path, args.msmarco_score_file_path)
    # NOTE(review): for any other args.task, myevaluator is unbound here and
    # the next line raises NameError.
    result = myevaluator.evaluate()
    if not is_test:
        # result[-2] is map@20 (see the unpacking below); a higher value than
        # the best seen so far means a new best checkpoint: log and save weights.
        if args.task == "msmarco" and result[-2] > best_result[-2]:
            best_result = result
            print("[best result]", result)
            _mrr100, _mrr10, _ndcg100, _ndcg20, _ndcg10, _map20, _p20 = result
            # NOTE(review): args.id is used below but no --id argument is
            # registered by the parser — this raises AttributeError when reached.
            tqdm.write(f"[best result][msmarco][{args.id}] mrr@100:{_mrr100}, mrr@10:{_mrr10}, ndcg@100:{_ndcg100}, ndcg@20:{_ndcg20}, ndcg@10:{_ndcg10}, map@20:{_map20}, p@20:{_p20}")
            logger.write(f"[best result][msmarco][{args.id}] mrr@100:{_mrr100}, mrr@10:{_mrr10}, ndcg@100:{_ndcg100}, ndcg@20:{_ndcg20}, ndcg@10:{_ndcg10}, map@20:{_map20}, p@20:{_p20}\n")
            logger.flush()
            # Unwrap DataParallel before saving so the checkpoint keys are clean.
            model_to_save = model.module if hasattr(model, 'module') else model
            torch.save(model_to_save.state_dict(), args.save_path)
        if args.task == "msmarco" and result[-2] <= best_result[-2]:
            print("[normal result]", result)
            _mrr100, _mrr10, _ndcg100, _ndcg20, _ndcg10, _map20, _p20 = result
            logger.write(f"[normal result][msmarco][{args.id}] mrr@100:{_mrr100}, mrr@10:{_mrr10}, ndcg@100:{_ndcg100}, ndcg@20:{_ndcg20}, ndcg@10:{_ndcg10}, map@20:{_map20}, p@20:{_p20}\n")
            logger.flush()
    if is_test and args.task == "msmarco":
        _mrr100, _mrr10, _ndcg100, _ndcg20, _ndcg10, _map20, _p20 = result
        tqdm.write(f"[{args.id}] mrr@100:{_mrr100}, mrr@10:{_mrr10}, ndcg@100:{_ndcg100}, ndcg@20:{_ndcg20}, ndcg@10:{_ndcg10}, map@20:{_map20}, p@20:{_p20}")
    if is_test and args.task == "trecdl":
        _ndcg100, _ndcg10, _ndcg20, _p20 = result
        logger.write(f"[normal result][trecdl][{args.id}] _ndcg100:{_ndcg100}, _ndcg10:{_ndcg10}, _ndcg20:{_ndcg20}, _p20:{_p20}\n")
        logger.flush()
    return best_result
def predict(model, X_test):
    """Score X_test with *model* and return a flat list of predictions.

    Runs the model in eval mode, without gradients, over a DataLoader built
    from PointDataset, and concatenates the per-batch scores in input order.

    Args:
        model: trained search model (possibly wrapped in DataParallel).
        X_test: raw evaluation samples accepted by PointDataset.

    Returns:
        list[float]: one relevance score per test sample.
    """
    model.eval()
    # NOTE(review): relies on module-level `args`, `tokenizer` and `device`
    # being configured by the surrounding script.
    test_dataset = PointDataset(X_test, args.sub_graph, args.max_groups,
                                args.max_psglen, tokenizer,
                                args.dataset_script_dir, args.dataset_cache_dir)
    test_dataloader = DataLoader(test_dataset, batch_size=args.test_batch_size,
                                 shuffle=False, num_workers=8)
    y_pred = []
    # A single no_grad() context suffices; the original nested a second,
    # redundant no_grad() inside the batch loop.  The unused `test_loss`
    # accumulator was also removed.
    with torch.no_grad():
        for test_data in tqdm(test_dataloader, leave=False):
            for key in test_data.keys():
                test_data[key] = test_data[key].to(device)
            y_pred_test = model.forward(test_data, is_training=False,
                                        pooling=args.aggregator)  # shape: (batch,)
            y_pred.append(y_pred_test.data.cpu().numpy().reshape(-1))
    return np.concatenate(y_pred, axis=0).tolist()
def test_model():
    """Load the saved checkpoint and run a single evaluation pass.

    Rebuilds the BertForSearch architecture with the same capacity arguments
    used at training time, restores the checkpoint saved by training
    (stripping any 'module.' prefix left by DataParallel), moves the model to
    the configured device and evaluates on the module-level `dev_data` with
    is_test=True.
    """
    bert_model = BertModel.from_pretrained(args.bert_model)
    # Capacity arguments must match those used when the checkpoint was saved.
    model = BertForSearch(bert_model, args.max_docs, args.max_groups + 16, args.max_psglen+5)
    #model = BertForSearch(bert_model, 2 * int(args.max_doc_len/args.window_size), args.window_size+5)
    # model.bert_model.resize_token_embeddings(model.bert_model.config.vocab_size + additional_tokens)
    model_state_dict = torch.load(args.save_path)
    # Checkpoints saved from a DataParallel model prefix keys with 'module.';
    # strip it so the bare model can load the weights.
    model.load_state_dict({k.replace('module.', ''):v for k, v in model_state_dict.items()})
    model = model.to(device)
    model = torch.nn.DataParallel(model)
    # The zero-filled list is the initial "best result" placeholder.
    evaluate(model, dev_data, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], True)
if __name__ == '__main__':
    # Entry point: seed RNGs for reproducibility, then either train or
    # evaluate depending on the --is_training flag.
    set_seed()
    if args.is_training:
        train_model()
    else:
        test_model()
| [
"noreply@github.com"
] | yhy-2000.noreply@github.com |
cb9224a4e42308249f9ab62d7934b7b6831f949b | 5666fde1b7d6a54bacd134fddf5b788e130e538c | /freqTable.py | dcd4331ea9f69cc44e97c940511842462db70830 | [] | no_license | 2CinnamonToast17/untitled1 | dae281ee51bbc94f2fd85cc68277b3f21bcd22a0 | e95dc113ba80a27f72ee553c10ccd7f04abd308a | refs/heads/master | 2021-08-11T13:03:15.805890 | 2017-11-13T18:47:44 | 2017-11-13T18:47:44 | 110,587,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | from collections import Counter
def create_classes(numbers, n):
    """Partition the range of *numbers* into *n* equal-width classes.

    Each class is a (lower, upper) tuple.  The last class extends to
    max(numbers) + 1 so that the maximum value falls inside it; due to
    floating-point width it may be narrower or wider than the others.

    Args:
        numbers: non-empty sequence of numbers.
        n: positive number of classes.

    Returns:
        list[tuple]: n (lower_bound, upper_bound) tuples.
    """
    low = min(numbers)
    high = max(numbers)
    # Width of each class.
    width = (high - low) / n
    # (The original initialised `classes` twice; once is enough.)
    classes = []
    a = low
    b = low + width
    while a < (high - width):
        classes.append((a, b))
        a = b
        b = a + width
    # The last class may be of a size that differs from `width`.
    classes.append((a, high + 1))
    return classes
def frequency_table(numbers):
    """Print a tab-separated number/frequency table, sorted by number."""
    counts = sorted(Counter(numbers).most_common())
    print('Number\tFrequency')
    for value, freq in counts:
        print('{0}\t{1}'.format(value, freq))
if __name__ == '__main__':
    # Demo: bucket the scores into 2 classes, then tabulate how often each
    # class tuple occurs (each appears once, so all frequencies are 1).
    scores = [7, 8, 9, 2, 10, 9, 9, 9, 9, 4, 5, 6, 1, 5, 6, 7, 8, 6, 1, 10]
    frequency_table(create_classes(scores, 2))
| [
"ajbonino217@gmail.com"
] | ajbonino217@gmail.com |
5c5d95676a4b1f9f05f4dd859dd0123d6394a5b4 | 96d22c2c40f5f0336a8a63c0f3a83f41532d975b | /season1/l014_function.py | 6905a2be1ebc5de39d6833221e9437dc235d1370 | [] | no_license | haluto/python | 98fd83707867b56846aa40ea5e492789f0a3e8bf | 52e72e9db041c0c95b3a35e7f39907248aa3e06b | refs/heads/master | 2023-03-28T04:33:47.062479 | 2021-04-01T03:02:41 | 2021-04-01T03:02:41 | 115,498,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,510 | py | #!/usr/bin/python
#coding=utf-8
########
##
########
def ChangeInt(a):
    """Rebind the local name *a*; the caller's int is unaffected (ints are immutable)."""
    a = 10
    print "in ChangeInt function: ", a
def changeme(mylist):
    """Append to *mylist* in place; the change is visible to the caller (lists are mutable)."""
    mylist.append([1,2,3,4])
    print "in changeme function: ", mylist
    return
def printinfo(name, age):
    """Print a name and age; both arguments are required (positional or keyword)."""
    print "Name: ", name
    print "Age: ", age
    return
def printinfo2(name, age = 35):
    """Print a name and age; *age* defaults to 35 when omitted."""
    print "Name: ", name
    print "Age: ", age
    return
def printinfo3(arg1, *vartuple):
    """Print *arg1* followed by each extra positional argument (variadic demo)."""
    print arg1
    for var in vartuple:
        print var
    return
def main():
    """Demonstrate Python 2 argument passing, parameter kinds, lambdas and scope."""
    ########
    ## Argument passing
    ########
    # In Python, types belong to objects; variables themselves are untyped:
    a=[1,2,3]
    b="Runoob"
    # Above, [1,2,3] is a list object and "Runoob" a string object, while the
    # names a and b are merely references (pointers) to those objects.
    ####
    # Mutable vs. immutable objects
    ####
    '''
    In Python, strings, tuples and numbers are immutable objects, while
    lists, dicts and similar containers are mutable.

    Immutable: after a=5, assigning a=10 creates a new int object 10 and
    rebinds a to it; the 5 is discarded, not modified.
    Mutable: after la=[1,2,3,4], la[2]=5 changes the third element of the
    same list object in place; la itself is not rebound.

    Argument passing in Python functions:
    Immutable types behave like C++ pass-by-value (ints, strings, tuples):
    fun(a) passes only the value, so rebinding a inside fun does not affect
    the caller's object.
    Mutable types behave like C++ pass-by-reference (lists, dicts): fun(la)
    receives the very same object, so in-place modifications are visible to
    the caller afterwards.

    Since everything in Python is an object, strictly speaking arguments
    are neither "by value" nor "by reference": we pass references to either
    immutable or mutable objects.
    '''
    # Example: passing an immutable object
    b = 2
    ChangeInt(b)
    print "out ChangeInt function: ", b # still 2: the rebinding stayed local
    # Example: passing a mutable object
    mylist = [10,20,30]
    changeme(mylist)
    print "out changeme function: ", mylist
    ########
    ## Parameters
    ########
    # Kinds of formal parameters usable when calling a function:
    '''
    required arguments
    keyword arguments
    default arguments
    variable-length arguments
    '''
    # Required arguments
    # Required arguments must be passed in the right order and number.
    #changeme() # would raise: TypeError: changeme() takes exactly 1 argument (0 given)
    # Keyword arguments
    # Keyword arguments are matched by parameter name at the call site, so
    # their order may differ from the declaration order.
    printinfo(age=50, name='miki')
    # Default arguments
    # When a default argument is omitted at the call site, its default
    # value from the definition is used.
    printinfo2(age=50, name='miki')
    printinfo2(name='wiki')
    # Variable-length arguments
    #def functionname([formal_args,] *var_args_tuple ):
    printinfo3(10)
    printinfo3(70,60,50)
    ########
    ## Anonymous functions
    ########
    '''
    Python uses lambda to create anonymous functions.
    A lambda is just an expression; its body is far simpler than def.
    The body is a single expression, not a block, so only limited logic can
    be wrapped in it.
    A lambda has its own namespace and cannot access parameters outside its
    own argument list or the global namespace.
    Although a lambda looks like a one-liner, it is not like a C/C++ inline
    function, whose purpose is to avoid stack overhead for small calls.
    '''
    #lambda [arg1 [,arg2,.....argn]]:expression
    # Example (note: `sum` shadows the builtin of the same name here)
    sum = lambda arg1, arg2: arg1+arg2
    print sum(10, 20)
    print sum(20, 20)
    ########
    ## The return statement
    ########
    # return [expression] exits the function, optionally handing a value
    # back to the caller; a bare return returns None.
    ########
    ## Variable scope
    ########
    # Global vs. local variables
    # global -- declares a name as a module-level (global) variable.
if __name__ == "__main__":
    # Run the demonstrations only when executed as a script.
    main()
| [
"jun.yin@tinno.com"
] | jun.yin@tinno.com |
af17030af8cd9d2852ba66eb17d6deacf28b5f24 | ad4d927b05d3004cc5f835c84807a272ecff439f | /src/olaf/build/rosserial/rosserial_arduino/catkin_generated/rosserial_arduino-extras.cmake.develspace.context.cmake.py | a1ba22978d6991df0e04c88abda9b4707da66098 | [] | no_license | kookmin-sw/capstone-2020-11 | 73954f8a692d3240a22ca9a81c9bede8538fabbf | 081733fb0470d83930433a61aabf9708275d64dd | refs/heads/master | 2023-03-06T23:02:14.869404 | 2022-11-09T01:44:27 | 2022-11-09T01:44:27 | 246,285,681 | 5 | 4 | null | 2023-03-04T13:53:47 | 2020-03-10T11:42:27 | C++ | UTF-8 | Python | false | false | 1,447 | py | # generated from catkin/cmake/template/cfg-extras.context.py.in
DEVELSPACE = 'TRUE' == 'TRUE'
INSTALLSPACE = 'FALSE' == 'TRUE'
CATKIN_DEVEL_PREFIX = '/home/nvidia/olaf/devel'
CATKIN_GLOBAL_BIN_DESTINATION = 'bin'
CATKIN_GLOBAL_ETC_DESTINATION = 'etc'
CATKIN_GLOBAL_INCLUDE_DESTINATION = 'include'
CATKIN_GLOBAL_LIB_DESTINATION = 'lib'
CATKIN_GLOBAL_LIBEXEC_DESTINATION = 'lib'
CATKIN_GLOBAL_PYTHON_DESTINATION = 'lib/python2.7/dist-packages'
CATKIN_GLOBAL_SHARE_DESTINATION = 'share'
CATKIN_PACKAGE_BIN_DESTINATION = 'lib/rosserial_arduino'
CATKIN_PACKAGE_ETC_DESTINATION = 'etc/rosserial_arduino'
CATKIN_PACKAGE_INCLUDE_DESTINATION = 'include/rosserial_arduino'
CATKIN_PACKAGE_LIB_DESTINATION = 'lib'
CATKIN_PACKAGE_LIBEXEC_DESTINATION = ''
CATKIN_PACKAGE_PYTHON_DESTINATION = 'lib/python2.7/dist-packages/rosserial_arduino'
CATKIN_PACKAGE_SHARE_DESTINATION = 'share/rosserial_arduino'
CMAKE_BINARY_DIR = '/home/nvidia/olaf/build'
CMAKE_CURRENT_BINARY_DIR = '/home/nvidia/olaf/build/rosserial/rosserial_arduino'
CMAKE_CURRENT_SOURCE_DIR = '/home/nvidia/olaf/src/rosserial/rosserial_arduino'
CMAKE_INSTALL_PREFIX = '/home/nvidia/olaf/install'
CMAKE_SOURCE_DIR = '/home/nvidia/olaf/src'
PKG_CMAKE_DIR = '/home/nvidia/olaf/devel/share/rosserial_arduino/cmake'
PROJECT_NAME = 'rosserial_arduino'
PROJECT_BINARY_DIR = '/home/nvidia/olaf/build/rosserial/rosserial_arduino'
PROJECT_SOURCE_DIR = '/home/nvidia/olaf/src/rosserial/rosserial_arduino'
| [
"ksp2246@naver.com"
] | ksp2246@naver.com |
66a39e8da4051e3be3eda89e19b36ed5bf8ddf44 | 6f94db52103adeee9727d795ca1ba0b9d98f096f | /geodjango/api/migrations/0006_auto_20180731_2052.py | c085ed52aca72c086d7ebf39b07aa32d5fb85eb0 | [] | no_license | dannybombastic/vozplus | a15fa5a13f39b749479bdc39ad87062b50f775a7 | b338809797f874388bbff270dea1c342e9d0fa56 | refs/heads/master | 2022-12-10T12:53:27.441115 | 2018-08-01T08:52:39 | 2018-08-01T08:52:39 | 143,129,291 | 0 | 0 | null | 2022-12-08T02:25:12 | 2018-08-01T08:48:10 | Python | UTF-8 | Python | false | false | 591 | py | # Generated by Django 2.0.7 on 2018-07-31 20:52
import django.contrib.gis.db.models.fields
import django.contrib.gis.geos.point
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django (manage.py makemigrations) on 2018-07-31.
    # Alters MenbersPoint.point to a GeoDjango PointField with an explicit
    # default coordinate (lon -4.8775168, lat 36.51584, WGS84 / SRID 4326).

    dependencies = [
        ('api', '0005_auto_20180731_2042'),
    ]

    operations = [
        migrations.AlterField(
            model_name='menberspoint',
            name='point',
            field=django.contrib.gis.db.models.fields.PointField(default=django.contrib.gis.geos.point.Point(-4.877516799999999, 36.51584, srid=4326), srid=4326),
        ),
    ]
| [
"root@tough-cough.com"
] | root@tough-cough.com |
297121678430c46b62630c0bcc79578fe7d3bb1f | a98ea0571455b88e48af90e731b94d595ecfa4f7 | /LinkedList/09_linkedlist.py | 92c388ca665b5d32bea14f384150ce9274bf9e56 | [] | no_license | KishoreKicha14/DSandALGO | f0ccbe206259e59d60f22fd2f7654db8c102f4bc | 837adfaea1f94f91b3c157caf1734312d9ecc1a2 | refs/heads/master | 2020-06-18T08:01:29.786775 | 2019-07-10T16:49:17 | 2019-07-10T16:49:17 | 196,224,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,471 | py | class Node(object):
    def __init__(self,data):
        """Create a list node holding *data* with no successor."""
        self.data=data  # payload (a string in the demo below)
        self.link=None  # next node in the chain, or None at the tail
class LinkedList(object):
    """Singly linked list of node objects exposing `.data` and `.link`."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty
        self.size = 0     # number of nodes appended via insertend

    def insertend(self, data):
        """Append the node *data* to the end of the list (O(n) walk)."""
        if not self.head:
            self.head = data
        else:
            actual = self.head
            while actual.link != None:
                actual = actual.link
            actual.link = data
        self.size += 1

    def transveral(self):
        """Print the node payloads joined by '->' (name kept for compatibility)."""
        actual = self.head
        parts = []
        while actual != None:
            parts.append(actual.data)
            actual = actual.link
        print("->".join(parts))

    def swap(self):
        """Swap the data of each adjacent pair of nodes.

        The original duplicated the loop for even and odd `self.size`;
        a single guard on both `actual` and `actual.link` handles either
        parity (an odd-length list simply leaves the last node untouched)
        and no longer trusts `self.size` to be accurate.
        """
        actual = self.head
        while actual != None and actual.link != None:
            actual.data, actual.link.data = actual.link.data, actual.data
            actual = actual.link.link
# Demo: build a 5-node list A..E, print it, swap adjacent pairs, print again.
node1=Node("A")
node2=Node("B")
node3=Node("C")
node4=Node("D")
node5=Node("E")
node6=Node("F")  # created but deliberately left out of the list
ll1=LinkedList()
ll1.insertend(node1)
ll1.insertend(node2)
ll1.insertend(node3)
ll1.insertend(node4)
ll1.insertend(node5)
#ll1.insertend(node6)
ll1.transveral()  # A->B->C->D->E
ll1.swap()        # odd length: the last node keeps its value
ll1.transveral()  # B->A->D->C->E
"noreply@github.com"
] | KishoreKicha14.noreply@github.com |
6538063f4beba7f3d0a8ad1bc05baee8cbbf0ddf | 23b686feb2d0ab9082a7ce622fc055946ed99c55 | /.history/atkd/forms_20190411112355.py | ce642266f9f3f7e83b028ecf825e5faa01be9731 | [] | no_license | jasvr/atkd | a18b9840bf9948a7560684cd5eb0d5e22f6c52c7 | daf61f7aa11cfc812171298894b1d0019641c4bd | refs/heads/master | 2020-05-07T09:35:56.343837 | 2019-04-12T16:17:09 | 2019-04-12T16:17:09 | 180,383,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from django import forms
from .models import Parent, Student
class ParentForm(forms.ModelForm):
class Meta:
model = Parent
fields = ('first_name','last_name')
class StudentForm(forms.ModelForm):
class Meta:
model = Song
fields = ('title', 'album', 'preview_url', 'artist',) | [
"jas.vrgs@gmail.com"
] | jas.vrgs@gmail.com |
5e8bddb5ee16dfb2c418e0ce4eba2ee031a27d5d | cbf9dc37d7082f609a3d4aed505be32cc3e95cb0 | /server.py | 7d0e6e1bd13fce3f99592b75c14a5b0384b17de1 | [] | no_license | ciprian123/securitatea_informatiei_tema1 | 38e1cf9f5d7c88c291ce92d134cd2f0dee3bed6a | 2913790710642c7ab221b50830ae6a30a8d71f58 | refs/heads/main | 2023-01-06T15:08:26.421300 | 2020-11-01T13:21:32 | 2020-11-01T13:21:32 | 308,021,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,595 | py | import sys
import socket
import pickle
from random import randint
from threading import Thread
from encryptor import Encryptor
from decrypter import Decrypter
def get_random_string(n):
    """Return *n* random decimal digits as a UTF-8 bytes object.

    Calls `randint` exactly once per character (same order as the original
    implementation), so seeded runs reproduce the original output.  Uses
    str.join instead of the original quadratic `+=` string building.

    NOTE(review): `random` is not cryptographically secure; since this
    output is used as AES IV material below, `secrets` would be safer.
    """
    digits = "0123456789"
    return "".join(digits[randint(0, 9)] for _ in range(n)).encode('utf8')
# Static demo keys/IVs (16 characters each, matching the AES block size).
secret_key3 = '1111222233334444'  # pre-shared key protecting the key exchange
iv_k3 = b'1002492919392444'       # fixed IV used together with secret_key3
secret_key2 = '1122334455667788'  # session key handed to client B
iv_k2 = get_random_string(16)     # fresh random IV for client B's session
secret_key1 = '1234567890123456'  # session key handed to client A
iv_k1 = get_random_string(16)     # fresh random IV for client A's session
# Shared state written by client A's thread and read by client B's thread:
client_a_enc_mode = ''            # 'cbc' or 'cfb', chosen by client A
encrypted_message_pickle = ''     # pickled payload relayed from A to B
def client_a_thread(conn, ip, port):
    """Serve client A (the sender): negotiate keys, then receive the file.

    Protocol (key material is always wrapped with the pre-shared key3/iv_k3):
      1. read A's preferred AES mode ('cbc'; anything else selects 'cfb');
      2. send [mode, E(secret_key1), E(iv_k1)];
      3. read back the encrypted key as a confirmation handshake;
      4. receive the pickled [counter, file_content], decrypt with key1/iv_k1,
         re-encrypt under client B's key2/iv_k2 using the *opposite* AES mode,
         and publish the result in the global `encrypted_message_pickle`.
    """
    global client_a_enc_mode, encrypted_message_pickle
    # Step 1: client A announces its encryption mode; remember it globally so
    # client B's thread can pick the complementary mode.
    encryption_mode = conn.recv(128).decode('utf8')
    if encryption_mode.lower() == 'cbc':
        client_a_enc_mode = 'cbc'
        encrypted_key = Encryptor.simulate_aes_cbc_encryption(secret_key1, secret_key3, iv_k3)[0]
        encrypted_iv_k1 = Encryptor.simulate_aes_cbc_encryption(iv_k1, secret_key3, iv_k3)[0]
    else:
        client_a_enc_mode = 'cfb'
        encrypted_key = Encryptor.simulate_aes_cfb_encryption(secret_key1, secret_key3, iv_k3)[0]
        encrypted_iv_k1 = Encryptor.simulate_aes_cfb_encryption(iv_k1, secret_key3, iv_k3)[0]
    # Step 2: hand A its session key and IV, encrypted under key3.
    response = pickle.dumps([encryption_mode, encrypted_key, encrypted_iv_k1])
    conn.sendall(response)
    # Step 3: receive encrypted key as confirmation message.
    # NOTE(review): recv(128) assumes the whole pickle arrives in one read.
    data = conn.recv(128)
    encrypted_key_to_confirm = pickle.loads(data)
    if encryption_mode == 'cbc':
        key_to_confirm = Decrypter.simulate_aes_cbc_decryption([encrypted_key_to_confirm], secret_key3, iv_k3)
    else:
        key_to_confirm = Decrypter.simulate_aes_cfb_decryption([encrypted_key_to_confirm], secret_key3, iv_k3)
    print('[Client A] send confirmation key: ', key_to_confirm)
    if key_to_confirm == secret_key1:
        print('Key is valid!')
        # Confirm that the secure channel is established.
        conn.sendall('[SERVER] Secure connection established!'.encode('utf'))
    else:
        print('Key is invalid!')
        conn.sendall('[SERVER] Secure connection cannot be established!'.encode('utf'))
    # Step 4: receive the (potentially large) pickled [counter, content].
    response = conn.recv(20480000)
    decoded_response = pickle.loads(response)
    if encryption_mode == 'cbc':
        decrypted_counter = Decrypter.simulate_aes_cbc_decryption(decoded_response[0], secret_key1, iv_k1)
        decrypted_file_content = Decrypter.simulate_aes_cbc_decryption(decoded_response[1], secret_key1, iv_k1)
    else:
        decrypted_counter = Decrypter.simulate_aes_cfb_decryption(decoded_response[0], secret_key1, iv_k1)
        decrypted_file_content = Decrypter.simulate_aes_cfb_decryption(decoded_response[1], secret_key1, iv_k1)
    print('[SERVER] Decrypted file content:')
    print(decrypted_file_content)
    # Re-encrypt for client B with key2/iv_k2, deliberately using the AES
    # mode opposite to the one client A chose.
    if encryption_mode == 'cbc':
        encrypted_counter = Encryptor.simulate_aes_cfb_encryption(decrypted_counter, secret_key2, iv_k2)[0]
        encrypted_content = Encryptor.simulate_aes_cfb_encryption(decrypted_file_content, secret_key2, iv_k2)
    else:
        encrypted_counter = Encryptor.simulate_aes_cbc_encryption(decrypted_counter, secret_key2, iv_k2)[0]
        encrypted_content = Encryptor.simulate_aes_cbc_encryption(decrypted_file_content, secret_key2, iv_k2)
    encrypted_message_pickle = pickle.dumps([encrypted_counter, encrypted_content])
def client_b_thread(conn, ip, port):
    """Serve client B (the receiver): negotiate keys, then relay the file.

    Mirrors client_a_thread, except that B is always assigned the AES mode
    *opposite* to A's, and the payload forwarded at the end is the one
    client A's thread published in `encrypted_message_pickle`.
    """
    global client_a_enc_mode, encrypted_message_pickle
    # Tell B which mode client A picked; spin until A's thread has set it.
    # NOTE(review): these repeated sendall calls put the mode string on the
    # stream several times - confirm the client tolerates the duplicates.
    conn.sendall(bytes(client_a_enc_mode.encode('utf8')))
    while client_a_enc_mode == '':
        conn.sendall(bytes(client_a_enc_mode.encode('utf8')))
    conn.sendall(bytes(client_a_enc_mode.encode('utf8')))
    conn.sendall(bytes(client_a_enc_mode.encode('utf8')))
    if client_a_enc_mode == 'cbc':
        # if client a uses cbc, client b will use cfb, and vice versa
        encrypted_key = Encryptor.simulate_aes_cfb_encryption(secret_key2, secret_key3, iv_k3)[0]
        encrypted_iv_k2 = Encryptor.simulate_aes_cfb_encryption(iv_k2, secret_key3, iv_k3)[0]
    else:
        encrypted_key = Encryptor.simulate_aes_cbc_encryption(secret_key2, secret_key3, iv_k3)[0]
        encrypted_iv_k2 = Encryptor.simulate_aes_cbc_encryption(iv_k2, secret_key3, iv_k3)[0]
    # Hand B its session key and IV, encrypted under key3.
    response = pickle.dumps([client_a_enc_mode, encrypted_key, encrypted_iv_k2])
    conn.sendall(response)
    # Receive the encrypted key as the confirmation message.
    data = conn.recv(2048)
    encrypted_key_to_confirm = pickle.loads(data)
    if client_a_enc_mode == 'cbc':
        key_to_confirm = Decrypter.simulate_aes_cfb_decryption([encrypted_key_to_confirm], secret_key3, iv_k3)
    else:
        key_to_confirm = Decrypter.simulate_aes_cbc_decryption([encrypted_key_to_confirm], secret_key3, iv_k3)
    print('[Client B] send confirmation key: ', key_to_confirm)
    if key_to_confirm == secret_key2:
        print('Key is valid!')
        # Confirm that the secure channel is established.
        conn.sendall('[SERVER] Secure connection established!'.encode('utf'))
    else:
        print('Key is invalid!')
        conn.sendall('[SERVER] Secure connection cannot be established!'.encode('utf'))
    # Busy-wait until client A's thread has published the re-encrypted
    # payload, then forward it to B.
    while encrypted_message_pickle == '':
        pass
    conn.sendall(encrypted_message_pickle)
    print('[SERVER] Encrypted data send successfully!')
def start_server():
    """Accept exactly two clients (A, then B) and serve each on its own thread.

    Binds to 127.0.0.1:6969, waits for both connections, then dispatches
    client_a_thread / client_b_thread.

    Bug fix: the original derived client B's ip/port from `addr_client_a`,
    so both log lines reported client A's address.
    """
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # this is for easy starting/killing the app
    soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    print('Socket created')
    try:
        soc.bind(("127.0.0.1", 6969))
        print('Socket bind complete')
    except socket.error as msg:
        print('Bind failed. Error : ' + str(sys.exc_info()))
        sys.exit()
    soc.listen(2)
    print('Socket now listening...')
    conn_client_a, addr_client_a = soc.accept()
    conn_client_b, addr_client_b = soc.accept()
    ip_client_a, port_client_a = str(addr_client_a[0]), str(addr_client_a[1])
    # BUG FIX: use addr_client_b here (the original reused addr_client_a).
    ip_client_b, port_client_b = str(addr_client_b[0]), str(addr_client_b[1])
    print('Accepting connection from: ' + ip_client_a + ':' + port_client_a)
    print('Accepting connection from: ' + ip_client_b + ':' + port_client_b)
    Thread(target=client_a_thread, args=(conn_client_a, ip_client_a, port_client_a)).start()
    Thread(target=client_b_thread, args=(conn_client_b, ip_client_b, port_client_b)).start()
# Start the relay server when the module is executed.
start_server()
| [
"noreply@github.com"
] | ciprian123.noreply@github.com |
762d3a50e6ea2118503945c0eebae37e5cfa5665 | 8b1f8b425091d16be780ebc848fbc03807067154 | /main.py | 948591522ccebc53ae36f991c6116d2ecf8b2bca | [] | no_license | KyotoWeb/RandomPw | 300673f17e219a87863efede4a0b800770af6181 | 498d4ab733ccd3afdade0969ffc0f90bff0d9ffe | refs/heads/main | 2023-08-27T10:38:04.047612 | 2021-10-03T11:07:21 | 2021-10-03T11:07:21 | 413,053,141 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | from pystyle import Colorate, Colors
from pycenter import center
from os import name, system
import random
def clear():
    """Clear the console screen ('cls' on Windows, 'clear' elsewhere)."""
    if name == 'nt':
        system("cls")
    else:
        system("clear")
# On Windows, set the console title and resize the window to 190x40.
if name =='nt':
    system("title RandomPw & mode 190, 40 ")
banner = """\n
██▀███ ▄▄▄ ███▄ █ ▓█████▄ ▒█████ ███▄ ▄███▓ ██▓███ █ █░
▓██ ▒ ██▒▒████▄ ██ ▀█ █ ▒██▀ ██▌▒██▒ ██▒▓██▒▀█▀ ██▒▓██░ ██▒▓█░ █ ░█░
▓██ ░▄█ ▒▒██ ▀█▄ ▓██ ▀█ ██▒░██ █▌▒██░ ██▒▓██ ▓██░▓██░ ██▓▒▒█░ █ ░█
▒██▀▀█▄ ░██▄▄▄▄██ ▓██▒ ▐▌██▒░▓█▄ ▌▒██ ██░▒██ ▒██ ▒██▄█▓▒ ▒░█░ █ ░█
░██▓ ▒██▒ ▓█ ▓██▒▒██░ ▓██░░▒████▓ ░ ████▓▒░▒██▒ ░██▒▒██▒ ░ ░░░██▒██▓
░ ▒▓ ░▒▓░ ▒▒ ▓▒█░░ ▒░ ▒ ▒ ▒▒▓ ▒ ░ ▒░▒░▒░ ░ ▒░ ░ ░▒▓▒░ ░ ░░ ▓░▒ ▒
░▒ ░ ▒░ ▒ ▒▒ ░░ ░░ ░ ▒░ ░ ▒ ▒ ░ ▒ ▒░ ░ ░ ░░▒ ░ ▒ ░ ░
░░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░
░
by K Y O T O
"""
# Render the ASCII-art banner with a vertical colour gradient.
print(Colorate.Vertical(Colors.blue_to_cyan, center(banner, space=60 ), stop=20))

# Alphabet used for generated passwords: letters, digits and a few symbols.
# NOTE(review): lowercase run is missing 'n' and the uppercase run has 'N'
# and 'O' transposed ("...MON...") - looks like typos; confirm before fixing.
chars = "abcdefghijklmopqrstuvwxyzABCDEFGHIJKLMONPQRSTUVWXYZ1234567890!$^(')"
# Prompt-and-generate loop; runs until the process is interrupted.
while 1:
    password_len = int(input((Colorate.Horizontal(Colors.purple_to_blue, "what lenght would you like your password to be : "))))
    password_count = int(input((Colorate.Horizontal(Colors.purple_to_blue, "How many passwords would you like : "))))
    for x in range(0,password_count):
        password = ""
        # Inner loop reuses the name `x`, shadowing the outer counter.
        for x in range(0,password_len):
            passsword_char = random.choice(chars)
            password = password + passsword_char
print("Here is your password :", password) | [
"noreply@github.com"
] | KyotoWeb.noreply@github.com |
c4b45dd88a794716f552bc60a41f70ae15c9b8e0 | 6be8f6e0387c3d250170acad54b6d39a6f439fdb | /todo/urls.py | 3af915fe2b2362725af0ffa466a3b450883eea6e | [] | no_license | Notsamu/todoproject | 906786497caf8f06fb079771b1e7dd0f1d19cf48 | e7fd2f8313ba71d4afc457494686945afa727b6f | refs/heads/main | 2023-03-19T01:41:18.585140 | 2021-03-11T07:37:47 | 2021-03-11T07:37:47 | 346,617,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from django.urls import path, include
from .views import TodoList, TodoDetail, TodoCreate, TodoDelete, TodoUpdate
urlpatterns = [
path('list/', TodoList.as_view(), name='list'),
path('detail/<int:pk>', TodoDetail.as_view(), name='detail'),
path('create/', TodoCreate.as_view(), name='create'),
path('delete/<int:pk>', TodoDelete.as_view(), name='delete'),
path('update/<int:pk>', TodoUpdate.as_view(), name='update')
] | [
"introduction7991@gmail.com"
] | introduction7991@gmail.com |
fb1c41b85bf8f1158c015ac18a96d7dd139dc578 | 3479c74de4c5afe01df8692affb1d1b65a987210 | /main.py | 39098719b00ac8e2f4f17e795cf92d347ccc2010 | [] | no_license | rutvikvijjali/vizdoom-ddqn | c521581665bab2e122d0b1cc92c72ede5c735e0f | 7217b9ffedce222c5abadfb42eb5b8b560b767bf | refs/heads/master | 2020-04-24T16:07:26.242869 | 2019-02-22T16:25:05 | 2019-02-22T16:25:05 | 172,095,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,885 | py | from vizdoom import *
import numpy as np
import time,random
import tensorflow as tf
from collections import deque
import skimage
from skimage import transform, color, exposure
import warnings # This ignore all the warning messages that are normally printed during the training because of skiimage
import objgraph
warnings.filterwarnings('ignore')
import os
class agent:
    """Deep Q-learning agent for ViZDoom (TensorFlow 1.x graph mode).

    Builds a small CNN Q-network over stacks of 4 grayscale 84x84 frames and
    learns from an experience-replay buffer.  Lines implementing a separate
    target network (double DQN) are kept commented out, as in the original.
    """

    def __init__(self, sess, input_dim, output_dim, batch_size, tau, buffer_size):
        """Construct the graph and replay buffer.

        Args:
            sess: tf.Session (may be None at construction; set later).
            input_dim: observation shape, e.g. [84, 84, 4].
            output_dim: number of discrete actions.
            batch_size: replay mini-batch size.
            tau: soft-update rate for the (optional) target network.
            buffer_size: replay buffer capacity.
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.tau = tau
        self.buffer = deque(maxlen=buffer_size)  # experience replay buffer
        self.inp_layer = tf.placeholder(tf.float32, [None, *self.input_dim])
        self.sess = sess
        self.outputs = self.createNetwork()
        self.gamma = 0.99  # discount factor
        self.network_params = tf.trainable_variables()
        # Uncomment the lines below for using a target network:
        # self.t_outputs = self.createNetwork()
        # self.t_network_params = tf.trainable_variables()[len(self.network_params):]
        # Target-network soft update; uncomment the next 2 lines for DDQN:
        # self.update_target_op = [self.t_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.t_network_params[i], 1 - self.tau)) for i in range(len(self.network_params))]
        # self.t_out = tf.placeholder(tf.float32,[None,3])
        self.t_out = tf.placeholder(tf.float32, [None], 'Target_out')
        self.action_buffer = []  # current 84x84x4 stack of the last 4 frames
        self.action_vec = tf.placeholder(tf.float32, [None, 3], name='Action_vector')
        # Q-value of the taken action; comment the line below for DDQN:
        self.q_vector = tf.reduce_sum(tf.multiply(self.outputs, self.action_vec))
        # Uncomment the line below and comment the next one for DDQN:
        # self.loss = tf.reduce_mean(tf.square(self.outputs-self.t_out))
        self.loss = tf.reduce_mean(tf.square(self.q_vector - self.t_out))
        self.frames = []  # rolling list of the last 4 preprocessed frames
        self.optimizer = tf.train.AdamOptimizer(0.002).minimize(self.loss)
        self.decay_rate = 0.95  # epsilon decay speed
        self.stop_prob = 0.01   # final exploration probability
        self.start_prob = 1.0   # initial exploration probability

    def train(self):
        """Sample a mini-batch from the replay buffer and do one SGD step.

        Returns:
            float: the scalar TD loss for the batch.
        """
        index = self.sample()
        # NOTE(review): `inp` (the *current* states) is built but never fed -
        # the feeds below use the next states (tar) for both prediction and
        # the training step.  Confirm whether `inp` was meant to be fed.
        inp = []
        tar = []
        done = []
        rew = []
        act_vec = []
        for i in index:
            inp.append(self.buffer[i][0])
            tar.append(self.buffer[i][3])
            rew.append(self.buffer[i][2])
            act = np.zeros(3)
            act[self.buffer[i][1]] = 1  # one-hot of the action taken
            act_vec.append(act)
            done.append(self.buffer[i][4])
        # target_predictions = self.sess.run(self.t_outputs, feed_dict={self.inp_layer: tar})  # Uncomment this line for DDQN
        model_predictions = self.sess.run(self.outputs, feed_dict={self.inp_layer: tar})
        target = np.zeros(np.shape(model_predictions))  # used only by the DDQN variant below
        tar_q = []
        for i in range(self.batch_size):
            if (done[i]):
                # Terminal transition: target is the raw reward.
                target[i][np.argmax(act_vec[i])] = rew[i]
                tar_q.append(rew[i])
            else:
                act = np.argmax(model_predictions[i])
                # Uncomment the line below and comment the next one for DDQN:
                # target[i][np.argmax(act_vec[i])] = rew[i] + self.gamma*(target_predictions[i][act])
                target[i][np.argmax(act_vec[i])] = rew[i] + self.gamma * (model_predictions[i][act])
                tar_q.append(rew[i] + self.gamma * (np.max(model_predictions[i][act])))
        # Uncomment the line below and comment the next one for DDQN:
        # _, l = self.sess.run([self.optimizer, self.loss], feed_dict={self.inp_layer: tar, self.action_vec: act_vec, self.t_out: target})
        _, l = self.sess.run([self.optimizer, self.loss], feed_dict={self.inp_layer: tar, self.action_vec: act_vec, self.t_out: tar_q})
        # self.sess.run(self.update_target_op)  # Uncomment this line for DDQN
        return l

    def choose_act(self, state, decay_step, isRand, newEp):
        """Epsilon-greedy action selection over a 4-frame stack.

        Args:
            state: raw screen buffer for the current step.
            decay_step: global step used to anneal epsilon.
            isRand: force a random action (used while the buffer warms up).
            newEp: True on the first step of an episode (resets the stack).

        Returns:
            (action_index, frame_stack_as_list): the chosen action and the
            stack that produced the decision (stored with the transition).
        """
        explore_prob = self.stop_prob + (self.start_prob - self.stop_prob) * np.exp(-decay_step * self.decay_rate)
        img = skimage.transform.resize(state, (84, 84))
        if (newEp):
            # New episode: seed the stack with 4 copies of the first frame,
            # then fall through to the greedy return at the bottom.
            self.frames = [img, img, img, img]
            self.action_buffer = np.stack(self.frames, axis=2)
            act = self.sess.run(self.outputs, feed_dict={self.inp_layer: self.action_buffer.reshape((1, 84, 84, 4))})
        elif (explore_prob > np.random.rand() or isRand):
            # Exploration: uniform random action.
            # NOTE(review): the frame stack is NOT updated on this path, so
            # the stored stack lags one frame - confirm if intentional.
            act = np.random.randint(0, 3)
            return act, self.action_buffer.tolist()
        else:
            # Greedy: shift the stack by one frame and evaluate Q-values.
            self.frames[:-1] = self.frames[1:]
            self.frames[-1] = img
            self.action_buffer = np.stack(self.frames, axis=2)
            act = self.sess.run(self.outputs, feed_dict={self.inp_layer: self.action_buffer.reshape((1, 84, 84, 4))})
        return np.argmax(act[0]), self.action_buffer.tolist()

    def add(self, experience):
        """Append (state, action, reward, next_state, done); return buffer size."""
        self.buffer.append(experience)
        return len(self.buffer)

    def sample(self):
        """Return batch_size random buffer indices, sampled without replacement."""
        size = len(self.buffer)
        index = np.random.choice(np.arange(size), size=self.batch_size, replace=False)
        return index

    def createNetwork(self):
        """Build the conv -> dense Q-network; return the Q-value output op."""
        cnn_layer_1 = tf.nn.elu(tf.layers.batch_normalization(self.cnn_layer(self.inp_layer, shape=[8, 8, 4, 16], stride=4), training=True, epsilon=1e-5))
        cnn_layer_2 = tf.nn.elu(tf.layers.batch_normalization(self.cnn_layer(cnn_layer_1, shape=[4, 4, 16, 32], stride=2), training=True, epsilon=1e-5))
        # cnn_layer_3 = tf.nn.elu(tf.layers.batch_normalization(self.cnn_layer(cnn_layer_2,shape=[4,4,64,128],stride=2),training = True, epsilon = 1e-5))
        flat_layer = tf.layers.flatten(cnn_layer_2)
        h1 = tf.nn.elu(self.make_layer(flat_layer, 256))
        output = self.make_layer(h1, self.output_dim)
        return output

    def make_layer(self, input_layer, out_size):
        """Fully connected layer: input_layer @ W + b (no activation)."""
        inp_size = int(input_layer.get_shape()[1])
        W = self.init_wts([inp_size, out_size])
        b = self.init_bias([out_size])
        return tf.matmul(input_layer, W) + b

    def init_bias(self, shape):
        """Bias variable initialised to the constant 0.1."""
        return tf.Variable(tf.constant(0.1), shape, name='BIAS')

    def init_wts(self, shape):
        """Weight variable with Xavier initialisation."""
        initializer = tf.contrib.layers.xavier_initializer()
        return tf.Variable(initializer(shape))

    def conv2d(self, inp, ker, stride):
        """2-D convolution with SAME padding and a square stride."""
        return tf.nn.conv2d(input=inp, filter=ker, strides=[1, stride, stride, 1], padding='SAME')

    @staticmethod
    def max_pool(inp):
        """2x2 max pooling (currently unused by createNetwork).

        BUG FIX: the original signature was `def max_pool(inp):` with no
        `self`, so any `self.max_pool(...)` call would have failed;
        @staticmethod fixes that while keeping `agent.max_pool(x)` working.
        """
        return tf.nn.max_pool(inp, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    def cnn_layer(self, inp, shape, stride):
        """Convolution with freshly initialised weights and biases."""
        wts = self.init_wts(shape)
        bs = self.init_bias([shape[3]])
        return self.conv2d(inp, wts, stride) + bs
if __name__ == "__main__":
    # --- environment setup --------------------------------------------------
    game = DoomGame()
    game.load_config('defend_the_center.cfg')
    # game.load_config('basic2.cfg')
    game.init()
    # One-hot action templates, one per available game button.
    actions = np.identity(game.get_available_buttons_size(), dtype=int).tolist()
    state = game.get_state().screen_buffer
    i = 0
    print('Action Size : ', game.get_available_buttons_size())
    episodes = 1000
    state_size = [84, 84, 4]  # 4 stacked 84x84 grayscale frames
    brain = agent(None, input_dim=state_size, output_dim=3, batch_size=64, tau=0.001, buffer_size=10000)
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        output_size = 3
        brain.sess = sess  # session creation was deferred until here
        sess.run(tf.global_variables_initializer())
        decay_step = 0  # global step driving epsilon annealing
        buff_size = 0   # replay-buffer fill level
        act_1 = np.zeros(3)
        for i in range(episodes):
            # print(objgraph.show_most_common_types())
            step = 0
            print('Episode Number : ', i)
            game.new_episode()
            count = 0  # 0 until the first transition of the episode exists
            total_ep_rew = 0
            l = []     # per-episode training losses
            while not game.is_episode_finished() and step < 200:
                decay_step += 1
                step += 1
                state = game.get_state()
                img = state.screen_buffer
                misc = state.game_variables
                # Train once the buffer has warmed up; otherwise act randomly.
                if (buff_size > 100):
                    l.append(brain.train())
                    a_1, s_1 = brain.choose_act(img, decay_step, False, False)
                elif (count == 0):
                    # First step of the episode: reset the frame stack.
                    a_1, s_1 = brain.choose_act(img, decay_step, False, True)
                else:
                    # Warm-up phase: force a random action.
                    a_1, s_1 = brain.choose_act(img, decay_step, True, False)
                act_1 = np.zeros(3)
                act_1[a_1] = 1  # one-hot action for make_action
                if (count > 0):
                    # Store the previous transition now that s_1 is known.
                    buff_size = brain.add((s_0, a_0, r_0, s_1, game.is_episode_finished()))
                r_1 = game.make_action(act_1.tolist())
                total_ep_rew += r_1
                s_0 = s_1
                a_0 = a_1
                act_0 = act_1
                r_0 = r_1
                count = 1
            # Episode over: store the terminal transition using a dummy frame.
            img = np.ones((84, 84), dtype = np.int)
            if (buff_size > 100):
                l.append(brain.train())
                print('Training loss : ', sum(l)/len(l))
                a_1, s_1 = brain.choose_act(img, decay_step, False, True)
            else:
                a_1, s_1 = brain.choose_act(img, decay_step, True, True)
            if (count > 0):
                buff_size = brain.add((s_0, a_0, r_0, s_1, game.is_episode_finished()))
            print(len(brain.buffer))
            print("Result:", game.get_total_reward())
            # Checkpoint every 5 episodes.
            if (i % 5 == 0):
                save_path = saver.save(sess, "./models_rut/brain.ckpt")
| [
"noreply@github.com"
] | rutvikvijjali.noreply@github.com |
2aa471776ae7fad67f7b996be82f08b6c4f54650 | 9960f7e058fd7c7fb684774dd654b0f3f1f4a84e | /tests/integration/text/test_empty_document.py | 83414e2411c8fc1635f8654d85c1dda4d18cea8c | [
"BSD-3-Clause"
] | permissive | prepare/pyglet | 6998fdf1622db851d497f458dfef0e5ca0424443 | 79684462d2ddbabaa0b0c8d80adb99105890a39b | refs/heads/master | 2020-09-22T01:57:33.244516 | 2016-08-31T02:38:30 | 2016-08-31T02:38:30 | 67,846,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | import unittest
from pyglet import gl
from pyglet import graphics
from pyglet.text import document
from pyglet.text import layout
from pyglet import window
class TestWindow(window.Window):
    """Window that lays out a (possibly empty) text document via a Batch.

    *doctype* is a document class (UnformattedDocument or FormattedDocument);
    an IncrementalTextLayout covering the whole window is built around a
    fresh, empty instance of it.
    """
    def __init__(self, doctype, *args, **kwargs):
        super(TestWindow, self).__init__(*args, **kwargs)
        self.batch = graphics.Batch()
        self.document = doctype()
        self.layout = layout.IncrementalTextLayout(self.document,
            self.width, self.height, batch=self.batch)

    def on_draw(self):
        # White background, then draw the batched layout.
        gl.glClearColor(1, 1, 1, 1)
        self.clear()
        self.batch.draw()

    def set_bold(self):
        # Apply bold over the entire (possibly zero-length) document.
        self.document.set_style(0, len(self.document.text), {"bold": True})
class EmptyDocumentTest(unittest.TestCase):
    """Test that an empty document doesn't break."""

    def _run_case(self, doctype, bold=False):
        """Open a window on an empty *doctype*, optionally embolden it,
        pump one round of events and close.  The four public tests below
        were near-identical copies; this helper removes the duplication.
        The window is kept on `self.window`, as in the original."""
        self.window = TestWindow(doctype)
        if bold:
            self.window.set_bold()
        self.window.dispatch_events()
        self.window.close()

    def test_unformatted(self):
        self._run_case(document.UnformattedDocument)

    def test_formatted(self):
        self._run_case(document.FormattedDocument)

    def test_bold_unformatted(self):
        self._run_case(document.UnformattedDocument, bold=True)

    def test_bold_formatted(self):
        self._run_case(document.FormattedDocument, bold=True)
| [
"ben@isengard"
] | ben@isengard |
81c0750b574537a9ae3abd73b8ebb78de5a7fd6c | 885c9482836df5c4cb8995ab649d36caff7a2194 | /python/__init__.py | ea3c9a99420792f858dc65105d9d0517d117da67 | [] | no_license | NELOUNI/rfnoc-spectrum_sensor | b0562de141733e7c6e04dbba523089f947459a2b | 47f6df789ebaf16920eea7704a720402e685036a | refs/heads/master | 2020-05-21T06:02:28.876834 | 2017-03-10T17:22:45 | 2017-03-10T17:22:45 | 84,583,452 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | #
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio SPECTRUM_SENSOR module. Place your Python package
description here (python/__init__.py).
'''
# import swig generated symbols into the spectrum_sensor namespace
try:
    # this might fail if the module is python-only
    from spectrum_sensor_swig import *
except ImportError:
    # No compiled SWIG bindings available (pure-Python build); the package
    # still imports, just without the C++-backed symbols.
    pass
# import any pure python here
#
| [
"naceur.elouni@nist.gov"
] | naceur.elouni@nist.gov |
9fe5a578b156da01b0ae0d09bb532a70be59e6ed | fe6b204445ae65113a16e6157f8b3d0632684280 | /build/basics/catkin_generated/pkg.develspace.context.pc.py | 468906e6d59d610c652c969578655e14baab4656 | [] | no_license | SaiTeja1898/ros-basics | 3f72926ba0b4a800cf6899e8192d9c871aead7b4 | bbf720a7e8a0a36e4fd4c34a699ab35895dc1c96 | refs/heads/master | 2022-11-19T09:16:17.624121 | 2020-07-18T08:24:57 | 2020-07-18T08:24:57 | 280,619,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by catkin from pkg.context.pc.in -- edit the template,
# not this file.  Devel-space pkg-config values for the "basics" package.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/teja/catkin_ws/devel/include".split(';') if "/home/teja/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "basics"
PROJECT_SPACE_DIR = "/home/teja/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"saiteja.p15@iiits.in"
] | saiteja.p15@iiits.in |
a5372f5c655106f8145adff399760466e3954c13 | 5c21f6425ab41ca130bb6820b57118bb1be82826 | /gameshop/gameshop/storeutils/cart/urls.py | d3d931170f3fec9420a1e7620279591275cb111c | [] | no_license | jy03189211/gameshop | ad2079af9ca84b6ec0246a9be650a15fb6d1f083 | b36f44febb1512e07472fdf34a7af8a2828f362a | refs/heads/master | 2020-05-24T17:19:34.482708 | 2017-03-06T19:23:39 | 2017-03-06T19:23:39 | 84,862,987 | 0 | 0 | null | 2020-05-05T16:39:26 | 2017-03-13T18:52:17 | CSS | UTF-8 | Python | false | false | 238 | py | from django.conf.urls import url
from . import views
# Shopping-cart routes; <item_id> is the numeric id of the item to act on.
urlpatterns = [
    url(r'^add/(?P<item_id>\d+)/$', views.add_to_cart_view, name="cart_add"),
    url(r'^remove/(?P<item_id>\d+)/$', views.remove_from_cart_view, name="cart_remove"),
] | [
"roope.palomaki@me.com"
] | roope.palomaki@me.com |
e8b54851aec473d844e4e39a4dd239fb22a82614 | d534b9e9ae739793a49c32e41b736ce7081cec0b | /mysite/venv/bin/sqlformat | 6cfca9230e7f31cf1e5c15a16bf6a2e0b2b45967 | [] | no_license | VladyslavPodrazhanskyi/django | 1d7e41404b90977ad75195935f53fd923b126b57 | fbaa1b52281a700aae90f6d81b2e0ad9f61c1142 | refs/heads/master | 2020-06-12T22:09:31.811495 | 2019-06-29T19:38:46 | 2019-06-29T19:38:46 | 194,441,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/home/pvv/projects/django/mysite/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # pip console-script shim: strip the "-script.py"/".exe" suffix so the
    # command reports itself as "sqlformat", then delegate to sqlparse's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"vladislav304304@gmail.com"
] | vladislav304304@gmail.com | |
1704eae1f7b1ffd9b9fc112defa0ea55e1ad4a8d | fa4cf4236eac798c60c4e3195d78e603b846b37f | /03-Python编程:从入门到实践/DataVisualization/PopulationMap/NorthAmericaPopulation.py | 5f89bd862a6d4d36ff837760fa4ced98d247c25e | [
"Apache-2.0"
] | permissive | iloeng/ReadBooks | d58131b9c216ef1f8948ee2a680314a7701b9526 | e997b9c876cc692206a46f139a6b1fb0ba3787ab | refs/heads/master | 2022-11-22T13:33:09.074552 | 2019-09-21T03:20:19 | 2019-09-21T03:20:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: NorthAmericaPopulation
Description :
Author : Liangz
Date: 2018/10/26
-------------------------------------------------
Change Activity:
2018/10/26:
-------------------------------------------------
"""
__author__ = 'Liangz'
from pygal_maps_world.maps import World
world_map = World()
world_map.title = 'Populations Of Countries In North America'
world_map.add('North America', {'ca': 34126000, 'us': 309349000, 'mx': 113423000})
world_map.render_to_file('NA_population.svg')
| [
"Liangz.org@gmail.com"
] | Liangz.org@gmail.com |
4a9f0774832445d60d06aa421f5507b09539cfa8 | 5d4d4f5a0237b8584e48bb090e4171d30d0bdf81 | /xlstolua/xls2luas.py | a2f83dbe2b232d2a3138971ee6c94084383c621f | [] | no_license | dmxzxy/Love2DCrowdTest | b7233d61d42fdab15ba5d6962bff604e07736c0f | bbee735cebfcb9820922b17a49575395bce8ab50 | refs/heads/master | 2021-01-22T02:21:03.999914 | 2018-02-23T10:35:11 | 2018-02-23T10:35:11 | 92,353,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
from summary_tools import *
from descriptor import *
import plugins
ignoreList = [
'90.脏字过滤_keywords'
]
def isInIgnoreList(fname):
    """Return True if *fname* (a GBK-encoded byte string) names one of the
    spreadsheets listed in the module-level ``ignoreList`` (UTF-8 bytes).
    """
    # Decode once, before the loop.  The original re-assigned
    # fname = fname.decode('gbk') on *every* iteration, so from the second
    # entry onward it decoded an already-decoded unicode value (which, on
    # Python 2, goes through an implicit ascii encode and raises
    # UnicodeDecodeError for any non-ASCII name).
    fname = fname.decode('gbk')
    for entry in ignoreList:
        if entry.decode('utf8') == fname:
            return True
    return False
def main() :
    """Convert changed .xls sheets under argv[1] into generated code/Lua.

    argv: [1] spreadsheet source dir, [2] output dir, [3] version string.
    (Python 2 script: uses print statements and dict.iteritems.)
    """
    xlspath = sys.argv[1]
    topath = sys.argv[2]
    ver = sys.argv[3]
    # Check that the spreadsheet directory exists.
    if not os.path.exists( xlspath ) :
        print xlspath, " is not exist !"
        exit(0)
    # Diff the current file summary against the one saved by the previous
    # run so only added/changed spreadsheets are regenerated.
    file_sumary = generate_file_sumary(xlspath)
    summary_path = xlspath + "/summary.txt"
    last_file_summary = read_file_summary(summary_path)
    summary_diff = compare_file_summary(file_sumary,last_file_summary)
    # Walk the updated entries under xlspath, skipping ignored sheets.
    files = []
    for k,v in summary_diff["updated"].iteritems():
        if isInIgnoreList(k):
            continue
        files.append(v.path)
    print "\n\nstart gen .......................\n"
    # Collect the export types advertised by each plugin (type_name is an
    # optional attribute, hence the try/except probe).
    support = []
    for name in plugins.__all__:
        plugin = getattr(plugins, name)
        try:
            type_name = plugin.type_name
        except AttributeError:
            pass
        else:
            support.append(type_name())
    print 'support export types : ' + str(support) + '\n\n'
    # Run every plugin that exposes gen_code over the changed files.
    for name in plugins.__all__:
        code_gen_req = CodeGenerateRequest(files, ver)
        code_gen_response = CodeGenerateResponse(topath)
        plugin = getattr(plugins, name)
        try:
            gen_code = plugin.gen_code
        except AttributeError:
            pass
        else:
            gen_code(code_gen_req, code_gen_response, topath)
            code_gen_response.saveToFile()
    #write_file_summary(file_sumary,summary_path)
    print "\n\n\n\nDone.........................."
if __name__ == "__main__" :
main()
| [
"xiaoyu.zhang@dena.com"
] | xiaoyu.zhang@dena.com |
2556ae1392f806ccb45d5def032e85458b535096 | 91503ee4dd0f64aba9963374f4ca82ab814e9809 | /env/bin/djeesefs | 4fb1da014b6a5547d5b99ce321475120d8afb528 | [] | no_license | Venturi/asir | c59b90b618983770c5621deb2a05a5b14b04002e | fd5765b2c2b5b16660ab9c41c3148201c4e19158 | refs/heads/develop | 2022-10-19T03:04:25.473415 | 2019-03-14T11:21:57 | 2019-03-14T11:21:57 | 51,375,781 | 0 | 1 | null | 2022-10-11T05:57:58 | 2016-02-09T15:31:38 | CSS | UTF-8 | Python | false | false | 212 | #!/app/env/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from fs.cli import main
if __name__ == '__main__':
    # pip console-script shim: normalize argv[0] (drop "-script.pyw"/".exe")
    # and hand control to the pyfilesystem CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"venturi@openmailbox.org"
] | venturi@openmailbox.org | |
6421d69b4ba2284140d9f66784a8d252db793176 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /bittenforgitplugin/0.11/0.6b2/bitten/build/pythontools.py | 6a3d9219e8fea9eab21026b73f367d71dde254f7 | [
"BSD-3-Clause"
] | permissive | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,370 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2008 Matt Good <matt@matt-good.net>
# Copyright (C) 2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://bitten.edgewall.org/wiki/License.
"""Recipe commands for tools commonly used by Python projects."""
from __future__ import division
import logging
import os
import cPickle as pickle
import re
try:
set
except NameError:
from sets import Set as set
import shlex
import sys
from bitten.build import CommandLine, FileSet
from bitten.util import loc, xmlio
log = logging.getLogger('bitten.build.pythontools')
__docformat__ = 'restructuredtext en'
def _python_path(ctxt):
"""Return the path to the Python interpreter.
If the configuration has a ``python.path`` property, the value of that
option is returned; otherwise the path to the current Python interpreter is
returned.
"""
python_path = ctxt.config.get_filepath('python.path')
if python_path:
return python_path
return sys.executable
def distutils(ctxt, file_='setup.py', command='build', options=None):
    """Execute a ``distutils`` command.

    :param ctxt: the build context
    :type ctxt: `Context`
    :param file\_: name of the file defining the distutils setup
    :param command: the setup command to execute
    :param options: additional options to pass to the command
    """
    if options:
        # Accept either a shell-style string or an already-split list.
        if isinstance(options, basestring):
            options = shlex.split(options)
    else:
        options = []
    cmdline = CommandLine(_python_path(ctxt),
                          [ctxt.resolve(file_), command] + options,
                          cwd=ctxt.basedir)
    log_elem = xmlio.Fragment()
    error_logged = False
    for out, err in cmdline.execute():
        if out is not None:
            log.info(out)
            log_elem.append(xmlio.Element('message', level='info')[out])
        if err is not None:
            level = 'error'
            # distutils prefixes its stderr diagnostics; map the prefix to a
            # log level and strip it from the message.
            if err.startswith('warning: '):
                err = err[9:]
                level = 'warning'
                log.warning(err)
            elif err.startswith('error: '):
                ctxt.error(err[7:])
                error_logged = True
            else:
                log.error(err)
            log_elem.append(xmlio.Element('message', level=level)[err])
    ctxt.log(log_elem)
    # Only flag a generic failure if no specific error was already reported.
    if not error_logged and cmdline.returncode != 0:
        ctxt.error('distutils failed (%s)' % cmdline.returncode)
def exec_(ctxt, file_=None, module=None, function=None, output=None, args=None):
    """Execute a Python script.

    Either the `file_` or the `module` parameter must be provided. If
    specified using the `file_` parameter, the file must be inside the project
    directory. If specified as a module, the module must either be resolvable
    to a file, or the `function` parameter must be provided

    :param ctxt: the build context
    :type ctxt: `Context`
    :param file\_: name of the script file to execute
    :param module: name of the Python module to execute
    :param function: name of the Python function to run
    :param output: name of the file to which output should be written
    :param args: extra arguments to pass to the script
    """
    assert file_ or module, 'Either "file" or "module" attribute required'
    if function:
        assert module and not file_, '"module" attribute required for use of ' \
                                     '"function" attribute'
    if module:
        # Script specified as module name, need to resolve that to a file,
        # or use the function name if provided
        if function:
            # Run the function via "python -c ..." so no file is needed.
            args = '-c "import sys; from %s import %s; %s(sys.argv)" %s' % (
                   module, function, function, args)
        else:
            try:
                # __import__ returns the top-level package; walk the dotted
                # components to reach the actual module and its file path.
                mod = __import__(module, globals(), locals(), [])
                components = module.split('.')
                for comp in components[1:]:
                    mod = getattr(mod, comp)
                file_ = mod.__file__.replace('\\', '/')
            except ImportError, e:
                ctxt.error('Cannot execute Python module %s: %s' % (module, e))
                return

    # Local import -- presumably avoids a circular import at module load;
    # confirm before hoisting.
    from bitten.build import shtools
    returncode = shtools.execute(ctxt, executable=_python_path(ctxt),
                                 file_=file_, output=output, args=args)
    if returncode != 0:
        ctxt.error('Executing %s failed (error code %s)' % (file_, returncode))
def pylint(ctxt, file_=None):
    """Extract data from a ``pylint`` run written to a file.

    :param ctxt: the build context
    :type ctxt: `Context`
    :param file\_: name of the file containing the Pylint output
    """
    assert file_, 'Missing required attribute "file"'
    # Matches lines like "pkg/mod.py:12: [W0612, some.func] unused variable".
    msg_re = re.compile(r'^(?P<file>.+):(?P<line>\d+): '
                        r'\[(?P<type>[A-Z]\d*)(?:, (?P<tag>[\w\.]+))?\] '
                        r'(?P<msg>.*)$')
    # First letter of the pylint message id selects the report category.
    msg_categories = dict(W='warning', E='error', C='convention', R='refactor')

    problems = xmlio.Fragment()
    try:
        fd = open(ctxt.resolve(file_), 'r')
        try:
            for line in fd:
                match = msg_re.search(line)
                if match:
                    msg_type = match.group('type')
                    category = msg_categories.get(msg_type[0])
                    # A bare category letter carries no numeric type id.
                    if len(msg_type) == 1:
                        msg_type = None
                    filename = match.group('file')
                    # Report paths relative to the build dir, '/'-separated.
                    if os.path.isabs(filename) \
                            and filename.startswith(ctxt.basedir):
                        filename = filename[len(ctxt.basedir) + 1:]
                    filename = filename.replace('\\', '/')
                    lineno = int(match.group('line'))
                    tag = match.group('tag')
                    problems.append(xmlio.Element('problem', category=category,
                                                  type=msg_type, tag=tag,
                                                  line=lineno, file=filename)[
                        match.group('msg') or ''
                    ])
            ctxt.report('lint', problems)
        finally:
            fd.close()
    except IOError, e:
        log.warning('Error opening pylint results file (%s)', e)
def coverage(ctxt, summary=None, coverdir=None, include=None, exclude=None):
    """Extract data from a ``coverage.py`` run.

    :param ctxt: the build context
    :type ctxt: `Context`
    :param summary: path to the file containing the coverage summary
    :param coverdir: name of the directory containing the per-module coverage
                     details
    :param include: patterns of files or directories to include in the report
    :param exclude: patterns of files or directories to exclude from the report
    """
    assert summary, 'Missing required attribute "summary"'
    # One row of "coverage report" output: module, stmts, exec, cover%, file.
    summary_line_re = re.compile(r'^(?P<module>.*?)\s+(?P<stmts>\d+)\s+'
                                 r'(?P<exec>\d+)\s+(?P<cov>\d+)%\s+'
                                 r'(?:(?P<missing>(?:\d+(?:-\d+)?(?:, )?)*)\s+)?'
                                 r'(?P<file>.+)$')

    fileset = FileSet(ctxt.basedir, include, exclude)
    # Assume every .py file is uncovered until seen in the summary.
    missing_files = []
    for filename in fileset:
        if os.path.splitext(filename)[1] != '.py':
            continue
        missing_files.append(filename)
    covered_modules = set()
    try:
        summary_file = open(ctxt.resolve(summary), 'r')
        try:
            coverage = xmlio.Fragment()
            for summary_line in summary_file:
                match = summary_line_re.search(summary_line)
                if match:
                    modname = match.group(1)
                    filename = match.group(6)
                    # Normalise to a path relative to the build directory.
                    if not os.path.isabs(filename):
                        filename = os.path.normpath(os.path.join(ctxt.basedir,
                                                                 filename))
                    else:
                        filename = os.path.realpath(filename)
                    if not filename.startswith(ctxt.basedir):
                        continue
                    filename = filename[len(ctxt.basedir) + 1:]
                    if not filename in fileset:
                        continue
                    percentage = int(match.group(4).rstrip('%'))
                    num_lines = int(match.group(2))
                    missing_files.remove(filename)
                    covered_modules.add(modname)
                    module = xmlio.Element('coverage', name=modname,
                                           file=filename.replace(os.sep, '/'),
                                           percentage=percentage,
                                           lines=num_lines)
                    coverage.append(module)
            # Files never mentioned in the summary are reported as 0%.
            for filename in missing_files:
                modname = os.path.splitext(filename.replace(os.sep, '.'))[0]
                if modname in covered_modules:
                    continue
                covered_modules.add(modname)
                module = xmlio.Element('coverage', name=modname,
                                       file=filename.replace(os.sep, '/'),
                                       percentage=0)
                coverage.append(module)
            ctxt.report('coverage', coverage)
        finally:
            summary_file.close()
    except IOError, e:
        log.warning('Error opening coverage summary file (%s)', e)
def trace(ctxt, summary=None, coverdir=None, include=None, exclude=None):
    """Extract data from a ``trace.py`` run.

    :param ctxt: the build context
    :type ctxt: `Context`
    :param summary: path to the file containing the coverage summary
    :param coverdir: name of the directory containing the per-module coverage
                     details
    :param include: patterns of files or directories to include in the report
    :param exclude: patterns of files or directories to exclude from the report
    """
    assert summary, 'Missing required attribute "summary"'
    assert coverdir, 'Missing required attribute "coverdir"'

    # One row of trace.py's summary: "<lines> <cov>% <module> (<file>)".
    summary_line_re = re.compile(r'^\s*(?P<lines>\d+)\s+(?P<cov>\d+)%\s+'
                                 r'(?P<module>.*?)\s+\((?P<filename>.*?)\)')
    # One line of a .cover file: optional "<hits>: " prefix, then the source.
    coverage_line_re = re.compile(r'\s*(?:(?P<hits>\d+): )?(?P<line>.*)')

    fileset = FileSet(ctxt.basedir, include, exclude)
    # Assume every .py file is uncovered until seen in the summary.
    missing_files = []
    for filename in fileset:
        if os.path.splitext(filename)[1] != '.py':
            continue
        missing_files.append(filename)
    covered_modules = set()

    def handle_file(elem, sourcefile, coverfile=None):
        # Annotate *elem* with per-line hits and a coverage percentage,
        # combining the source (to know which lines are code, via loc.count)
        # with the trace.py .cover annotations.
        code_lines = set()
        for lineno, linetype, line in loc.count(sourcefile):
            if linetype == loc.CODE:
                code_lines.add(lineno)
        num_covered = 0
        lines = []

        if coverfile:
            prev_hits = '0'
            for idx, coverline in enumerate(coverfile):
                match = coverage_line_re.search(coverline)
                if match:
                    hits = match.group(1)
                    if hits: # Line covered
                        if hits != '0':
                            num_covered += 1
                        lines.append(hits)
                        prev_hits = hits
                    elif coverline.startswith('>'): # Line not covered
                        lines.append('0')
                        prev_hits = '0'
                    elif idx not in code_lines: # Not a code line
                        lines.append('-')
                        prev_hits = '0'
                    else: # A code line not flagged by trace.py
                        # Assume the same coverage as the preceding line.
                        if prev_hits != '0':
                            num_covered += 1
                        lines.append(prev_hits)
            elem.append(xmlio.Element('line_hits')[' '.join(lines)])

        # With no .cover data, fall back to counting the source's code lines.
        num_lines = not lines and len(code_lines) or \
                    len([l for l in lines if l != '-'])
        if num_lines:
            percentage = int(round(num_covered * 100 / num_lines))
        else:
            percentage = 0
        elem.attr['percentage'] = percentage
        elem.attr['lines'] = num_lines

    try:
        summary_file = open(ctxt.resolve(summary), 'r')
        try:
            coverage = xmlio.Fragment()
            for summary_line in summary_file:
                match = summary_line_re.search(summary_line)
                if match:
                    modname = match.group(3)
                    filename = match.group(4)
                    # Normalise to a path relative to the build directory.
                    if not os.path.isabs(filename):
                        filename = os.path.normpath(os.path.join(ctxt.basedir,
                                                                 filename))
                    else:
                        filename = os.path.realpath(filename)
                    if not filename.startswith(ctxt.basedir):
                        continue
                    filename = filename[len(ctxt.basedir) + 1:]
                    if not filename in fileset:
                        continue

                    missing_files.remove(filename)
                    covered_modules.add(modname)
                    module = xmlio.Element('coverage', name=modname,
                                           file=filename.replace(os.sep, '/'))
                    sourcefile = file(ctxt.resolve(filename))
                    try:
                        coverpath = ctxt.resolve(coverdir, modname + '.cover')
                        if os.path.isfile(coverpath):
                            coverfile = file(coverpath, 'r')
                        else:
                            log.warning('No coverage file for module %s at %s',
                                        modname, coverpath)
                            coverfile = None
                        try:
                            handle_file(module, sourcefile, coverfile)
                        finally:
                            if coverfile:
                                coverfile.close()
                    finally:
                        sourcefile.close()
                    coverage.append(module)

            # Modules never mentioned in the summary are reported as 0%.
            for filename in missing_files:
                modname = os.path.splitext(filename.replace(os.sep, '.'))[0]
                if modname in covered_modules:
                    continue
                covered_modules.add(modname)
                module = xmlio.Element('coverage', name=modname,
                                       file=filename.replace(os.sep, '/'),
                                       percentage=0)
                filepath = ctxt.resolve(filename)
                fileobj = file(filepath, 'r')
                try:
                    handle_file(module, fileobj)
                finally:
                    fileobj.close()
                coverage.append(module)
            ctxt.report('coverage', coverage)
        finally:
            summary_file.close()
    except IOError, e:
        log.warning('Error opening coverage summary file (%s)', e)
def figleaf(ctxt, summary=None, include=None, exclude=None):
    """Extract data from a ``Figleaf`` run.

    :param ctxt: the build context
    :type ctxt: `Context`
    :param summary: path to the file containing the coverage summary
    :param include: patterns of files or directories to include in the report
    :param exclude: patterns of files or directories to exclude from the report
    """
    from figleaf import get_lines
    coverage = xmlio.Fragment()
    try:
        fileobj = open(ctxt.resolve(summary))
    except IOError, e:
        log.warning('Error opening coverage summary file (%s)', e)
        return
    # The summary is a pickled {filename: set(covered linenos)} mapping.
    coverage_data = pickle.load(fileobj)
    fileset = FileSet(ctxt.basedir, include, exclude)
    for filename in fileset:
        base, ext = os.path.splitext(filename)
        if ext != '.py':
            continue
        modname = base.replace(os.path.sep, '.')
        realfilename = ctxt.resolve(filename)
        interesting_lines = get_lines(open(realfilename))
        covered_lines = coverage_data.get(realfilename, set())
        # NOTE(review): an empty interesting_lines set would raise
        # ZeroDivisionError here (and max() below would fail) -- presumably
        # figleaf never yields an empty set for a .py file; confirm.
        percentage = int(round(len(covered_lines) * 100 / len(interesting_lines)))
        line_hits = []
        # Per-line markers: '-' not executable, '1' covered, '0' uncovered.
        for lineno in xrange(1, max(interesting_lines)+1):
            if lineno not in interesting_lines:
                line_hits.append('-')
            elif lineno in covered_lines:
                line_hits.append('1')
            else:
                line_hits.append('0')
        module = xmlio.Element('coverage', name=modname,
                               file=filename.replace(os.sep, '/'),
                               percentage=percentage,
                               lines=len(interesting_lines),
                               line_hits=' '.join(line_hits))
        coverage.append(module)
    ctxt.report('coverage', coverage)
def _normalize_filenames(ctxt, filenames, fileset):
for filename in filenames:
if not os.path.isabs(filename):
filename = os.path.normpath(os.path.join(ctxt.basedir,
filename))
else:
filename = os.path.realpath(filename)
if not filename.startswith(ctxt.basedir):
continue
filename = filename[len(ctxt.basedir) + 1:]
if filename not in fileset:
continue
yield filename.replace(os.sep, '/')
def unittest(ctxt, file_=None):
    """Extract data from a unittest results file in XML format.

    :param ctxt: the build context
    :type ctxt: `Context`
    :param file\_: name of the file containing the test results
    """
    assert file_, 'Missing required attribute "file"'
    try:
        fileobj = file(ctxt.resolve(file_), 'r')
        try:
            total, failed = 0, 0
            results = xmlio.Fragment()
            for child in xmlio.parse(fileobj).children():
                test = xmlio.Element('test')
                for name, value in child.attr.items():
                    if name == 'file':
                        # Keep only file paths inside the build directory,
                        # rewritten relative with '/' separators; drop the
                        # attribute otherwise.
                        value = os.path.realpath(value)
                        if value.startswith(ctxt.basedir):
                            value = value[len(ctxt.basedir) + 1:]
                            value = value.replace(os.sep, '/')
                        else:
                            continue
                    test.attr[name] = value
                    if name == 'status' and value in ('error', 'failure'):
                        failed += 1
                # Copy nested elements (output, traceback, ...) verbatim.
                for grandchild in child.children():
                    test.append(xmlio.Element(grandchild.name)[
                        grandchild.gettext()
                    ])
                results.append(test)
                total += 1
            if failed:
                ctxt.error('%d of %d test%s failed' % (failed, total,
                           total != 1 and 's' or ''))
            ctxt.report('test', results)
        finally:
            fileobj.close()
    except IOError, e:
        log.warning('Error opening unittest results file (%s)', e)
    except xmlio.ParseError, e:
        log.warning('Error parsing unittest results file (%s)', e)
| [
"tauran@7322e99d-02ea-0310-aa39-e9a107903beb"
] | tauran@7322e99d-02ea-0310-aa39-e9a107903beb |
d53fb38508be7fd74076e39e5bf8335660d0fae8 | 1dd4c16c80ebf0f7fc05879441dbc480610219cf | /calculator.pyw | 0b15554747846ca4dee64f58810e95bcbc7f0762 | [] | no_license | deepakmittal1412/Claculator-Python | a93a579efe4d3a789337e127e2c0de5ea98a2370 | e08ef54a18317a2777d70515d6c7a2a876f01356 | refs/heads/master | 2021-05-01T18:19:05.437411 | 2018-02-10T10:40:46 | 2018-02-10T10:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,844 | pyw | from tkinter import *
import parser
root = Tk()
root.resizable(0, 0)  # lock the window to a fixed size
root.propagate()      # tkinter alias for pack_propagate(): size to content
i = 0                 # cursor position in the entry; shared by the handlers below
def operation(op):
    """Insert the operator string *op* at the current cursor position and
    advance the shared cursor index by its length."""
    global i
    l = len(op)
    ent.insert(i, op)
    i += l
def calculate():
    """Evaluate the expression in the entry and display the result."""
    s = ent.get()
    if '!' in s:
        # Expand a factorial in place before evaluating.
        # NOTE(review): only handles a single-digit operand ('9!' works,
        # '10!' does not) and only the first '!' -- confirm intended.
        j = s.index('!')
        n = int(s[j - 1])
        f = 1
        for k in range(n, 1, -1):
            f *= k
        f = str(f)
        s = s[:j - 1] + f + s[j + 1:]
    try:
        # parser.expr compiles the text as a Python expression; eval runs it.
        # NOTE(review): the 'parser' module was removed in Python 3.10, and
        # eval() on user-typed text is unsafe -- confirm target interpreter.
        a = parser.expr(s).compile()
        r = eval(a)
        clearall()
        ent.insert(0, r)
    except Exception:
        # Any parse/eval failure is shown as a generic "Error".
        clearall()
        ent.insert(0, "Error")
def get_values(num):
    """Insert the digit *num* at the cursor and advance it by one."""
    global i
    ent.insert(i, num)
    i += 1
def clearall():
    """Remove all text from the entry widget."""
    ent.delete(0, END)
def undo():
    """Delete the last character; show "Error" if the entry is empty.

    NOTE(review): the shared cursor index ``i`` is not decremented here,
    so subsequent inserts still target the old position -- confirm.
    """
    s = ent.get()
    if (len(s)):
        clearall()
        ent.insert(0, s[:-1])
    else:
        clearall()
        ent.insert(0, "Error")
# --- widget layout: one entry on top, a 6x4 button grid below -------------
ent = Entry(root)
ent.grid(row=1, columnspan=10, sticky=W + E)
# Digit and control keys (columns 0-2).
Button(root, text='1', command=lambda: get_values(1)).grid(row=2, column=0)
Button(root, text='2', command=lambda: get_values(2)).grid(row=2, column=1)
Button(root, text='3', command=lambda: get_values(3)).grid(row=2, column=2)
Button(root, text='4', command=lambda: get_values(4)).grid(row=3, column=0)
Button(root, text='5', command=lambda: get_values(5)).grid(row=3, column=1)
Button(root, text='6', command=lambda: get_values(6)).grid(row=3, column=2)
Button(root, text='7', command=lambda: get_values(7)).grid(row=4, column=0)
Button(root, text='8', command=lambda: get_values(8)).grid(row=4, column=1)
Button(root, text='9', command=lambda: get_values(9)).grid(row=4, column=2)
Button(root, text='AC', command=clearall).grid(row=5, column=0)
Button(root, text='0', command=lambda: get_values(0)).grid(row=5, column=1)
Button(root, text='=', command=calculate).grid(row=5, column=2)
# Basic arithmetic operators (column 3).
Button(root, text='+', command=lambda: operation("+")).grid(row=2, column=3)
Button(root, text='-', command=lambda: operation("-")).grid(row=3, column=3)
Button(root, text='*', command=lambda: operation("*")).grid(row=4, column=3)
Button(root, text='/', command=lambda: operation("/")).grid(row=5, column=3)
# Extended operations (columns 4-5): 'pi' inserts "*3.14", 'exp' is "**",
# '<-' is backspace, 'X!' marks a factorial expanded by calculate().
Button(root, text='pi', command=lambda: operation("*3.14")).grid(row=2, column=4)
Button(root, text='%', command=lambda: operation("%")).grid(row=3, column=4)
Button(root, text='(', command=lambda: operation("(")).grid(row=4, column=4)
Button(root, text='exp', command=lambda: operation("**")).grid(row=5, column=4)
Button(root, text='<-', command=undo).grid(row=2, column=5)
Button(root, text='X!', command=lambda: operation("!")).grid(row=3, column=5)
Button(root, text=')', command=lambda: operation(")")).grid(row=4, column=5)
Button(root, text='X^2', command=lambda: operation("**2")).grid(row=5, column=5)
root.mainloop()
| [
"noreply@github.com"
] | deepakmittal1412.noreply@github.com |
b89e94fe82fecc77f5813b38e5a7aef86986ca77 | 862404583e59163a512c692e732b22ce5affa0a8 | /sidemash_sdk/StreamSquare.py | 1042f0863b0ab5a63f93339c2552b33184ce19d3 | [
"Apache-2.0"
] | permissive | sidemashold/sdk-python | f06cba8eae3cc63bd0b63365e99b3d52bca809f9 | d77e2eb8f50f818511b5a8538da6fd6d981d5e42 | refs/heads/master | 2023-02-24T22:57:11.893984 | 2021-02-02T15:42:08 | 2021-02-02T15:42:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,671 | py | # Copyright © 2020 Sidemash Cloud Services
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from sidemash_sdk.Hook import Hook
from sidemash_sdk.InstanceStatus import InstanceStatus
from sidemash_sdk.Publish import Publish
from sidemash_sdk.StreamSquareSize import StreamSquareSize
from typing import Dict
from typing import Optional
import json
class StreamSquare:
    """Client-side model of a Sidemash "StreamSquare" instance.

    Two dict representations exist side by side: a camelCase "remote" form
    (``__to_remote_dict``/``__from_remote_dict``, None fields dropped) and a
    snake_case local form (``to_dict``/``from_dict``, None fields kept).
    """

    def __init__(self,
                 id: str,
                 url: str,
                 status: InstanceStatus,
                 is_elastic: bool,
                 size: StreamSquareSize,
                 play_domain_name: Optional[str],
                 publish_domain_name: Optional[str],
                 publish: Publish,
                 hook: Hook,
                 description: Optional[str],
                 foreign_data: Optional[str]):
        # NOTE(review): this instance attribute shadows the _type()
        # staticmethod below on every instance -- confirm intended.
        self._type = "StreamSquare"
        self.id = id
        self.url = url
        self.status = status
        self.is_elastic = is_elastic
        self.size = size
        self.play_domain_name = play_domain_name
        self.publish_domain_name = publish_domain_name
        self.publish = publish
        self.hook = hook
        self.description = description
        self.foreign_data = foreign_data

    @staticmethod
    def _type():
        # Type tag used by the SDK's serialisation machinery.
        return "StreamSquare"

    @staticmethod
    def from_json(js: str):
        """Build a StreamSquare from a local-format (snake_case) JSON string."""
        value = json.loads(js)
        return StreamSquare.from_dict(value)

    def __to_remote_dict(self):
        # camelCase wire dict; None-valued entries are filtered out below.
        # NOTE(review): ``self.publish.__to_remote_dict()`` is name-mangled
        # here to ``publish._StreamSquare__to_remote_dict`` (likewise for
        # hook), a method Publish/Hook do not define, so this raises
        # AttributeError at runtime -- confirm against the SDK's other
        # generated classes.
        tuples = [
            ('id', self.id),
            ('url', self.url),
            ('status', str(self.status)),
            ('isElastic', self.is_elastic),
            ('size', str(self.size)),
            ('playDomainName', self.play_domain_name),
            ('publishDomainName', self.publish_domain_name),
            ('publish', self.publish.__to_remote_dict()),
            ('hook', self.hook.__to_remote_dict()),
            ('description', self.description),
            ('foreignData', self.foreign_data)
        ]
        return dict(t for t in tuples if t[1] is not None)

    def to_dict(self):
        """Return the snake_case local dict form (None values preserved)."""
        return dict([
            ('id', self.id),
            ('url', self.url),
            ('status', str(self.status)),
            ('is_elastic', self.is_elastic),
            ('size', str(self.size)),
            ('play_domain_name', self.play_domain_name),
            ('publish_domain_name', self.publish_domain_name),
            ('publish', self.publish.to_dict()),
            ('hook', self.hook.to_dict()),
            ('description', self.description),
            ('foreign_data', self.foreign_data)
        ])

    @staticmethod
    def __from_remote_dict(d: Dict[str, any]):
        # NOTE(review): annotation uses the builtin ``any``; presumably
        # ``typing.Any`` was intended.  The ``Publish.__from_remote_dict`` /
        # ``Hook.__from_remote_dict`` calls are name-mangled to
        # ``_StreamSquare__from_remote_dict`` and would fail -- confirm.
        return StreamSquare(d["id"],
                            d["url"],
                            InstanceStatus.from_string(d["status"]),
                            d["isElastic"],
                            StreamSquareSize.from_string(d["size"]),
                            d["playDomainName"] if "playDomainName" in d else None,
                            d["publishDomainName"] if "publishDomainName" in d else None,
                            Publish.__from_remote_dict(d["publish"]),
                            Hook.__from_remote_dict(d["hook"]),
                            d["description"] if "description" in d else None,
                            d["foreignData"] if "foreignData" in d else None)

    @staticmethod
    def from_dict(d: Dict[str, any]):
        """Build a StreamSquare from the snake_case local dict form."""
        return StreamSquare(d["id"],
                            d["url"],
                            InstanceStatus.from_string(d["status"]),
                            d["is_elastic"],
                            StreamSquareSize.from_string(d["size"]),
                            d["play_domain_name"] if "play_domain_name" in d else None,
                            d["publish_domain_name"] if "publish_domain_name" in d else None,
                            Publish.from_dict(d["publish"]),
                            Hook.from_dict(d["hook"]),
                            d["description"] if "description" in d else None,
                            d["foreign_data"] if "foreign_data" in d else None)

    def to_json(self):
        """Serialise the local (snake_case) form as JSON."""
        return json.dumps(self.to_dict())

    def __to_remote_json(self):
        # Serialise the remote (camelCase) form as JSON.
        return json.dumps(self.__to_remote_dict())

    def to_string(self):
        """Alias for repr()."""
        return self.__repr__()

    def __repr__(self):
        return ("StreamSquare(id=" + self.id +
                ", url=" + self.url +
                ", status=" + repr(self.status) +
                ", is_elastic=" + str(self.is_elastic) +
                ", size=" + repr(self.size) +
                ", play_domain_name=" + str(self.play_domain_name) +
                ", publish_domain_name=" + str(self.publish_domain_name) +
                ", publish=" + repr(self.publish) +
                ", hook=" + repr(self.hook) +
                ", description=" + str(self.description) +
", foreign_data=" + str(self.foreign_data) + ")") | [
"serge.nguetta@sidemash.com"
] | serge.nguetta@sidemash.com |
691d14e28d73bf783bdb8e31d1289980cfa3a85e | 3330b2672f6f4677e2a169469f5512f0e4d4e5a3 | /levelword.py | 25316903ab13ea6619ecf8d648674817a962631f | [] | no_license | XcloudFance/Project.Irene | 49dcf321c2ad947b4edf415f3501c5af93b163a2 | a6f2e5a66ecd963f303add439a164c63e0532f79 | refs/heads/master | 2021-07-03T16:09:51.048626 | 2020-03-07T07:45:34 | 2020-03-07T07:45:34 | 241,767,722 | 0 | 0 | null | 2021-05-30T02:29:04 | 2020-02-20T01:39:39 | Python | UTF-8 | Python | false | false | 1,767 | py | import requests_html
from requests_html import requests
from requests_html import HTMLSession
from bs4 import BeautifulSoup
from multiprocessing import Process,Pipe,Pool
import numpy as np
import redis
#redis_conn = redis.Redis(host='127.0.0.1', port= 6379, password= '', db= 0)
class node:
    # Simple key/value record.
    # NOTE(review): the initialiser is named ``init``, not ``__init__``, so
    # it never runs automatically; the class is unused in this module --
    # confirm before removing.
    def init(self):
        self.key = ''
        self.val = 0
        pass
def search(content,conn):
    """Look up *content* on Cambridge Dictionary and return ``[word, level]``.

    ``level`` is the 1-based index (A1=1 .. C2=6) of the first CEFR marker
    span found in the page HTML.  NOTE(review): when no marker is found the
    loop still leaves level == 6, so unknown words score as C2 -- confirm.
    *conn* (a redis handle) is currently unused (caching is commented out).
    """
    # CEFR level marker spans exactly as they appear in the page source.
    strtmp = [
        '<span class="epp-xref dxref A1">A1</span>',
        '<span class="epp-xref dxref A2">A2</span>',
        '<span class="epp-xref dxref B1">B1</span>',
        '<span class="epp-xref dxref B2">B2</span>',
        '<span class="epp-xref dxref C1">C1</span>',
        '<span class="epp-xref dxref C2">C2</span>'
    ]
    r = requests.get('https://dictionary.cambridge.org/zhs/词典/英语-汉语-简体/'+content)
    code = r.text
    s = -1
    level = 0
    # Stop at the first marker present; its 1-based position is the level.
    for i in strtmp:
        s = code.find(i)
        level+=1
        if s != -1:
            break
    key = content
    val = level
    #conn.set(key,val)
    return [key,val]
def judgment(wordlist):
    """Return a 0-10 difficulty score for *wordlist*.

    Each unique word is looked up via search(); the mean CEFR level (1..6)
    is rescaled to a 0-10 band (level/6 * 10).
    """
    wordlist = list(set(wordlist))  # de-duplicate before scoring
    redis_conn = 0                  # placeholder: the redis cache is disabled
    p = Pool()
    result = []
    for i in wordlist:
        # NOTE(review): calling .get() right after apply_async blocks on each
        # task, so the lookups effectively run one at a time -- confirm.
        result.append(p.apply_async(search, args=(i,redis_conn)).get())
    p.close()
    p.join()
    score_average = 0
    for i in result:
        score_average += i[1]
    score_average = score_average/len(result)
    # Rescale the mean level (1..6) onto a 0-10 scale.
    change = score_average / 6 * 10
    #print(change)
    #print(score_average)
    return change
if __name__ == '__main__':
    # Smoke test: score a small de-duplicated word list.
    judgment(list(set(['uh','summary','soar'])))
#两分钟要能够表达150-200词的为5分段
#200-220的为5.5分或者6分段
#220-250为6.5分段
#250-270的为7分段
#270-300的为7.5或者8分段
#超越300的直接9分
| [
"34064977+XcloudFance@users.noreply.github.com"
] | 34064977+XcloudFance@users.noreply.github.com |
4dcf436b1af1fdf9501f398fbce1fe495b0d41bb | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/FCOS/configs/vfnet/vfnet_r101_fpn_2x_coco.py | d0a1f569463972dc5b7fe10c35f8fb5d3321a261 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 165 | py | _base_ = './vfnet_r50_fpn_1x_coco.py'
# VarifocalNet with a ResNet-101 backbone (torchvision weights) on a 2x COCO
# schedule; everything else is inherited from the r50 1x base config.
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
lr_config = dict(step=[16, 22])  # LR decay at epochs 16 and 22
total_epochs = 24
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
392f1b32710b3e56af0c84af350bd6efebfe00da | 1d40d4532f1bb6f08d98a9c4dd8a3e711230bfea | /themes/migrations/0003_auto_20190104_0351.py | 2f28efea3ce670d190d5fc609f65f8628306e592 | [] | no_license | Swiftkind/market | 22f2a71548a4b2af97da8001ed0e0890cdee9316 | 4c5aedef9ffe2f16d80430ae3ac91ab053bf8c95 | refs/heads/develop | 2020-04-07T11:20:05.740553 | 2019-02-10T22:49:46 | 2019-02-10T22:49:46 | 158,321,793 | 0 | 0 | null | 2019-02-18T02:34:24 | 2018-11-20T02:51:07 | CSS | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.1.4 on 2019-01-04 03:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('themes', '0002_auto_20190104_0351'),
]
operations = [
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.IntegerField(default=0)),
('comment', models.TextField(blank=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='themes',
name='review',
field=models.ManyToManyField(to='themes.Review'),
),
]
| [
"doradomiguel35@gmail.com"
] | doradomiguel35@gmail.com |
dc8bfcb8439268293ef63419fa45a3962b90fa89 | 9f4c0d12679c18701e83f1cc840a0f82e502cc18 | /models/menu.py | 0d349b4d7fe59b02377e2dc0c2c9b2859fbaab6f | [
"LicenseRef-scancode-public-domain"
] | permissive | kilimanjaro2/Library-Portal | afb76637836d96ecba3cbdb92de14c3266280319 | 55888a49f7d51fe65e891841d606dd2e4dc0ec0f | refs/heads/master | 2021-01-11T16:33:41.014410 | 2017-01-26T11:49:54 | 2017-01-26T11:49:54 | 80,110,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.logo = A(B('web',SPAN(2),'py'),XML('™ '),
_class="navbar-brand",_href="http://www.web2py.com/",
_id="web2py-logo")
response.title = request.application.replace('_',' ').title()
response.subtitle = ''
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = myconf.get('app.author')
response.meta.description = myconf.get('app.description')
response.meta.keywords = myconf.get('app.keywords')
response.meta.generator = myconf.get('app.generator')
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('All Recipes'), False, URL('default', 'index'), []), (T('Upload Recipe'), False, URL('default', 'upload'), []),(T('Uploaded Recipes'), False, URL('default', 'myr'), [])
]
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
# shortcuts
app = request.application
ctr = request.controller
# useful links to internal and external resources
if DEVELOPMENT_MENU: _()
if "auth" in locals(): auth.wikimenu()
| [
"kilimanjaro2@localhost.localdomain"
] | kilimanjaro2@localhost.localdomain |
ba44720d56ea9eaa51525df76280889d5db4248f | a51902bfe09d32577eba968dd2c5c579505b0c16 | /webSiteProject/settings.py | 7cdf892a0b923d46d81949719bd75973dc42e86f | [] | no_license | Teekkarimetsastajat/newWebSite | 22bcd8267412f20c55cdbe40a07b32ed6cc3802a | a184e4e498d808fd5edbbf33a032c8c2d995ba6b | refs/heads/master | 2021-01-17T14:22:05.495613 | 2016-12-06T10:43:05 | 2016-12-06T10:43:05 | 55,509,771 | 0 | 0 | null | 2016-07-12T17:29:34 | 2016-04-05T13:24:02 | Python | UTF-8 | Python | false | false | 3,249 | py | """
Django settings for webSiteProject project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f0#e@hp)lb(3x0w$d=a0n=5&smtb6efgxm=j%^sf32fyp-x2s6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webSiteProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webSiteProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Django database connection (MySQL backend).
# SECURITY NOTE(review): the credentials below are hard-coded and committed
# to source control -- move USER/PASSWORD into environment variables or a
# secrets file before deploying, and rotate this password.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'Teme',
        'USER': 'admin',
        'PASSWORD': 'oravavatkuli4',
        'HOST': 'localhost',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"lauri.peltola@aalto.fi"
] | lauri.peltola@aalto.fi |
aaebb9ee00e234cedfd8b5c5a1593eeb6fd25a53 | 4bd6bf514826ce927d19ad969d34a2f76c0829f6 | /catalog/admin.py | dd908ee6bd4c62552295bcc27f28fded9ff652dd | [] | no_license | dangthihoa/mysite | 03bda2b80f82064dd06314a58276db05ce9dbb15 | b37c6b0e50732e41fdd37a424d1034994d321516 | refs/heads/master | 2022-12-04T10:52:12.706578 | 2020-08-13T09:20:57 | 2020-08-14T02:08:21 | 285,453,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from django.contrib import admin
from .models import Author, Genre, Book, BookInstance, Language
#admin.site.register(Book)
#admin.site.register(Author)
admin.site.register(Genre)
admin.site.register(Language)
#admin.site.register(BookInstance)
# Define the admin class
# Admin configuration for Author: columns shown in the change list, and the
# form layout (birth/death dates grouped on one row).
class AuthorAdmin(admin.ModelAdmin):
    list_display = ('last_name', 'first_name', 'date_of_birth', 'date_of_death')
    fields = ['first_name', 'last_name', ('date_of_birth', 'date_of_death')]
# Register the admin class with the associated model
admin.site.register(Author, AuthorAdmin)
# Inline editor so BookInstance copies can be edited on the Book page.
class BooksInstanceInline(admin.TabularInline):
    model = BookInstance
# Register the Admin classes for Book using the decorator
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    # display_genre is a model method that summarises the M2M genre field.
    list_display = ('title', 'author','language', 'display_genre')
    inlines = [BooksInstanceInline]
# Register the Admin classes for BookInstance using the decorator
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
    list_display = ('book', 'status', 'borrower', 'due_back', 'id')
    list_filter = ('status', 'due_back')
    # Two form sections: core book data, then loan availability.
    fieldsets = (
        (None, {
            'fields': ('book', 'imprint', 'id')
        }),
        ('Availability', {
            'fields': ('status', 'due_back', 'borrower')
        }),
    )
| [
"dang.thi.hoa@sun-asterisk.com"
] | dang.thi.hoa@sun-asterisk.com |
4ba12cdf38c6a4674d41c5c8b0e45392fa5521cf | c3db4c42360c47471635a97568bfc9c21bc14c06 | /pdfmerge/migrations/0018_field_field_question.py | d091202c1080d237688913ae37a521e3eeb390e0 | [
"MIT"
] | permissive | rupin/pdfmerger | 3ede9aa9f1f374eba9b1ea2c33b6920403a8f4ad | fee19523e88362d215f1a29cdab0d140f4c9385c | refs/heads/master | 2020-04-07T20:37:56.821730 | 2019-07-18T16:58:01 | 2019-07-18T16:58:01 | 158,696,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Generated by Django 2.1.3 on 2019-07-10 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pdfmerge', '0017_auto_20190707_1955'),
]
operations = [
migrations.AddField(
model_name='field',
name='field_question',
field=models.CharField(default='', max_length=300),
),
]
| [
"rupin.chheda@gmail.com"
] | rupin.chheda@gmail.com |
1cf415e37e252d91ffc466ca06613a4cadb53fa5 | 7c86d3bfcb044cdeff1e96cb5cfddfbd18a01cf6 | /Lecture6/src/homepage/admin.py | 8a7432230ac0487ad7b0f3dc7a651145156f18c2 | [] | no_license | michellextai/98215 | d360e9559d8bdf62244f792308b49f9158c253f8 | dd89d2c917782bd47d09a78e54f9d22fcfc1ec10 | refs/heads/master | 2021-01-10T06:31:52.400172 | 2016-03-08T07:03:38 | 2016-03-08T07:03:38 | 50,899,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from django.contrib import admin
# Register your models here.
from .models import blog
admin.site.register(blog) | [
"michellextai@gmail.com"
] | michellextai@gmail.com |
d76885027a58cf9fd8188064e6ad01e1ade42ed8 | ef48678dad15e860bfa033717c754d0f3d047a07 | /ejercicios_python/Clase 4/Sin título0.py | 61bf0cf09762926b148cdb9d14f3e30adccfb81b | [] | no_license | agustinfernandez/Python_Unsam | 7411e2ca33ef8f57defb36ac8b98fcf62bb55079 | 816151b98e70bde0448ed1a5af5874b1f870392b | refs/heads/master | 2023-01-11T13:46:21.954509 | 2020-11-17T18:53:58 | 2020-11-17T18:53:58 | 312,004,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 10:10:07 2020
@author: agustin18
"""
# def tienen_a(expresion):
# n = len(expresion)
# i = 0
# while i<n:
# if expresion[i] == 'a':
# return True
# else:
# return False
# i+=1
# rta = tienen_a('palabra')
# print(rta)
#%%
#Ejercicio 4.1: Debugger
#
# def invertir_lista(lista):
# invertida=[]
# i=len(lista)
# while i>0:
# i= i-1
# invertida.append (lista.pop(i))
# return invertida
# l= [1, 2, 3, 4, 5]
# m= invertir_lista(l)
# print(f'Entrada{l}, Salida: {m}')
#El primer paso clave en donde se modifica el parámetro de entrada es en "invertida.append(lista.pop(i))", ya que el lista.pop borra el último elemento de la lista.
#%%
#Ejercicio 4.2:
import csv
from pprint import pprint
def leer_camion(nombre_archivo):
    """Read a CSV file with a header row and return a list of record dicts.

    Each data row yields a new dict keyed by the header names: column 0
    is kept as str, column 1 is converted to int, column 2 to float.

    Bug fix: the original created a single `registro` dict *outside* the
    loop, so every appended element aliased the same dict and the list
    ended up holding N copies of the last row (the author's own trailing
    comment notes this). A fresh dict is now built per row.
    """
    camion = []
    with open(nombre_archivo, 'rt') as f:
        filas = csv.reader(f)
        encabezado = next(filas)
        for fila in filas:
            registro = {
                encabezado[0]: fila[0],
                encabezado[1]: int(fila[1]),
                encabezado[2]: float(fila[2]),
            }
            camion.append(registro)
    return camion
camion=leer_camion('Data/camion.csv')
pprint(camion)
#Sobre escriba todos los registro{} con los datos de la fila que entran al for.
| [
"noreply@github.com"
] | agustinfernandez.noreply@github.com |
7b982b5e2d58b297d4b4466319c1fb437e635850 | 81d7b9dd63d7e0129eca909271307a61cf2d4a7d | /test.py | 3267123b58c228cd0f687cec94d9691714888a95 | [] | no_license | sambiak/djikstra | 4c9e6c584aced4deea556c87578c1e01caf20a84 | ff18ed45662fd0146b45c224501cd4a90e564aea | refs/heads/master | 2021-08-22T06:33:39.070392 | 2017-11-29T14:44:18 | 2017-11-29T14:44:18 | 111,694,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | from djisktra import *
def graphe_non_orienté1():
    """Undirected triangle graph on {a, b, c} as a weighted adjacency dict."""
    return {
        "a": {"b": 6, "c": 2},
        "b": {"a": 6, "c": 3},
        "c": {"b": 3, "a": 2},
    }
def graphe_non_orienté2():
    """Triangle {a, b, c} plus a pendant vertex d attached to b (weight 4)."""
    return {
        "a": {"b": 6, "c": 2},
        "b": {"a": 6, "c": 3, "d": 4},
        "c": {"b": 3, "a": 2},
        "d": {"b": 4},
    }
def graphe_orienté1():
    """Directed 3-cycle a -> b -> c -> a with weights 2, 3, 6."""
    return {
        "a": {"b": 2},
        "b": {"c": 3},
        "c": {"a": 6},
    }
def graphe_orienté2():
    """Directed graph on {a, b, c, d} with a cheap detour a -> d -> c -> b."""
    return {
        "a": {"b": 9, "c": 15, "d": 1},
        "b": {"a": 16},
        "c": {"b": 3},
        "d": {"c": 4},
    }
def test_djikstra1():
G = graphe_non_orienté1()
d, prédécesseur = djikstra("a", G)
assert d["a"] == 0
assert prédécesseur["a"] == None
assert d["c"] == 2
assert prédécesseur["c"] == "a"
assert d["b"] == 5
assert prédécesseur["b"] == "c"
def test_djikstra2():
G = graphe_orienté1()
d, prédécesseur = djikstra("b", G)
assert d["b"] == 0
assert prédécesseur["b"] == None
assert d["c"] == 3
assert prédécesseur["c"] == "b"
assert d["a"] == 9
assert prédécesseur["a"] == "c"
def test_djikstra3():
G = graphe_non_orienté2()
d, prédécesseur = djikstra("a", G)
assert d["a"] == 0
assert prédécesseur["a"] == None
assert d["c"] == 2
assert prédécesseur["c"] == "a"
assert d["b"] == 5
assert prédécesseur["b"] == "c"
assert d["d"] == 9
assert prédécesseur["d"] == "b"
def test_djikstra4():
G = graphe_orienté2()
d, prédécesseur = djikstra("c", G)
assert d["c"] == 0
assert prédécesseur["c"] == None
assert d["a"] == 19
assert prédécesseur["a"] == "b"
assert d["b"] == 3
assert prédécesseur["b"] == "c"
assert d["d"] == 20
assert prédécesseur["d"] == "a" | [
"gaugustoni@mourepiane155.etu.ec-m.fr"
] | gaugustoni@mourepiane155.etu.ec-m.fr |
b72a050a8db14761b2e3ec45dce8f1b91cdb0c07 | fc622a2514c7ee4086e9db657c41ff3f2eb790bd | /ipcheck.py | ecfd3b41f5ded2a16dbf52cf057ba1dd1ecb936b | [] | no_license | Owen000/IPcheck | 131e4e17ec805d85f2ad032c21aee0e68fa723ef | 98646d7eb77950bd6139f12cc30f9f8bef0884ca | refs/heads/main | 2023-03-30T03:44:27.863088 | 2021-04-09T21:42:30 | 2021-04-09T21:42:30 | 356,404,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | #Imports subprocess (used for async functionality)
import subprocess
#Asks user for input (can be dragged from desktop)
fields = ['IP', 'Response']
plist = input("Paste exact file path of ip list: ")
filename = input("What do you want the name of the output file to be?")
with open(plist, 'r') as fileobj:
    def ping(ip):
        """Ping `ip` once and return a human-readable status string.

        NOTE(review): uses the Windows flag `-n`; on Linux/macOS the
        count flag is `-c` -- confirm the target platform.
        """
        # Pings the ip once; change the "1" to "2" if the MAC address is
        # not resolving (may make the scan slower).
        ping_reply = subprocess.run(["ping","-n","1", ip],stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        result =""
        # Progress marker: one dot printed per host scanned.
        print (".", end='')
        if ping_reply.returncode == 0:
            # ping can return 0 (success) even when the destination is
            # unreachable, so the captured stdout must be inspected too.
            if ("unreachable" in str(ping_reply.stdout)):
                result = ("\n Offline%s" % ip)
            else:
                result= ("\n Online %s" % ip)
        elif ping_reply.returncode == 1:
            result= ("\n No response %s" % ip)
        return result
for ip in fileobj:
print(ping(ip.strip()))
| [
"noreply@github.com"
] | Owen000.noreply@github.com |
e8c1c049131555e638de9598eff80205c9df752b | 8dd6d5d843a82105592bf99f5afe79b644aa7d62 | /Pandas/pd8.py | ab65e91ffab4d207cd6fe7ea193e1679abadb266 | [] | no_license | SeelamVenkataKiran/PythonTests | a05869c7cb198e135de38661e8eff238cfda8faf | c01bf53d162b7cf9b69a3b2f5722425399325616 | refs/heads/master | 2023-01-22T10:22:33.752483 | 2020-11-29T18:08:48 | 2020-11-29T18:08:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | import pandas as pd
import numpy as np
#Axis indexes with duplicate values
ser11 = pd.Series(range(5), index=['a', 'a', 'b', 'b', 'c'])
ser11.index.is_unique
#Data selection with duplicates.
# Indexing a value with multiple entries returns a Series
ser11['a']
# Indexing a value with single entries return a scalar value:
ser11['c']
#if DF with duplicate indexes
df = pd.DataFrame(np.random.randn(4, 3), index=['a', 'a', 'b', 'b'])
df
df.loc['b']
#other useful methods
df.sum()
#Passing axis=1 sums over the rows instead:
df.sum(axis=1)
#NA values are excluded unless the entire slice is NA.
df.mean(axis=1, skipna=False)
df.describe()
# x = [12,34,34]
# from statistics import mean
# x = [-1.135797 ,0.088060 ,-0.679053]
# mean(x)
#On non-numeric data, describe produces alternate summary statistics
ser12 = pd.Series(['a', 'a', 'b', 'c'] * 4)
ser12.describe()
ser13 = pd.Series(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c'])
uniques = ser13.unique()
uniques
uniques.sort()
uniques
#series of value frequencies
ser11 = pd.Series(range(5), index=['a', 'a', 'b', 'b', 'c'])
ser11.value_counts()
##pd.value_counts(ser13.values, sort=False)
#isin :responsible for vectorized set membership
test = ser13.isin(['b', 'c'])
test
ser13[test] | [
"ajaykuma24@gmail.com"
] | ajaykuma24@gmail.com |
4fc6b2bc89bc622ffea7099b93f9ff84ba5fe6ca | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/280/82009/submittedfiles/testes.py | 58e6cb4900bd50ebf73f8061e32b3fbbb75a3190 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,526 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
"""
Aula de python (04/09)
"""
"""
print ("Hello World")
print ("Olá\nMundo")
print ("Antônio Marcos Cruz da Paz")
print ("18")
a=11
b=1037
print (a+b)
a=35
print ((9*a+160)/5)
h=30
d=10
print (3.14159*((d/2)**2)*h)
a=2
b=5
print ((2+5)**2)
"""
"""
Programa para medir a média
e dar o resultado final.
"""
"""
print("Bem vindo ao programa para definição de resultado final")
print("Esse programa só aceita notas entre 0 e 10")
print("---------------------------------------------------------")
x=float(input ("Primeira Nota="))
if (x>10):print("Atenção:Insira uma nota válida")
if (x<0):print("Atenção:Insira uma nota válida")
y=float(input ("Segunda Nota="))
if (y>10):print("Atenção:Insira uma nota válida")
if (y<0):print("Atenção:Insira uma nota válida")
z=float((x+y)/2)
print("---------------------------------------------------------")
print ("Nota Final:")
print (z)
print ("Resultado Final:")
if (z<7):print("Reprovado")
if (z>7):print("Aprovado")
if (z==7):print("Aprovado")
"""
"""
nome=str(input("Qual o seu nome?: ")
idade=float(input("Qual é a sua idade?: "))
altura=float(input("Qual é a sua altura?: "))
print ("A idade de +nome+ é %.d e sua altura é %.2f" %(idade,altura))
"""
"""
print("Bem vindo ao programa para definição de resultado final")
print("Esse programa só aceita notas entre 0 e 10")
print("---------------------------------------------------------")
x=float(input ("Primeira Nota="))
if (x>10):print("Atenção:Insira uma nota válida")
if (x<0):print("Atenção:Insira uma nota válida")
y=float(input ("Segunda Nota="))
if (y>10):print("Atenção:Insira uma nota válida")
if (y<0):print("Atenção:Insira uma nota válida")
a=float(input("Terceira Nota="))
if (a>10):print("Atenção:Insira uma nota válida")
if (a<0):print("Atenção:Insira uma nota válida")
b=float(input("Quarta Nota="))
if (b>10):print("Atenção:Insira uma nota válida")
if (b<0):print("Atenção:Insira uma nota válida")
z=float((x+y+a+b)/4)
print("---------------------------------------------------------")
print ("Nota Final:")
print (z)
print ("Resultado Final:")
if (z<7):print("Reprovado")
if (z>7):print("Aprovado")
if (z==7):print("Aprovado")
print("--------------------------------------------------")
m=float(input("Medida em metros="))
cent=float(m*100)
print(cent)
print("--------------------------------------------------")
ah=(float(input("Qual sua altura?: ")))
pes=((72.7*ah)-58)
print(pes)
print("--------------------------------------------------")
rad=(float(input("Valor do raio= ")))
area=(float(3.1416*(rad**2)))
print(area)
print("--------------------------------------------------")
"""
"""
print(bool(not(10<20)))
t1=input("Digite algo: ")
t2=input("Digite algo: ")
t3=input("Digite algo: ")
print(t1+t2+t3)
"""
"""
n1=float(input("n1: "))
n2=float(input("n2: "))
n3=float(input("n3: "))
total=(n1+n2+n3)
print(total)
"""
"""
p=float(input("Insira P: "))
i=float(input("Insira i: "))
n=float(input("Insira n: "))
v=p*((((1+i)**n)-1)/i)
print("%.2f" %v)
"""
"""
a=int(input("Que horas são? [0-23] "))
if a > 3 and a < 12:
print ("Bom dia")
elif a >= 12 and a < 18:
print ("Boa tarde")
elif a >= 0 and a < 24:
print ("Boa noite")
else:
print ("Entrada inválida")
"""
# Print every even number below 100, one per line, then print their sum (2450).
running_total = 0
for even in range(0, 100, 2):
    print(even)
    running_total += even
print(running_total)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f044e59a2d8c9964b9e4360d450379c15ca426d7 | 7f5943bba5d189b8a9cd1aed36604a6c74cca3bf | /Python/98.py | 3f784c79d64fddb11269e967b4b7bc081494d7a3 | [] | no_license | EpsilonHF/Leetcode | 2b3ea060862997e9ebd4db5e75b821d09e596883 | cdf33ab470914210a0d710800337565d1697aa03 | refs/heads/master | 2021-08-18T07:36:10.547074 | 2020-05-08T12:41:10 | 2020-05-08T12:41:10 | 179,662,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | """
Given a binary tree, determine if it is a valid binary search tree (BST).
Assume a BST is defined as follows:
The left subtree of a node contains only nodes with keys less than the
node's key.
The right subtree of a node contains only nodes with keys greater than
the node's key.
Both the left and right subtrees must also be binary search trees.
Example 1:
2
/ \
1 3
Input: [2,1,3]
Output: true
Example 2:
5
/ \
1 4
/ \
3 6
Input: [5,1,4,null,null,3,6]
Output: false
Explanation: The root node's value is 5 but its right child's value is 4.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isValidBST(self, root: 'TreeNode') -> bool:
        """Return True iff `root` is a valid binary search tree.

        A valid BST has, at every node, all left-subtree keys strictly
        smaller and all right-subtree keys strictly larger than the node's
        key (duplicates are invalid). An empty tree is valid.

        The annotation is quoted so this module imports even though the
        TreeNode definition is commented out above (the judge supplies it).
        """
        return self.valid(root, float('-inf'), float('inf'))

    def valid(self, node, low, high):
        """Check that every key in `node`'s subtree lies in the open
        interval (low, high)."""
        if node is None:
            return True
        # Each node must fall strictly inside the bounds inherited from its
        # ancestors. Recursing with one bound tightened per side also covers
        # the direct parent/child comparisons the original re-checked
        # redundantly before recursing.
        if not (low < node.val < high):
            return False
        return (self.valid(node.left, low, node.val) and
                self.valid(node.right, node.val, high))
"epsilonhf@outlook.com"
] | epsilonhf@outlook.com |
19a0dffc227328252e02acdbd7635de518c330fa | ff7b73e2142306c4019122874606fae0f09ce8f8 | /desirecart/wsgi.py | 13c56e143ada4fb6b461d45e449ac27fc8bb77d7 | [] | no_license | RajatPal158/E-Commerce | e5a276f42b8ee2a12dcec45ea1015b6c156ccd3e | 0547800ac0944e2142436792867885538add39e4 | refs/heads/main | 2023-08-07T16:48:31.872346 | 2021-09-23T20:30:50 | 2021-09-23T20:30:50 | 404,694,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
WSGI config for desirecart project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'desirecart.settings')
application = get_wsgi_application()
| [
"noreply@github.com"
] | RajatPal158.noreply@github.com |
d8c5f97073544c655718a44ba248998ba5b1599c | b70c5ba9e3df90e2c99da2c6d80f48f043eaf578 | /Chapter_5_文件与IO/5.13获取文件夹中的文件列表.py | 4ad68cc6112730e9ac3612c76d0a816ce82feca8 | [] | no_license | zh805/PythonCookbook_Codes | 90bdb67ec789efe695f629abdec2c28ca90859c1 | 81c6d20c487650ffa7adc8be68fb2078fc4ec159 | refs/heads/master | 2022-07-14T13:10:01.791019 | 2020-05-13T03:37:10 | 2020-05-13T03:37:10 | 256,399,267 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | '''
@Time : 2020/05/05 19:21:14
@Author : Zhang Hui
'''
# 问题:想获取文件系统中某个目录下的所有文件列表
import os
import glob
import fnmatch
if __name__ == '__main__':
    # List every entry (files and directories) in the current directory.
    all_names = os.listdir('.')
    # Regular files only.
    file_names = [name for name in os.listdir('.')
                  if os.path.isfile(os.path.join('.', name))]
    # Directories only.
    dir_names = [name for name in os.listdir('.')
                 if os.path.isdir(os.path.join('.', name))]
    # str.endswith() is the simplest way to filter by extension.
    pyfiles = [name for name in os.listdir('.') if name.endswith('.py')]
    # glob returns [] when 'somedir' does not exist, so it needs no guard.
    pyfiles2 = glob.glob('somedir/*.py')
    # Bug fix: the original called the *module* (`fnmatch(name, '*.py')`),
    # which raises "TypeError: 'module' object is not callable"; the
    # matching function is fnmatch.fnmatch. Also guard against 'somedir'
    # being absent so the demo stays runnable from any directory.
    if os.path.isdir('somedir'):
        pyfiles3 = [name for name in os.listdir('somedir')
                    if fnmatch.fnmatch(name, '*.py')]
    else:
        pyfiles3 = []
    # File metadata via os.stat: size in bytes and last-modified timestamp.
    file_metadata = [(name, os.stat(name)) for name in pyfiles]
    for name, meta in file_metadata:
        print(name, meta.st_size, meta.st_mtime)
| [
"771662863@qq.com"
] | 771662863@qq.com |
2aa946ad55db684d941665e168df9ddd64d00992 | 1574c8c379cdefe49ef35bd53c5181e783d6ce45 | /djproject/djproject/sync/migrations/0001_initial.py | 166fde6c8648160868253f6605c4d15a8099c64d | [] | no_license | sporty/FruitySync | 957ce495eb6e5f8c495acc97cb8307232b6ae462 | aefa53b65b9c555ec7cccce4ab923bff7c6b5376 | refs/heads/master | 2021-03-24T10:22:14.888552 | 2013-12-09T16:03:35 | 2013-12-09T16:03:35 | 4,232,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,320 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SnsAccount'
db.create_table('sync_snsaccount', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('facebook_access_token', self.gf('django.db.models.fields.CharField')(max_length=255)),
('twitter_access_key', self.gf('django.db.models.fields.CharField')(max_length=255)),
('twitter_access_secret', self.gf('django.db.models.fields.CharField')(max_length=255)),
('except_twitter_clients', self.gf('django.db.models.fields.CharField')(max_length=255)),
('start_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('create_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('update_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('sync', ['SnsAccount'])
# Adding model 'SyncedTweet'
db.create_table('sync_syncedtweet', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sync.SnsAccount'])),
('tweet', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
('create_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('update_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('sync', ['SyncedTweet'])
def backwards(self, orm):
# Deleting model 'SnsAccount'
db.delete_table('sync_snsaccount')
# Deleting model 'SyncedTweet'
db.delete_table('sync_syncedtweet')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sync.snsaccount': {
'Meta': {'object_name': 'SnsAccount'},
'create_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'except_twitter_clients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'facebook_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'start_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'twitter_access_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'twitter_access_secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
'sync.syncedtweet': {
'Meta': {'object_name': 'SyncedTweet'},
'create_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sync.SnsAccount']"}),
'tweet': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'update_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['sync'] | [
"rt.sporty@gmail.com"
] | rt.sporty@gmail.com |
f39e6e125d9adbcb718d8c4e280946557001193a | 520908cb844d4f26e06c36675a0aeecd1d428942 | /teachDRL/gan/maze_generators/aldousbroder.py | 5f483767a55e32ebb0ee25a89d96066dbd59ca77 | [
"MIT"
] | permissive | pierreosselin/teachDeepRL | 713e3b2273aafbb4adcf562f745c69854c59b808 | 51b512ce17a271a63b4d8b0780850511eb008750 | refs/heads/master | 2023-08-14T23:47:27.575306 | 2021-09-23T20:37:36 | 2021-09-23T20:37:36 | 395,716,623 | 1 | 0 | MIT | 2021-08-13T16:13:20 | 2021-08-13T16:13:18 | null | UTF-8 | Python | false | false | 3,691 | py | # https://github.com/theJollySin/mazelib/blob/master/mazelib/generate/AldousBroder.py
import abc
import numpy as np
from numpy.random import shuffle
from random import choice, randrange
from pacman import make_grid
import cv2
class MazeGenAlgo:
__metaclass__ = abc.ABCMeta
def __init__(self, size=(17, 17)):
h, w = size
assert (w >= 3 and h >= 3), 'Mazes cannot be smaller than 3x3.'
self.h = int((h+1)/2)
self.w = int((w+1)/2)
self.H = h
self.W = w
@abc.abstractmethod
def generate(self):
return None
""" All of the methods below this are helper methods,
common to many maze-generating algorithms.
"""
def _find_neighbors(self, r, c, grid, is_wall=False):
""" Find all the grid neighbors of the current position; visited, or not.
Args:
r (int): row of cell of interest
c (int): column of cell of interest
grid (np.array): 2D maze grid
is_wall (bool): Are we looking for neighbors that are walls, or open cells?
Returns:
list: all neighboring cells that match our request
"""
ns = []
if r > 1 and grid[r - 2][c] == is_wall:
ns.append((r - 2, c))
if r < self.H - 2 and grid[r + 2][c] == is_wall:
ns.append((r + 2, c))
if c > 1 and grid[r][c - 2] == is_wall:
ns.append((r, c - 2))
if c < self.W - 2 and grid[r][c + 2] == is_wall:
ns.append((r, c + 2))
shuffle(ns)
return ns
class AldousBroderMazeGenerator(MazeGenAlgo):
"""
1. Choose a random cell.
2. Choose a random neighbor of the current cell and visit it. If the neighbor has not
yet been visited, add the traveled edge to the spanning tree.
3. Repeat step 2 until all cells have been visited.
"""
def __init__(self, size):
super(AldousBroderMazeGenerator, self).__init__(size)
def generate_maze(self):
""" highest-level method that implements the maze-generating algorithm
Returns:
np.array: returned matrix
"""
# create empty grid, with walls
grid = np.empty((self.H, self.W), dtype=np.int8)
grid.fill(0)
crow = randrange(0, self.H, 2)
ccol = randrange(0, self.W, 2)
grid[crow][ccol] = 1 #Free space
num_visited = 1
while num_visited < self.h * self.w:
# find neighbors
neighbors = self._find_neighbors(crow, ccol, grid, 0)
# how many neighbors have already been visited?
if len(neighbors) == 0:
# mark random neighbor as current
(crow, ccol) = choice(self._find_neighbors(crow, ccol, grid, 1))
continue
# loop through neighbors
for nrow, ncol in neighbors:
if grid[nrow][ncol] == 0:
# open up wall to new neighbor
grid[(nrow + crow) // 2][(ncol + ccol) // 2] = 1
# mark neighbor as visited
grid[nrow][ncol] = 1
# bump the number visited
num_visited += 1
# current becomes new neighbor
crow = nrow
ccol = ncol
# break loop
break
return grid
if __name__ == '__main__':
p = AldousBroderMazeGenerator(size=(32, 32))
imgs = [p.generate_maze() for i in range(16)]
imgs_grid = make_grid(imgs)
cv2.imwrite(f'/Users/suny/Desktop/Uni/DPhil/Projects/RL/mazegan/data/pacman-mazes/mazes_aldous_broder.png', imgs_grid*255) | [
"pierre.osselin@gmail.com"
] | pierre.osselin@gmail.com |
5d22f877f65695e9ad850b005e38abd8a474084c | 15aa25f9d93cd2663a59f2e92e58fbb9624d323f | /backend/core/__init__.py | 5e09a1f8bca1ce1a6ca4966dc543dd903d3f4f68 | [] | no_license | chdsbd/vscode-python-ls-reproduction | 311a9febdef11cf9871ad72b8573f38f17ce09aa | 665f6f282dbbe32000065d4cc94af8429cbf1b56 | refs/heads/master | 2020-05-15T08:05:06.870189 | 2019-04-18T20:27:04 | 2019-04-18T20:27:04 | 182,152,958 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | def cli():
print('hello world')
| [
"chris@dignam.xyz"
] | chris@dignam.xyz |
e8107a19177723d616b7a18b7ffeb70bc030232e | 5aeace80063fdef57e7594ff5aa3f8c56b662042 | /todoism/apis/v1/auth.py | c496c01358bbab60f4e2b697f38d0a8069046d73 | [
"MIT"
] | permissive | TrumpUSA/todoism | 4491db32a78559b3f41f633ddbc1ea79c6f56b5c | dffda90e9a6f2ef04eecb0c068f61cfa2a955731 | refs/heads/master | 2020-04-11T03:53:30.058288 | 2019-03-23T07:09:08 | 2019-03-23T07:09:08 | 161,493,223 | 0 | 0 | MIT | 2018-12-12T13:35:21 | 2018-12-12T13:35:20 | null | UTF-8 | Python | false | false | 2,073 | py | from functools import wraps
from flask import g, current_app, request
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired
from todoism.apis.v1.errors import api_abort, invalid_token, token_missing
from todoism.models import User
def generate_token(user):
expiration = 3600
s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
token = s.dumps({'id': user.id}).decode('ascii')
return token, expiration
def validate_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
user = User.query.get(data['id'])
if user is None:
return False
g.current_user = user
return True
def get_token():
# Flask/Werkzeug do not recognize any authentication types
# other than Basic or Digest, so here we parse the header by hand.
if 'Authorization' in request.headers:
try:
token_type, token = request.headers['Authorization'].split(None, 1)
except ValueError:
# The Authorization header is either empty or has no token
token_type = token = None
else:
token_type = token = None
return token_type, token
def auth_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token_type, token = get_token()
# Flask normally handles OPTIONS requests on its own, but in the
# case it is configured to forward those to the application, we
# need to ignore authentication headers and let the request through
# to avoid unwanted interactions with CORS.
if request.method != 'OPTIONS':
if token_type is None or token_type.lower() != 'bearer':
return api_abort(400, 'The token type must be bearer.')
if token is None:
return token_missing()
if not validate_token(token):
return invalid_token()
return f(*args, **kwargs)
return decorated
| [
"withlihui@gmail.com"
] | withlihui@gmail.com |
858b628115ecbb92aca52d08a4a9c8a4267b07a4 | 0397b7112b0915f8b7a9d32d07853ce14980d1de | /pagerank.py | 76429d16cf907e0c181e84b44073b12e56be597f | [] | no_license | Sulemanovaaa/info_search | b7b4c75a3b23126b9942560a125b982ad9006ee0 | b5c8359ea963f01b7dbd9bd0a70adffa0f6bf431 | refs/heads/master | 2020-04-23T14:46:01.410728 | 2019-04-08T09:28:35 | 2019-04-08T09:28:35 | 171,243,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from crawler import load_redirect_map
import operator
def pagerank(answers):
redirect = load_redirect_map()
d = 0.25
N = len(answers)
pageranks = dict()
for page in answers:
summa = 0
for links in redirect:
for item in redirect.get(links):
if item == page:
summa += (1/N)/len(redirect.get(links))
pr = ((1-d)/N)+d*summa
pageranks.update({page: pr})
pageranks = sorted(pageranks.items(), key=operator.itemgetter(1), reverse=True)
return pageranks
| [
"sulemanovaaa@icloud.com"
] | sulemanovaaa@icloud.com |
e903f2aeb74765f6e935dbaf8f926ae38e99a4d9 | 5536a6e94d2761697e16db65d7e69f1930fcd7f2 | /cart/cart.py | 9ff74b331e64c023b5a8cd8cc9400c88b2626712 | [] | no_license | morshedmasud/e_commerse | 6b8614c5e8759eceb5125dc1ab5ee6a14a852b2a | 58bf694f9fea141ec9a0b8de52063f6318ce5f73 | refs/heads/master | 2020-04-01T14:40:26.055994 | 2019-05-11T17:00:28 | 2019-05-11T17:00:28 | 153,303,417 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | from decimal import Decimal
from django.conf import settings
from product.models import Products
class Cart(object):
# Initialize the cart
def __init__(self, request):
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
# save an empty cart in the session
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
def add(self, product, quentity=1, update_quantity=False):
# add a product to the cart or update it's quantity.
product_id = str(product.id)
if product_id not in self.cart:
self.cart[product_id] = {
'quantity': 0,
'price': str(product.price)
}
if update_quantity:
self.cart[product_id]['quantity'] = quentity
else:
self.cart[product_id]['quantity'] += quentity
self.save()
def remove(self, product):
product_id = str(product.id)
if product_id in self.cart:
del self.cart[product_id]
self.save()
def save(self):
# update the session cart
self.session[settings.CART_SESSION_ID] = self.cart
# mark the session as 'modified' to make sure it is saved
self.session.modfied = True
def __iter__(self):
"""
Iterate over the items in the cart and get the products from the database
"""
product_ids = self.cart.keys()
# get the product objects and add them to the cart
products = Products.objects.filter(id__in=product_ids)
for product in products:
self.cart[str(product.id)]['product'] = product
for item in self.cart.values():
item['price'] = Decimal(item['price'])
item['total_price'] = item['price'] * item['quantity']
yield item
def __len__(self):
# count all item in the cart
return sum(item['quantity'] for item in self.cart.values())
def clear(self):
self.session[settings.CART_SESSION_ID] = {}
self.session.modfied = True
def get_total_price(self):
return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values()) | [
"masudraj6@gmail.com"
] | masudraj6@gmail.com |
52c0d97eb821a1b0157be50def2a1d1c46208948 | 88022686e05595ea2f22bc74617202ff4a214f65 | /stories/views.py | 600bbc6903193f931fe23fb3aab7b2590a4492fb | [] | no_license | mohamedAbdElhameed/ricardo | 22fbda1f94290166b8e24a8d1de6d4269ded4b2b | b3258b1080e23b4f87d6af5f37daf50e70d3e87e | refs/heads/master | 2022-12-15T06:25:26.906532 | 2020-05-23T02:37:39 | 2020-05-23T02:37:39 | 155,744,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | from django.shortcuts import render
# Create your views here.
from products.models import Category
from stories.models import ArtisanMaster, Tale
from userprofile.forms import SignUpForm, LoginForm
def masters_view(request):
masters = ArtisanMaster.objects.all()
categories = Category.objects.all()
sign_up_form = SignUpForm()
sign_in_form = LoginForm()
context = {
'categories': categories,
'masters': masters,
'sign_up_form': sign_up_form,
'sign_in_form': sign_in_form,
}
return render(request, 'stories/artisan_master.html', context)
def tales_view(request):
tales = Tale.objects.all()
categories = Category.objects.all()
sign_up_form = SignUpForm()
sign_in_form = LoginForm()
context = {
'categories': categories,
'tales': tales,
'sign_up_form': sign_up_form,
'sign_in_form': sign_in_form,
}
return render(request, 'stories/tales.html', context)
| [
"mohamedabdelhameed34@gmail.com"
] | mohamedabdelhameed34@gmail.com |
10cd9918c5d0e08e3736f3197fb1f3ffb2ad3ec3 | 5bbfb88f5e6d5af4ffdd424c72713ee8e9fb7073 | /NewProj/env/app/flask_rest_app.py | 7a74292804b7d999570e31e9340489987965f558 | [] | no_license | Icode4passion/myPytho2code | 7e958a7c2acd1495410c8d3ba56aa1bde938b168 | ac7ac954c4fb617451713be5ad60a6e08ce74c31 | refs/heads/master | 2020-03-25T10:51:06.723836 | 2018-08-06T09:43:46 | 2018-08-06T09:43:46 | 143,708,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from flask import Flask
from flask_restplus import Resource , Api
app = Flask('__name__')
api = Api(app)
@api.route('/hello')
class HelloWorld(Resource):
"""docstring for HelloWorld"""
def get(self):
return {'hello':'world'}
if __name__ == '__main__':
app.run(debug = True)
| [
"yogeerama@gmail.com"
] | yogeerama@gmail.com |
e93039f1ea685d28128320151b61e83c0f7ec59e | 0a900c26f163cbcb592b94a88ced63cd751f6f74 | /projects/remstat/rc_remstat.py | 59fc320d8c0a6195c268027011bb35307b774c43 | [] | no_license | choco1911/try_py | b56e7818498b848cbb4f79a6f434c00597041f91 | 5ca2960402d28cc30ffbefb420032450a8d3e4a0 | refs/heads/master | 2021-01-12T05:35:49.177898 | 2017-02-09T16:45:17 | 2017-02-09T16:45:17 | 77,140,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,880 | py | #!/usr/bin/env python
import urllib2 as u
import re
import operator
servs = dict()
web_page = 'http://mailmon2.rambler.ru/cgi-bin/problem.cgi?report=hdd_smart_attributes&attr=5'
def getHtml(urll):
req = u.Request(urll)
html = u.urlopen(req).read()
#print('Get data from site')
return html.split('\n')
def parseHtml(htmlString):
at = re.compile(">(.*?)<")
li = re.compile("<a href=\"([^\"]+)\">")
for data in htmlString :
if data.find('<a') > 0 :
serv_attr = at.finditer(data)
serv_links = li.findall(data)
count = 0
for attr in serv_attr :
if len(attr.group(1))> 0 :
if count == 0 :
servName = serv_links[1]
servs[servName] = []
servs[servName].append(attr.group(1))
else :
servs[servName].append(attr.group(1))
count += 1
servs[servName].append(serv_links)
return servs
def getDiskinfo(server,disk):
servs = dict()
url = "http://mailmon2.rambler.ru/cgi-bin/hddsmart.cgi?host={0}&hdd={1}".format(server, disk)
req = u.Request(url)
html1 = u.urlopen(req).read()
shtml=html1.split('\n')
list_attr = []
for data in shtml :
if data.find('<td') > 0 :
#hdd_attr = re.finditer(r'<span class=\"(?:var|bar-value|unit)\">(?!Host:)(.*?)<', data)
hdd_attr = re.finditer(r'(?:<span class=\"(?:var|bar-value)\"|[^/]span+?)>(?!Host:)([^<].+?)</', data)
for item in hdd_attr :
list_attr.append(item.group(1))
# IF include last updated
# for num,item in enumerate(list_attr[:6] + list_attr[8:10] + list_attr[-2:]):
for num,item in enumerate(list_attr[:6] + list_attr[8:10]):
if item == 'RRDs:' : continue
# if 'span' in item : item = item[:item.index('<')] +" "+ item[item.rindex('>')+1:]
if 'span' in item : item = item[:item.index('<')] + item[item.rindex('>')+1:]
if num % 2 == 0:
title = item
else:
value = item
if 'value' in locals():
yield title, value
def exServer(domain):
excList=['corvus','search','mon','piclist','netmon','mailmon']
for exclude in excList:
if domain.startswith(exclude): return None
return domain
res = dict()
for server in parseHtml(getHtml(web_page)) :
if int(servs[server][11]) > 0 :
res[servs[server][0],servs[server][1]] = servs[server][11]
sorted_x = sorted(res, key=lambda i: int(res[i]), reverse=True)
ccc=1
for it in sorted_x[:10] :
#for it in sorted_x :
serv,hdd = it
serv = exServer(serv)
if serv :
print ccc,serv,hdd,res[it]
ccc += 1
for t,l in getDiskinfo(serv,hdd) :
print " " * 4, t,l
| [
"choco@rambler-co.ru"
] | choco@rambler-co.ru |
d34bfba5821dcb55b1e8f2cbfc59752953f18a40 | d15f7e7616d54576dee9b9f235e9d5b68f0dbe79 | /tests/granular/test_words.py | c4a962ec5eb9fa2c27d957bfaa4b79cbd6383c2f | [
"Apache-2.0"
] | permissive | MANISH007700/nlp_profiler | f371d59a18c751c7f2a1286ff38bc85f409a7cd3 | a610f0e49b2c7b3a0ed2d0d16d04c86b48a9872c | refs/heads/master | 2023-01-19T01:26:10.310960 | 2020-11-27T02:13:13 | 2020-11-27T02:17:16 | 316,686,101 | 1 | 0 | NOASSERTION | 2020-11-28T08:06:08 | 2020-11-28T08:06:08 | null | UTF-8 | Python | false | false | 1,783 | py | import numpy as np
import pytest
from nlp_profiler.constants import NaN
from nlp_profiler.granular_features.words \
import gather_words, count_words # noqa
text_with_a_number = '2833047 people live in this area'
text_to_return_value_mapping = [
(np.nan, []),
(float('nan'), []),
(None, []),
]
@pytest.mark.parametrize("text,expected_result",
text_to_return_value_mapping)
def test_given_invalid_text_when_parsed_then_return_empty_list(
text: str, expected_result: str
):
# given, when
actual_result = gather_words(text)
# then
assert expected_result == actual_result, \
f"Expected: {expected_result}, Actual: {actual_result}"
text_to_return_count_mapping = [
(np.nan, NaN),
(float('nan'), NaN),
(None, NaN),
]
@pytest.mark.parametrize("text,expected_result",
text_to_return_count_mapping)
def test_given_invalid_text_when_counted_then_return_NaN(
text: str, expected_result: float
):
# given, when
actual_result = count_words(text)
# then
assert expected_result is actual_result, \
f"Expected: {expected_result}, Actual: {actual_result}"
def test_given_a_text_with_words_when_parsed_then_return_only_the_words():
# given
expected_results = ['people', 'live', 'in', 'this', 'area']
# when
actual_results = gather_words(text_with_a_number)
# then
assert expected_results == actual_results, \
"Didn't find the expected words in the text"
def test_given_a_text_with_words_when_counted_then_return_count_of_words():
# given, when
actual_results = count_words(text_with_a_number)
# then
assert actual_results == 5, \
"Didn't find the expected number of words in the text"
| [
"sadhak001@gmail.com"
] | sadhak001@gmail.com |
ff8fe1e7b0adadab747445b080980151c55c552a | b8a25f07753481521c8aa0f9217aa1b8112a91a2 | /srv/asgi.py | 42d80d0047499a53feebfa61b890fa1cf3803c30 | [] | no_license | rudGess/web1c | f55e30bed0d223e18d8d6203a451af3cf8c72294 | 5c8b73082217dea3b88165212bdec3549c6fe2e3 | refs/heads/master | 2023-08-03T22:57:01.500631 | 2021-09-24T17:38:29 | 2021-09-24T17:38:29 | 410,048,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | """
ASGI config for srv project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application
import echo.routing
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'srv.settings')
application = ProtocolTypeRouter({
"http":get_asgi_application(),
"websocket":URLRouter(echo.routing.websocket_urlpatterns)
}) | [
"Frenki4palica@yandex.ru"
] | Frenki4palica@yandex.ru |
608f1b5f7a4e0584bda407870c0aeab19bfe99eb | 255de81bb35ca27b5362b7c872132aa58a87a29b | /Predictor-LinearRegressionOneVar.py | bd48819dd953267b1037a2619801be332397b321 | [] | no_license | etowusu/LinearRegressionOneVar | 7411f31e32673cb8a07f2e7ea802e76c55c5e301 | c47479a52919a0f30417a1cd8926053016cd14c5 | refs/heads/master | 2020-09-04T16:49:27.930932 | 2019-11-06T21:46:58 | 2019-11-06T21:46:58 | 219,809,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | def predict(X,theta):
"""
Takes in numpy array of x and theta and return the predicted value of y based on theta
"""
predictions= np.dot(theta.transpose(),X)
return predictions[0] | [
"etowusu@gmail.com"
] | etowusu@gmail.com |
ff47455f97a12eb18764b2e2dd20cfd0ae263b0e | 9e5608843fd165cfdd02d1b3214c1244ec3cfeae | /test/test_core.py | 06383f8f3bb2dc7683fe72c50892fd1966c66eb7 | [
"BSD-2-Clause"
] | permissive | OliverPSZ/lamprop | 3ba4b046ecc04a0f46aed0306903990c5f896a25 | b8ba07913448c269b7ba8ce11a198845552d934f | refs/heads/master | 2022-04-09T04:33:08.441607 | 2020-03-13T20:30:30 | 2020-03-13T20:30:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | # file: test_types.py
# vim:fileencoding=utf-8:ft=python:fdm=marker
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2015-04-05 23:36:32 +0200
# Last modified: 2019-01-01T02:40:22+0100
"""Test for lamprop types"""
import sys
# Inserting the path is needed to make sure that the module here is loaded,
# not an installed version!
sys.path.insert(1, '.')
from lamprop.core import fiber, resin, lamina, laminate # noqa
hf = fiber(233000, 0.2, -0.54e-6, 1.76, "Hyer's carbon fiber")
hr = resin(4620, 0.36, 41.4e-6, 1.1, "Hyer's resin")
def test_lamina(): # {{{1
f = fiber(230000, 0.30, -0.41e-6, 1.76, 'T300')
r = resin(2900, 0.36, 41.4e-6, 1.15, 'Epikote04908')
la = lamina(f, r, 100, 0, 0.5)
assert ((la.E1, la.E2, la.G12, la.ν12, la.αx, la.αy, la.ρ) ==
(116450.0, 5800, 2900.0, 0.3, 1.1060541004723054e-07, 4.14e-05,
1.455))
assert ((la.Q̅11, la.Q̅12, la.Q̅16, la.Q̅22, la.Q̅26, la.Q̅66) ==
(116974.35045890552, 1747.8348630184253, 0.0, 5826.116210061417, 0.0, 2900.0))
def test_ud(): # {{{1
la = lamina(hf, hr, 100, 0, 0.5)
ud = laminate('ud', [la, la, la, la])
assert 0.45 < ud.thickness < 0.46
assert 1.42 < ud.ρ < 1.44
assert ud.vf == 0.5
assert 0.614 < ud.wf < 0.616
assert 118800 < ud.Ex < 118820
assert 9230 < ud.Ey < 9250
assert 4610 < ud.Gxy < 4630
assert 0.29 < ud.νxy < 0.31
assert 0.022 < ud.νyx < 0.024
assert 2.75e-7 < ud.αx < 2.76e-07
assert 4.13e-5 < ud.αy < 4.15e-5
def test_plain_weave(): # {{{1
A = lamina(hf, hr, 100, 0, 0.5)
B = lamina(hf, hr, 100, 90, 0.5)
pw = laminate('pw', [A, B, B, A])
assert 0.45 < pw.thickness < 0.46
assert 1.42 < pw.ρ < 1.44
assert pw.vf == 0.5
assert 0.614 < pw.wf < 0.616
assert 64345 < pw.Ex < 64365
assert 64345 < pw.Ey < 64365
assert 4610 < pw.Gxy < 4630
assert 0.042 < pw.νxy < 0.044
assert 0.042 < pw.νyx < 0.044
assert 3.963e-06 < pw.αx < 3.983e-06
assert 3.963e-06 < pw.αy < 3.983e-06
def test_pm45(): # {{{1
A = lamina(hf, hr, 100, 45, 0.5)
B = lamina(hf, hr, 100, -45, 0.5)
pw = laminate('pw', [A, B, B, A])
assert 0.45 < pw.thickness < 0.46
assert 1.42 < pw.ρ < 1.44
assert pw.vf == 0.5
assert 0.614 < pw.wf < 0.616
assert 16238 < pw.Ex < 16258
assert 16238 < pw.Ey < 16258
assert 30832 < pw.Gxy < 30852
assert 0.75836 < pw.νxy < 0.75866
assert 0.75836 < pw.νyx < 0.75866
assert 3.963e-06 < pw.αx < 3.983e-06
assert 3.963e-06 < pw.αy < 3.983e-06
def test_qi(): # {{{1
A = lamina(hf, hr, 200, 0, 0.5)
B = lamina(hf, hr, 200, 90, 0.5)
C = lamina(hf, hr, 100, 45, 0.5)
D = lamina(hf, hr, 100, -45, 0.5)
qi = laminate('qi', [A, B, C, D, D, C, B, A])
assert 1.35 < qi.thickness < 1.37
assert 1.42 < qi.ρ < 1.44
assert qi.vf == 0.5
assert 0.614 < qi.wf < 0.616
assert 53339 < qi.Ex < 53359
assert 53339 < qi.Ey < 53359
assert 13351 < qi.Gxy < 13371
assert 0.20591 < qi.νxy < 0.20791
assert 0.20591 < qi.νyx < 0.20791
assert 3.963e-06 < qi.αx < 3.983e-06
assert 3.963e-06 < qi.αy < 3.983e-06
| [
"rsmith@xs4all.nl"
] | rsmith@xs4all.nl |
2da7c30eaed45f2b40980583085df5d884f197a3 | bb527b98ffe5c458608059087b29b2e1ff04645c | /crossword/urls.py | 28f45260619fc0eeee556835b6a55313968355b4 | [] | no_license | adecker89/Collaborative-Crossword | cc2a1784d4cc6a51ad4b1171233d458ef05c1f7d | 3849d6a7da277b191d1538002e047a036d786ccb | refs/heads/master | 2021-01-01T05:34:24.456932 | 2012-04-11T03:22:37 | 2012-04-11T03:22:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'crossword.views.home', name='home'),
# url(r'^crossword/', include('crossword.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'', include('django_socketio.urls')),
url(r'', include('app.urls')),
)
| [
"adecker89@gmail.com"
] | adecker89@gmail.com |
67d6e19618ccf25a7d4121d48598dcff95796dfe | 48be040a96ee190c9c473e7e4b02d1f4253a2d14 | /gromos2amber/__init__.py | 79fc6ec847e2745bae28a37dc14b458ac3efd6ec | [
"MIT"
] | permissive | ATB-UQ/gromos2amber | 22c6944e512590db00d49770e9048efa89d02ef8 | afe2112b28ec47a863607da514f9e8832cc426f0 | refs/heads/master | 2023-06-24T13:56:37.723640 | 2023-06-06T23:32:34 | 2023-06-06T23:32:34 | 196,283,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py |
from .Converter import convert
from .Errors import GromosFormatError, IllegalArgumentError
| [
"contact@tomlee.com.au"
] | contact@tomlee.com.au |
11e2847e92b4b026669f4204ab265c94131f3bea | b3281a2988342ab5ee2b968b2ded686cf9c09c9b | /ckanext-ceh-comment/ckanext/ceh_comment/commands/command.py | 8bb4b0c8bc07bf22313b1178893ce8f95fc4b4d4 | [] | no_license | glujan04/ceh_ckan | 0516d0f5fe927f550443887debdd39e9e5f1e546 | 2ebb08c83de077daa194e613decc5be40f7eb882 | refs/heads/master | 2022-11-27T13:30:26.654388 | 2020-08-11T19:46:22 | 2020-08-11T19:46:22 | 286,791,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | from __future__ import print_function
import sys
from pprint import pprint
from ckan import model
from ckan.logic import get_action, ValidationError
from ckan.plugins import toolkit
from ckan.lib.cli import CkanCommand
class CehComment(CkanCommand):
'''CehComment remotely mastered metadata
Usage:
cehcomment initdb
- Creates the necessary tables in the database
cehcomment cleandb
- Remove the tables in the database
The command should be run from the ckanext-ceh-comment directory and expect
a development.ini file to be present. Most of the time you will
specify the config explicitly though::
paster cehcomment [command] --config=../ckan/development.ini
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def command(self):
self._load_config()
context = {'model': model, 'session': model.Session, 'ignore_auth': True}
self.admin_user = get_action('get_site_user')(context, {})
print('')
if len(self.args) == 0:
self.parser.print_usage()
sys.exit(1)
cmd = self.args[0]
if cmd == 'initdb':
self.initdb()
elif cmd == 'cleandb':
self.cleandb()
else:
print('Command {0} not recognized'.format(cmd))
def _load_config(self):
super(CehComment, self)._load_config()
def initdb(self):
from ckanext.ceh_comment.model import init_db as db_setup
db_setup()
print('DB tables created')
def cleandb(self):
from ckanext.ceh_comment.model import clean_db as db_remove
db_remove()
print('DB tables removed') | [
"glujan04@github.com"
] | glujan04@github.com |
50abf53e3a15ea6328179df1801559cc195cd590 | f70d5a5f3147d49ba046388057da5baf42997783 | /stackdjango/urls.py | ce998155c057224ee9779640148aed5a05e13a6b | [
"MIT"
] | permissive | arathijagdish/stackdjango | 76a2c3b75e5d52d91aeb256f957b0b1ee4d953e4 | 2064459fcbccc4069660cf23d68c7ca26aea110c | refs/heads/master | 2022-12-25T20:45:33.368843 | 2020-10-09T11:50:17 | 2020-10-09T11:50:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | """stackdjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"mishelvettukattil@outlook.com"
] | mishelvettukattil@outlook.com |
ab814df1e19d524f90b6ec14783658f970b2b78f | 0ecc632414dac4f698330a2ff00cd210dfd34297 | /apps/CMDB/migrations/0037_auto_20181219_2035.py | 8eaf7e3939599baec47042a78e134ff1114a8014 | [] | no_license | qiuwenhuifx/roe | 0a6acd9356d65a50acb473012319fbc115aaa791 | 688729480f1d047f7eab9ffaf316d2d56046111d | refs/heads/master | 2021-07-13T03:15:00.266986 | 2020-05-31T13:12:01 | 2020-05-31T13:12:01 | 148,592,094 | 1 | 0 | null | 2020-05-31T13:12:02 | 2018-09-13T06:27:36 | JavaScript | UTF-8 | Python | false | false | 443 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-19 12:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('CMDB', '0036_auto_20181213_1404'),
]
operations = [
migrations.AlterUniqueTogether(
name='networkcard_assets',
unique_together=set([('host', 'macaddress', 'ip')]),
),
]
| [
"flc009@163.com"
] | flc009@163.com |
33c6d15a089071143f72c473f9eaddb43f0a52c9 | 3b92adca223a7b5f8df881a3cc6c0f717c6536f2 | /crm/urls.py | 5358c26a296b5f3541999665f79be4c2c92f8fff | [] | no_license | archdim1/crm_smartbuild | aefa28a6489cfafa3a0fe65807972bcd5e58a085 | ab5f3bbe92a36184383d04e00da9ed35f2a9175a | refs/heads/main | 2023-07-22T03:36:12.486853 | 2021-08-24T15:33:47 | 2021-08-24T15:33:47 | 398,759,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | from django.urls import path
from .views import \
CompanyDetailView, \
CompanyCreateView, \
CompanyDeleteView, \
CompanyUpdateView, \
CompanyProjectsDetailView, \
ProjectDetailView, \
ProjectDeleteView, \
ProjectUpdateView, \
CompanyProjectsNotStartedDetailView, \
CompanyProjectsInProcessDetailView, \
CompanyProjectsCompletedDetailView, \
ProjectInteractionsDetailView, \
CompanyProjectInteractionsDetailView, \
CompanyInteractionsPhonesDetailView, \
CompanyInteractionsEmailDetailView, \
CompanyInteractionsMessengerDetailView, \
ProjectInteractionsPhonesDetailView, \
ProjectInteractionsEmailDetailView, \
ProjectInteractionsMessengerDetailView
urlpatterns = [
path('create/', CompanyCreateView.as_view(), name='company_create'),
path('<int:pk>/', CompanyDetailView.as_view(), name='company-detail'),
path('<int:pk>/projects/', CompanyProjectsDetailView.as_view(), name='projects'),
path('<int:pk>/interactions/', CompanyProjectInteractionsDetailView.as_view(), name='company_interactions'),
path('<int:pk>/interactions_phones', CompanyInteractionsPhonesDetailView.as_view(),
name='company_interactions_phones'),
path('<int:pk>/interactions_email', CompanyInteractionsEmailDetailView.as_view(),
name='company_interactions_email'),
path('<int:pk>/interactions_messenger', CompanyInteractionsMessengerDetailView.as_view(),
name='company_interactions_messenger'),
path('<int:pk>/projects_not_started/', CompanyProjectsNotStartedDetailView.as_view(), name='projects_not_started'),
path('<int:pk>/projects_in_process/', CompanyProjectsInProcessDetailView.as_view(), name='projects_in_process'),
path('<int:pk>/completed/', CompanyProjectsCompletedDetailView.as_view(), name='projects_completed'),
path('<int:pk>/delete/', CompanyDeleteView.as_view(), name='company_delete'),
path('<int:pk>/update/', CompanyUpdateView.as_view(), name='company_update'),
path('project/<int:pk>/', ProjectDetailView.as_view(), name='project-detail'),
path('project/<int:pk>/interactions/', ProjectInteractionsDetailView.as_view(), name='project_interactions'),
path('project/<int:pk>/interactions_phones/', ProjectInteractionsPhonesDetailView.as_view(),
name='project_interactions_phones'),
path('project/<int:pk>/interactions_email/', ProjectInteractionsEmailDetailView.as_view(),
name='project_interactions_phones'),
path('project/<int:pk>/interactions_messenger/', ProjectInteractionsMessengerDetailView.as_view(),
name='project_interactions_phones'),
path('project/<int:pk>/delete/', ProjectDeleteView.as_view(), name='project_delete'),
path('project/<int:pk>/update/', ProjectUpdateView.as_view(), name='project_update'),
]
| [
"archibelousov@gmail.com"
] | archibelousov@gmail.com |
96f95611c168147d5b4d65814ff89772133e9dad | b79818001c10fc26125496077ff8d74856570ea4 | /auth/tests/models.py | c864fce217fd25839a5ea73a6ce56e1af85bfa0f | [] | no_license | nexascale/nexathan | eb9ffdf8ad26db2f5017e8bd4317c8d4bf4df40a | 22ba52092ff3598f790ff3284104ad1f0ca692de | refs/heads/master | 2021-01-17T05:34:04.911657 | 2011-05-27T20:45:55 | 2011-05-27T20:45:55 | 1,660,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | from django.conf import settings
from django.test import TestCase
from nexathan.auth.models import User, SiteProfileNotAvailable
class ProfileTestCase(TestCase):
fixtures = ['authtestdata.json']
def setUp(self):
"""Backs up the AUTH_PROFILE_MODULE"""
self.old_AUTH_PROFILE_MODULE = getattr(settings,
'AUTH_PROFILE_MODULE', None)
def tearDown(self):
"""Restores the AUTH_PROFILE_MODULE -- if it was not set it is deleted,
otherwise the old value is restored"""
if self.old_AUTH_PROFILE_MODULE is None and \
hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
if self.old_AUTH_PROFILE_MODULE is not None:
settings.AUTH_PROFILE_MODULE = self.old_AUTH_PROFILE_MODULE
def test_site_profile_not_available(self):
# calling get_profile without AUTH_PROFILE_MODULE set
if hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
user = User.objects.get(username='testclient')
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# Bad syntax in AUTH_PROFILE_MODULE:
settings.AUTH_PROFILE_MODULE = 'foobar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# module that doesn't exist
settings.AUTH_PROFILE_MODULE = 'foo.bar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
| [
"nexascale@gmail.com"
] | nexascale@gmail.com |
731f3151c859fbee1a2b02c34ae093ac8b5e7150 | 249bf0a1b3b6368c851234b260abc6bfee58d4b1 | /game_deplacement.py | 40a697e7413eff235fac0bd2fef6e6d2d7049588 | [] | no_license | urvd/2048 | e3b60566a8adffa4414343be16b545f060b23b53 | 5a7cb7e0c33b7e2257fac70dcd4e1fba60d51787 | refs/heads/main | 2023-02-21T12:06:30.198281 | 2021-01-27T22:40:55 | 2021-01-27T22:40:55 | 302,426,467 | 0 | 0 | null | 2021-01-07T09:09:19 | 2020-10-08T18:18:50 | Python | UTF-8 | Python | false | false | 6,670 | py | import math
#action_left_limit
from copy import copy
from case_de_tableaux import Case
def show(states, size):
    """Print the board as a ``size`` x ``size`` grid, one row of cells per line.

    :param states: dict mapping (column, row) coordinates to tile values
    :param size: board side length.  (The parameter was previously named
        ``len``, shadowing the builtin; all call sites are positional.)
    """
    for col in range(size):
        line = ' '.join('| ' + str(states[(row, col)]) for row in range(size)) + ' |'
        # the extra '\n' keeps a blank line between rows, as before
        print(line + '\n')
# get first empty state of line or column
def first_state_emptyY(current, indiceFixe, taille, inverse=False):
    """Return a Case for the first empty (value 0) cell in column ``indiceFixe``.

    Rows are scanned top-to-bottom, or bottom-to-top when ``inverse`` is True.
    Returns None when the column has no empty cell.
    """
    rows = reversed(range(0, taille)) if inverse else range(0, taille)
    for row in rows:
        if current[(indiceFixe, row)] == 0:
            return Case(indiceFixe, row, current[(indiceFixe, row)])
    return None
def first_state_emptyX(current, indiceFixe, taille, inverse=False):
    """Return a Case for the first empty (value 0) cell in row ``indiceFixe``.

    Columns are scanned left-to-right, or right-to-left when ``inverse`` is
    True.  Returns None when the row has no empty cell.
    """
    cols = reversed(range(0, taille)) if inverse else range(0, taille)
    for col in cols:
        if current[(col, indiceFixe)] == 0:
            return Case(col, indiceFixe, current[(col, indiceFixe)])
    return None
# def getIndice(i, len):
# calc = len - (len - (i+1))
# return calc - 1
## Actions
class ActionImpl:
    """Base class for the four 2048 move actions (template-method pattern).

    A move is applied as: slide (decale) -> merge -> slide again.  Subclasses
    implement ``_merge_states`` and ``_decale_states`` for their direction.
    """

    def __init__(self, current_states, lenght):
        self.current_states = current_states
        self.lenght = lenght
        self.score = 0
        # Bug fix: this flag must be initialised *before* apply_action() runs.
        # The merge step clears it when at least one merge happens; the
        # original assigned True afterwards, unconditionally discarding that
        # information.
        self.no_mergable_case = True
        self.apply_action()

    def get_states(self):
        """Return the (mutated) board state dict."""
        return self.current_states

    def get_score(self):
        """Return the score accumulated by merges during this action."""
        return self.score

    def _merge_states(self):
        # Abstract hook: the original called itself here, causing infinite
        # recursion if a subclass forgot to override it.
        raise NotImplementedError("subclasses must implement _merge_states")

    def _decale_states(self):
        raise NotImplementedError("subclasses must implement _decale_states")

    def apply_action(self):
        """Run the full move: slide, merge, slide."""
        self._decale_states()
        self._merge_states()
        self._decale_states()
class UpActionImpl(ActionImpl):
    """Move and merge every tile upwards (towards y == 0)."""

    def _merge_states(self):
        # Scan each column top-down; when two vertically adjacent non-zero
        # tiles are equal, double the upper one and clear the lower one.
        for x in range(0, self.lenght):
            for y in range(0, self.lenght - 1):
                if self.current_states[(x, y)] == self.current_states[(x, y + 1)] \
                        and not self.current_states[(x, y)] == 0:
                    self.current_states[(x, y)] *= 2
                    self.current_states[(x, y + 1)] = 0
                    # score counts the value of the merged (doubled) tile
                    self.score += self.current_states[(x, y)]
                    self.no_mergable_case = False

                    # print(" > merger\n")
                    # show(self.current_states, self.lenght)

    def _decale_states(self):
        # Slide each non-zero tile into the first empty cell above it
        # (first_state_emptyY scans the column top-down).
        for x in range(0, self.lenght):
            for y in range(self.lenght):
                empty_state = first_state_emptyY(self.current_states, x, self.lenght)
                if empty_state is not None and empty_state.y < y and not self.current_states[(x, y)] == 0:
                    self.current_states[(empty_state.x, empty_state.y)] = self.current_states[(x, y)]
                    self.current_states[(x, y)] = 0

                    # print(" > decaler\n")
                    # show(self.current_states, self.lenght)
class DownActionImpl(ActionImpl):
    """Move and merge every tile downwards (towards y == lenght - 1)."""

    def _merge_states(self):
        # Scan each column bottom-up; equal vertically adjacent tiles merge
        # into the lower cell.
        for x in range(0, self.lenght):
            for y in reversed(range(1, self.lenght)):
                if self.current_states[(x, y)] == self.current_states[(x, y - 1)] \
                        and not self.current_states[(x, y)] == 0:
                    self.current_states[(x, y)] *= 2
                    self.current_states[(x, y - 1)] = 0
                    self.score += self.current_states[(x, y)]
                    self.no_mergable_case = False

                    # print(" > merger\n")
                    # show(self.current_states, self.lenght)

    def _decale_states(self):
        # Slide each non-zero tile into the first empty cell below it
        # (inverse=True makes first_state_emptyY scan bottom-up).
        for x in range(0, self.lenght):
            for y in reversed(range(0, self.lenght)):
                empty_state = first_state_emptyY(self.current_states, x, self.lenght, True)
                if empty_state is not None and empty_state.y > y and not self.current_states[(x, y)] == 0:
                    self.current_states[(empty_state.x, empty_state.y)] = self.current_states[(x, y)]
                    self.current_states[(x, y)] = 0

                    # print(" > decaler\n")
                    # show(self.current_states, self.lenght)
class LeftActionImpl(ActionImpl):
    """Move and merge every tile leftwards (towards x == 0)."""

    def _merge_states(self):
        # Scan each row left-to-right; equal horizontally adjacent tiles
        # merge into the left cell.
        for y in range(0, self.lenght):
            for x in range(0, self.lenght - 1):
                if self.current_states[(x, y)] == self.current_states[(x + 1, y)] \
                        and not self.current_states[(x, y)] == 0:
                    self.current_states[(x, y)] *= 2
                    self.current_states[(x + 1, y)] = 0
                    self.score += self.current_states[(x, y)]
                    self.no_mergable_case = False

                    # print(" > merger\n")
                    # show(self.current_states, self.lenght)

    def _decale_states(self):
        # Slide each non-zero tile into the first empty cell to its left
        # (first_state_emptyX scans the row left-to-right).
        for y in range(0, self.lenght):
            for x in range(0, self.lenght):
                empty_state = first_state_emptyX(self.current_states, y, self.lenght)
                if empty_state is not None and empty_state.x < x and not self.current_states[(x, y)] == 0:
                    self.current_states[(empty_state.x, empty_state.y)] = self.current_states[(x, y)]
                    self.current_states[(x, y)] = 0

                    # print(" > decaler\n")
                    # show(self.current_states, self.lenght)
class RightActionImpl(ActionImpl):
    """Move and merge every tile rightwards (towards x == lenght - 1)."""

    def _merge_states(self):
        # Scan each row right-to-left; equal horizontally adjacent tiles
        # merge into the right cell.
        for y in range(0, self.lenght):
            for x in reversed(range(1, self.lenght)):
                if self.current_states[(x, y)] == self.current_states[(x - 1, y)] \
                        and not self.current_states[(x, y)] == 0:
                    self.current_states[(x, y)] *= 2
                    self.current_states[(x - 1, y)] = 0
                    self.score += self.current_states[(x, y)]
                    self.no_mergable_case = False

                    # print(" > merger\n")
                    # show(self.current_states, self.lenght)

    def _decale_states(self):
        # Slide each non-zero tile into the first empty cell to its right
        # (inverse=True makes first_state_emptyX scan right-to-left).
        for y in range(0, self.lenght):
            for x in reversed(range(0, self.lenght)):
                empty_state = first_state_emptyX(self.current_states, y, self.lenght, True)
                if empty_state is not None and empty_state.x > x and not self.current_states[(x, y)] == 0:
                    self.current_states[(empty_state.x, empty_state.y)] = self.current_states[(x, y)]
                    self.current_states[(x, y)] = 0

                    # print(" > decaler\n")
                    # show(self.current_states, self.lenght)
| [
"uriel.vido@hotmail.fr"
] | uriel.vido@hotmail.fr |
5b79ef2e09e916e395b10697694b70a64388948c | 42c018a34285488c7bdaea525c24fae30e512415 | /model/operators.py | 2c061e750fbf6283c4c2207ae40e0444e5d50f0c | [
"MIT"
] | permissive | arita37/Zeroshot-GAN | 55f055a575988ee476d798e0fe4572113b2621cb | 1a99766c64bd6e28ff621cd04cb7d21ef6ce92db | refs/heads/master | 2022-01-07T17:10:29.829988 | 2018-04-29T09:06:38 | 2018-04-29T09:06:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,583 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
import scipy
import cPickle
import os
import glob
import random
import imageio
import scipy.misc as misc
# --- TF session / hardware options ---------------------------------------
log_device_placement = True
allow_soft_placement = True
gpu_options = 0.9 # presumably the per-process GPU memory fraction (multi-gpu) -- confirm at the tf.Session call site
# --- model hyper-parameters ----------------------------------------------
batch_size = 50
image_shape = [28*28]  # flattened 28x28 input
z_dim = 30 # latent space representation z proposed in the paper
gf_dim = 16  # base number of generator filters
df_dim = 16  # base number of discriminator filters
lr = 0.005   # learning rate
beta1 = 0.5  # NOTE(review): named like an Adam beta1 -- confirm where the optimizer is built
def batch_norm(x, is_training, epsilon=1e-5, decay=0.9, scope="batch_norm"):
    """Batch normalisation with this project's default settings.

    NOTE(review): a second ``batch_norm`` defined later in this module
    shadows this one at import time.
    """
    return tf.contrib.layers.batch_norm(x, decay=decay, updates_collections=None, epsilon=epsilon,
                                        scale=True, is_training=is_training, scope=scope)
def conv(x, filter_size, stride_width, stride_height, feature_in, feature_out, scope="conv2d", log_device_placement=True):
    """Square convolution with bias inside its own variable scope.

    ``log_device_placement`` is unused (kept for interface compatibility).
    """
    with tf.variable_scope(scope):
        weights = tf.get_variable("w", [filter_size, filter_size, feature_in, feature_out],
                                  initializer=tf.truncated_normal_initializer(stddev=0.02))
        bias = tf.get_variable("b", [feature_out], initializer=tf.constant_initializer(0.0))
        return tf.nn.conv2d(x, weights, strides=[1, stride_width, stride_height, 1], padding='SAME') + bias
def deconv(x, filter_size, stride_width, stride_height, feature_out, scope="deconv2d", log_device_placement=True):
    """Transposed convolution with bias inside its own variable scope.

    :param feature_out: full output shape ``[batch, height, width, channels]``
        (its last entry is used as the output channel count).
    """
    with tf.variable_scope(scope):
        w = tf.get_variable("w", [filter_size, filter_size, feature_out[-1], x.get_shape()[-1]],
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
        # Bug fix: was tf.constant_intializer (misspelled), which raised
        # AttributeError the first time this function was called.
        b = tf.get_variable("b", [feature_out[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.conv2d_transpose(x, w, strides=[1, stride_width, stride_height, 1], output_shape=feature_out) + b
    return deconv
def leakyrelu(x, leak=0.2, name='lrelu'):
    """Leaky ReLU expressed as 0.5*(1+leak)*x + 0.5*(1-leak)*|x|."""
    with tf.variable_scope(name):
        pos_coef = 0.5 * (1 + leak)
        neg_coef = 0.5 * (1 - leak)
        return pos_coef * x + neg_coef * abs(x)
def fc_layer(x, feature_in, feature_out, scope=None, with_w=False):
    """Fully-connected layer ``x @ W + b``.

    When ``with_w`` is True, also returns the weight and bias variables.
    """
    with tf.variable_scope(scope or "Linear"):
        weights = tf.get_variable("weights", shape=[feature_in, feature_out], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.02))
        bias = tf.get_variable("bias", shape=[feature_out], dtype=tf.float32,
                               initializer=tf.constant_initializer(0.0))
        output = tf.matmul(x, weights) + bias
    if with_w:
        return output, weights, bias
    return output
def init_embedding(size, dimension, stddev=0.01, scope="Embedding"):
    """Create the ``[size, 1, 1, dimension]`` embedding variable "E" under *scope*."""
    with tf.variable_scope(scope):
        embedding = tf.get_variable("E", shape=[size, 1, 1, dimension], dtype=tf.float32,
                                    initializer=tf.truncated_normal_initializer(stddev=stddev))
    return embedding
def merge(image, size):
    """Tile a batch of images into one big grid image.

    :param image: array of shape (batch, height, width, channels)
    :param size: (rows, cols) of the grid; rows * cols must cover the batch
    :returns: array of shape (rows*height, cols*width, channels)
    """
    # Bug fixes: dimensions must come from image.shape (image[1] indexed the
    # second image, not a dimension); np.zeros takes a single shape tuple
    # (the original passed three positional args, making the second the
    # dtype); and the grid position is derived from the enumeration index,
    # not from the pixel data.
    height, width, channel = image.shape[1], image.shape[2], image.shape[3]
    img = np.zeros((height * size[0], width * size[1], channel))
    for idx, single in enumerate(image):
        col = idx % size[1]
        row = idx // size[1]
        img[row * height:row * height + height, col * width:col * width + width, :] = single
    return img
def image_norm(image):
    """Scale pixel values from [0, 255] to [-1, 1].

    Bug fix: the computed result was previously discarded and the input
    returned unchanged.
    """
    normalized = (image / 127.5) - 1
    return normalized
#def dense_batch_norm(x, number_out, phase_train, name='bn'): #BN necessary?
#beta = tf.get_variable(name + '/fc_beta', shape=[number_out], initializer=tf.constant_initializer(0.0))
#gamma = tf.get_variable(name + 'fc_gamma', shape=[number_out], initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02))
#batch_mean, batch_var = tf.nn.moments(x, [0], name=name + '/fc_moments')
#ema = tf.train.ExponentialMovingAverage(decay=0.9)
#def mean_var_update():
# ema_apply_op = ema.apply([batch_mean, batch_var])
# with tf.control_dependencies(ema_apply_op):
# return tf.identity(batch_mean), tf.identity(batch_var)
#mean ,var = tf.cond(name=phase_train, mean_var_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
#normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
#return normed
#def global_batch_norm(x, number_out, phase_train, name='bn'): #BN necessary?
#beta = tf.get_variable(name + '/beta', shape=[number_out], initializer=tf.constant_initializer(0.0))
#gamma = tf.get_variable(name + '/gamma', shape=[number_out], initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02))
#batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name=name + '/moments')
#ema = tf.train.ExponentialMovingAverage(decay=0.9)
#def mean_var_update():
# ema_apply_op = ema.apply([batch_mean, batch_var])
# with tf.control_dependencies(ema_apply_op):
# return tf.identity(batch_mean), tf.identity(batch_var)
#mean, var = tf.cond(name=phase_train, mean_var_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
#normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
#return normed
def mini_batch_dis(x, num_kernels=100, dim_kernel=5, init=False, name='MD'): #decrease mode loss
    """Minibatch-discrimination layer: appends, to each flattened sample,
    batch-distance statistics so the discriminator can detect mode collapse.

    NOTE(review): ``init`` is unused; ``out`` below is computed but never
    returned (the function returns the concatenation of x and diff instead);
    and ``tf.sub`` is the pre-TF-1.0 name of ``tf.subtract`` -- this code
    presumably targets TF 0.x.  Confirm intent before reuse.
    """
    num_inputs = df_dim*4
    theta = tf.get_variable(name+'/theta', [num_inputs, num_kernels, dim_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
    log_weight_scale = tf.get_variable(name+'/lws', [num_kernels, dim_kernel], initializer=tf.constant_initializer(0.0))
    # project inputs through the (normalised, scaled) kernel tensor
    W = tf.matmul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)), 0))
    W = tf.reshape(W,[-1, num_kernels*dim_kernel])
    x = tf.reshape(x, [batch_size, num_inputs])
    ac = tf.reshape(tf.matmul(x, W), [-1, num_kernels, dim_kernel])
    # L1 distance of each sample's activation to every other sample's,
    # excluding self-comparison via the (1 - eye) mask
    diff = tf.matmul(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(ac, 3), tf.expand_dims(tf.transpose(ac, [1, 2, 0]),0))), 2),
                     1-tf.expand_dims(tf.constant(np.eye(batch_size), dtype=np.float32), 1))
    out = tf.reduce_sum(tf.exp(-diff),2) / tf.reduce_sum(tf.exp(-diff))
    return tf.concat([x, diff], 1)
def conv2d(x, output_filters, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope="conv2d"):
    """Convolution (default 5x5, stride 2) with bias, in its own variable scope."""
    with tf.variable_scope(scope):
        in_channels = x.get_shape().as_list()[-1]
        kernel = tf.get_variable('W', [kh, kw, in_channels, output_filters],
                                 initializer=tf.truncated_normal_initializer(stddev=stddev))
        bias = tf.get_variable('b', [output_filters], initializer=tf.constant_initializer(0.0))
        result = tf.nn.conv2d(x, kernel, strides=[1, sh, sw, 1], padding='SAME')
        return tf.reshape(tf.nn.bias_add(result, bias), result.get_shape())
def deconv2d(x, output_shape, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope="deconv2d"):
    """Transposed convolution (default 5x5, stride 2) with bias, in its own scope."""
    with tf.variable_scope(scope):
        in_channels = x.get_shape().as_list()[-1]
        kernel = tf.get_variable('w', [kh, kw, output_shape[-1], in_channels],
                                 initializer=tf.truncated_normal_initializer(stddev=stddev))
        bias = tf.get_variable('b', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        result = tf.nn.conv2d_transpose(x, kernel, output_shape=output_shape, strides=[1, sh, sw, 1])
        return tf.reshape(tf.nn.bias_add(result, bias), result.get_shape())
def batch_norm(x, is_training, epsilon=1e-5, decay=0.9, scope="batch_norm"):
    """Batch normalisation wrapper.

    NOTE(review): this re-defines (and therefore shadows) the ``batch_norm``
    declared earlier in this module.
    """
    normalized = tf.contrib.layers.batch_norm(x, decay=decay, updates_collections=None, epsilon=epsilon,
                                              scale=True, is_training=is_training, scope=scope)
    return normalized
# ---------------------- unit-test for conv & deconv ----------------------
# Smoke test run at import time: reads one PNG, pushes it through 8 stride-2
# convolutions down to (1, 1, 1, 512) and back up through 8 transposed
# convolutions to (1, 256, 256, 3), printing intermediate shapes.
# NOTE(review): the image path below is a hard-coded absolute path and only
# works on the original author's machine.
reader = tf.WholeFileReader()
directory = tf.train.string_input_producer(['/home/linkwong/Zeroshot-GAN/model/image.png'])
key, value = reader.read(directory)
image_tensor = tf.image.decode_png(value)
initialize = tf.global_variables_initializer()
generator_dim = 64
discriminator_dim = 64
output_width = 256

with tf.Session() as sess:
    sess.run(initialize)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(1):
        image = image_tensor.eval()
        image = tf.image.resize_images(image, [256, 256]) #resize the image into 256*256
        print(image.shape)
        image_ten = tf.convert_to_tensor(image, tf.float32) #convert the image into tensor
        print(image_ten.shape)
    coord.request_stop()
    coord.join(threads)

# Encoder: halves spatial size at each step, doubling filters up to 8x.
image_ten = tf.expand_dims(image_ten, 0) #(1, 256, 256, 3)
image_conv_1 = conv2d(image_ten, generator_dim, scope="conv_1") #(1, 128, 128, 64)
image_conv_2 = conv2d(image_conv_1, generator_dim*2, scope="conv_2")#(1, 64, 64, 128)
image_conv_3 = conv2d(image_conv_2, generator_dim*4, scope="conv_3")#(1, 32, 32, 256)
image_conv_4 = conv2d(image_conv_3, generator_dim*8, scope="conv_4")#(1, 16, 16, 512)
image_conv_5 = conv2d(image_conv_4, generator_dim*8, scope="conv_5")#(1, 8, 8, 512)
image_conv_6 = conv2d(image_conv_5, generator_dim*8, scope="conv_6")#(1, 4, 4, 512)
image_conv_7 = conv2d(image_conv_6, generator_dim*8, scope="conv_7")#(1, 2, 2, 512)
image_conv_8 = conv2d(image_conv_7, generator_dim*8, scope="conv_8")#(1, 1, 1, 512)
#print(image_conv_8.shape)

# Decoder: mirrors the encoder back to the input resolution.
image_deconv_8 = deconv2d(image_conv_8, [1, 2, 2, generator_dim*8], scope="deconv_8")#(1, 2, 2, 512)
image_deconv_7 = deconv2d(image_deconv_8, [1, 4, 4, generator_dim*8], scope="deconv_7")#(1, 4, 4, 512)
image_deconv_6 = deconv2d(image_deconv_7, [1, 8, 8, generator_dim*8], scope="deconv_6")#(1, 8, 8, 512)
image_deconv_5 = deconv2d(image_deconv_6, [1, 16, 16, generator_dim*8], scope="deconv_5")#(1, 16, 16, 512)
image_deconv_4 = deconv2d(image_deconv_5, [1, 32, 32, generator_dim*4], scope="deconv_4")#(1, 32, 32, 256)
image_deconv_3 = deconv2d(image_deconv_4, [1, 64, 64, generator_dim*2], scope="deconv_3")#(1, 64, 64, 128)
image_deconv_2 = deconv2d(image_deconv_3, [1, 128, 128, generator_dim], scope="deconv_2")#(1, 128, 128, 64)
image_deconv_1 = deconv2d(image_deconv_2, [1, 256, 256, 3], scope="deconv_1")
#print(image_deconv_1.shape)
| [
"zuiaineo@foxmail.com"
] | zuiaineo@foxmail.com |
10b5ae95355c1e9e2dc296bec61f375ee994c223 | a6106cedc42dcab94ccc4ee6d681372d2246ce5e | /python/활용자료/예제/13/ex13-3.py | 87df268d5b02428277de9099026e305e6aa4e74c | [] | no_license | leemyoungwoo/pybasic | a5a4b68d6b3ddd6f07ff84dc8df76da02650196f | 481075f15613c5d8add9b8c4d523282510d146d2 | refs/heads/master | 2022-10-08T19:57:26.073431 | 2020-06-15T06:50:02 | 2020-06-15T06:50:02 | 267,502,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | import numpy as np
# Examples of numpy.zeros: 1-D default float64, 2-D via a shape tuple,
# and an explicit integer dtype.
data1 = np.zeros((10,))
print(data1)
print(data1.dtype)

data2 = np.zeros((2, 3))
print(data2)

data3 = np.zeros((2, 3)).astype(np.int32)
print(data3) | [
"mwlee2587@gmail.com"
] | mwlee2587@gmail.com |
20d078b4e77f64731c2b7679455f1ac78f6bdf9c | c35362d5d17bc57688ea77e796d310fa34ac6b48 | /Antlr2/Source_Code_Analysis.py | 61feab8180764ffdca5fd79495a696904f76034b | [] | no_license | parsamorsal/Vulnerability-Insertion | a0a7820d62a76f287c8cb890dd381b86d7576658 | a3f9f20d000b04fc3ab84cff386924c614c53286 | refs/heads/master | 2020-09-20T22:13:45.513804 | 2019-11-28T09:05:41 | 2019-11-28T09:05:41 | 224,603,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,313 | py | import sys
import collections
from antlr4 import *
from CLexer import CLexer
from CParser import CParser
from CListener import CListener
def main():
    """Prepare ``program.c`` from the CLI argument, parse it with ANTLR and
    walk the resulting tree with our listener, then write its report."""
    get_file()
    input_stream = FileStream("program.c")
    token_stream = CommonTokenStream(CLexer(input_stream))
    tree = CParser(token_stream).compilationUnit()
    listener = myCListener()
    ParseTreeWalker().walk(listener, tree)
    listener.show()
def get_file():
    """Copy the C source named on the command line to ``program.c``,
    stripping the leading ``#include`` lines.

    Exits with status 1 (after printing an error) when no argument was given
    or the file does not exist.
    """
    try:
        with open(sys.argv[1], "rt") as src:
            line = src.readline()
            while line.startswith("#include"):
                line = src.readline()
            with open("program.c", "wt") as dst:
                # Bug fix: the first non-#include line was read to terminate
                # the loop but never written out, so it was silently dropped
                # from program.c.  Also: files are now closed via `with`.
                dst.write(line)
                dst.write(src.read())
    except IndexError:
        print("Error - You forgot to enter a file")
        sys.exit(1)
    except FileNotFoundError:
        print("Error - Please enter a valid file")
        sys.exit(1)
class myCListener(CListener):
    """ANTLR listener that symbolically tracks variable assignments in the
    non-main function of the parsed C program, builds symbolic formulas for
    the if-branch and for-branch paths, then evaluates them with the real
    argument values to detect integer overflow and division by zero.

    SECURITY NOTE(review): ``show`` evaluates source-derived strings with
    ``eval``; running this on untrusted C files executes arbitrary Python
    expressions.
    """

    def __init__(self):
        # is_main is True while we are still inside the analysed (non-main)
        # function; it flips to False once the "main" definition is entered.
        self.is_main = True
        self.variable = {}       # symbolic values per variable (straight-line code)
        self.variable_if = {}    # symbolic values along the if-branch
        self.variable_for = {}   # symbolic values along the for-branch
        self.parameter = []      # parameter names, later replaced by {name: actual}
        self.function_name = ""  # name of the analysed function
        self.in_if = False       # True while inside a selection statement
        self.if_condition = ""   # textual condition of the if statement

    def enterFunctionDefinition(self, ctx: CParser.FunctionDefinitionContext):
        """Record the analysed function's name; stop tracking once main starts."""
        # function_name = ctx.children.get(1).getChild(0).getChild(0).getText()
        function_name = ctx.children[1].getChild(0).getChild(0).getText()
        if function_name == "main":
            self.is_main = False
            return
        self.function_name = function_name

    def enterParameterDeclaration(self, ctx: CParser.ParameterDeclarationContext):
        """Collect the parameter names of the analysed function."""
        if self.is_main:
            self.parameter.append(ctx.declarator().getText())

    def enterInitDeclarator(self, ctx: CParser.InitDeclaratorContext):
        """Record declarations with initialisers, e.g. ``int x = a + 1;``."""
        if self.is_main:
            variable_name = ctx.declarator().getText()
            variable_value = ctx.initializer().getText()
            self.variable[variable_name] = variable_value

    def enterExpressionStatement(self, ctx: CParser.ExpressionStatementContext):
        """Track assignments; inside the call in main, bind actual arguments
        to the recorded parameter names."""
        expr = ctx.getText()
        if self.is_main:
            if not self.in_if:
                self.resolve(expr)
        else:
            # Extract the argument list from the call to the analysed
            # function (text between "<name>(" and the trailing "));").
            # expr = expr[17:] OR:
            # expr = re.sub("^printf\(\"%d\"," + "KjZC\(", "", expr) OR:
            expr = expr[expr.index(self.function_name) + len(self.function_name) + 1: -3]
            expr = expr.split(",")
            for i in range(len(expr)):
                self.parameter[i] = {self.parameter[i]: expr[i]}

    # if:
    def enterSelectionStatement(self, ctx: CParser.SelectionStatementContext):
        """Snapshot the current symbolic state and fold the if-body's
        assignments into variable_if (substituting known symbolic values)."""
        self.variable_if = self.variable.copy()
        self.in_if = True
        self.if_condition = ctx.children[2].getText()
        a = str(ctx.children[4].getText()[1:-2])
        a = a.split(";")
        for k in a:
            # self.resolve(k)
            k = k.replace(";", "")
            k = k.split("=")
            for i,j in self.variable_if.items():
                if i in k[1]:
                    # parenthesise to preserve precedence on substitution
                    # k[1] = k[1].replace(i, j)
                    k[1] = k[1].replace(i, "(" + j + ")")
            self.variable_if[k[0]] = k[1]

    def exitSelectionStatement(self, ctx: CParser.SelectionStatementContext):
        self.in_if = False

    # for:
    def enterIterationStatement(self, ctx: CParser.IterationStatementContext):
        """Same folding as the if-branch, but for the for-loop body."""
        self.variable_for = self.variable.copy()
        for_condition = ctx.children[2].getText().split(";")
        # eg. i=0  (loop initialiser; parsed but its result is overwritten below)
        a = str(for_condition[0]).split("=")
        a = str(ctx.children[4].getText())[1:-2]
        a = a.split(";")
        for k in a:
            # self.resolve(k)
            k = k.replace(";", "")
            k = k.split("=")
            for i, j in self.variable_for.items():
                if i in k[1]:
                    # k[1] = k[1].replace(i, j)
                    k[1] = k[1].replace(i, "(" + j + ")")
            self.variable_for[k[0]] = k[1]

    def resolve(self, expr):
        """Fold an assignment statement ``lhs=rhs;`` into the straight-line
        symbolic state, substituting already-known symbolic values."""
        expr = expr.replace(";", "")
        expr = expr.split("=")
        for i, j in self.variable.items():
            if i in expr[1]:
                # expr[1] = expr[1].replace(i, j)
                expr[1] = expr[1].replace(i, "(" + j + ")")
        self.variable[expr[0]] = expr[1]

    def show(self):
        """Write the symbolic formulas and their concrete evaluations to
        output.txt, counting integer overflows and divisions by zero."""
        f = open("output.txt", "wt")
        # Drop entries that merely duplicate the straight-line state.
        a = self.variable_if.copy()
        b = self.variable_for.copy()
        for i, j in self.variable.items():
            for k in list(a.values()):
                if j == k:
                    a.pop(i)
            for k in list(b.values()):
                if j == k:
                    b.pop(i)
        f.write("***********************************\n")
        f.write("Symbolic formula:\n")
        f.write("***********************************\n")
        # x = collections.OrderedDict(sorted(self.variable.items(), key=lambda t: t[1]))
        f.write("\nPath that leads to if:\n\n")
        # for i,j in x.items():
        #     f.write(i + ": " + j + "\n")
        for i, j in self.variable_if.items():
            f.write(i + ": " + j + "\n")
        # for i, j in sorted(self.variable_if.items()):
        #     f.write(i + ": " + j + "\n")
        # for i in sorted(self.variable_if.keys()):
        #     f.write(i + ": " + self.variable_if[i] + "\n")
        f.write("\n\nPath that leads to for:\n\n")
        for i, j in self.variable_for.items():
            if i != "i":
                f.write(i + ": " + j + "\n")
        # Substitute actual argument values for parameter names in every
        # symbolic formula and in the if-condition.
        for i in self.parameter:
            for x,y in i.items():
                for n,m in self.variable_if.items():
                    if x in m:
                        m = m.replace(x, y)
                        self.variable_if[n]=m
                for n,m in self.variable_for.items():
                    if x in m:
                        m = m.replace(x, y)
                        self.variable_for[n]=m
                if x in self.if_condition:
                    self.if_condition = self.if_condition.replace(x, y)
        f.write("\n------------------------------------------------------------------------------------------------------------------\n")
        f.write("\n***********************************\n")
        f.write("Real values:\n")
        f.write("***********************************\n")
        number_of_integer_overflow = 0
        number_of_division_by_zero = 0
        # SECURITY: eval on source-derived text -- see class docstring.
        if eval(self.if_condition):
            f.write("\nPath leads to if:\n")
            for i, j in self.variable_if.items():
                answer = 0
                try:
                    answer = eval(j)
                    # flag values outside the signed 32-bit int range
                    if answer > 2147483647 or answer < -2147483648:
                        number_of_integer_overflow += 1
                        f.write("Found Interger Overflow: " + i + " = " + str(answer) + "\n")
                    else:
                        f.write(i + ": " + str(answer) + "\n")
                except ZeroDivisionError:
                    number_of_division_by_zero += 1
                    f.write("Found By Division Error in " + i + "\n")
        else:
            f.write("\nPath leads to for:\n")
            for i, j in self.variable_for.items():
                answer = 0
                try:
                    answer = eval(j)
                    if answer > 2147483647 or answer < -2147483648:
                        number_of_integer_overflow += 1
                        f.write("Found Interger Overflow: " + i + " = " + str(answer) + "\n")
                    else:
                        f.write(i + ": " + str(answer) + "\n")
                except ZeroDivisionError:
                    number_of_division_by_zero += 1
                    f.write("Found By Division Error in " + i + "\n")
        f.write("\n***********************************\n")
        f.write("Number of Integer Overflow: " + str(number_of_integer_overflow) + "\n")
        f.write("Number of Division By Zero: " + str(number_of_division_by_zero) + "\n")
        f.write("***********************************\n")
if __name__ == '__main__':
main()
| [
"parsa.morsal@gmail.com"
] | parsa.morsal@gmail.com |
74b8ee0b52f345b94e5c3b3e63e8e2c9f074acd2 | 2ab1bfbc8c89a2448facf1d7636a80d7f9f45608 | /nlpnet/utils.py | eecb182865bcae1517d784e381149ab6e31114aa | [
"MIT"
] | permissive | eduardosan/nlpnet | e1fe7fbe4027eeda13710a7185af17efe2bf8d75 | 601f5411f8858505768e785d20d45abec100bb7e | refs/heads/master | 2019-07-12T03:00:04.421042 | 2014-05-19T18:38:17 | 2014-05-19T18:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,602 | py | # -*- coding: utf-8 -*-
"""
Utility functions
"""
import re
import logging
import nltk
import numpy as np
from nltk.tokenize.regexp import RegexpTokenizer
import config
import attributes
# these variables appear at module level for faster access and to avoid
# repeated initialization
_tokenizer_regexp = ur'''(?ux)
# the order of the patterns is important!!
([^\W\d_]\.)+| # one letter abbreviations, e.g. E.U.A.
\d{1,3}(\.\d{3})*(,\d+)| # numbers in format 999.999.999,99999
\d{1,3}(,\d{3})*(\.\d+)| # numbers in format 999,999,999.99999
\d+:\d+| # time and proportions
\d+([-\\/]\d+)*| # dates. 12/03/2012 12-03-2012
[DSds][Rr][Aa]?\.| # common abbreviations such as dr., sr., sra., dra.
[Mm]\.?[Ss][Cc]\.?| # M.Sc. with or without capitalization and dots
[Pp][Hh]\.?[Dd]\.?| # Same for Ph.D.
[^\W\d_]{1,2}\$| # currency
(?:(?<=\s)|^)[\#@]\w*[A-Za-z_]+\w*| # Hashtags and twitter user names
-[^\W\d_]+| # clitic pronouns with leading hyphen
\w+([-']\w+)*| # words with hyphens or apostrophes, e.g. não-verbal, McDonald's
-+| # any sequence of dashes
\.{3,}| # ellipsis or sequences of dots
\S # any non-space character
'''
_tokenizer = RegexpTokenizer(_tokenizer_regexp)
# clitic pronouns
_clitic_regexp_str = r'''(?ux)
(?<=\w) # a letter before
-(me|
te|
o|a|no|na|lo|la|se|
lhe|lho|lha|lhos|lhas|
nos|
vos|
os|as|nos|nas|los|las| # unless if followed by more chars
lhes)(?![-\w]) # or digits or hyphens
'''
_clitic_regexp = re.compile(_clitic_regexp_str)
def tokenize(text, clean=True):
    """
    Returns a list of lists of the tokens in text, separated by sentences.
    Each line break in the text starts a new list.

    :param clean: If True, performs some cleaning action on the text, such as replacing
        all digits for 9 (by calling :func:`clean_text`)
    """
    result = []
    if type(text) != unicode:
        text = unicode(text, 'utf-8')

    if clean:
        text = clean_text(text, correct=True)

    # detach clitic pronouns from their hosts before sentence splitting
    text = _clitic_regexp.sub(r' -\1', text)

    # trained Punkt model for Portuguese sentence segmentation (from NLTK)
    sent_tokenizer = nltk.data.load('tokenizers/punkt/portuguese.pickle')

    # Punkt ignores line breaks, so paragraphs (two consecutive line breaks)
    # are split manually before sentence tokenization.
    sentences = []
    for paragraph in text.split('\n\n'):
        sentences.extend(sent_tokenizer.tokenize(paragraph, realign_boundaries=True))

    for sentence in sentences:
        if sentence.strip() == '':
            continue
        result.append(_tokenizer.tokenize(sentence))

    return result
def clean_text(text, correct=True):
"""
Apply some transformations to the text, such as
replacing digits for 9 and simplifying quotation marks.
:param correct: If True, tries to correct punctuation misspellings.
"""
# replaces different kinds of quotation marks with "
# take care not to remove apostrophes
text = re.sub(ur"(?u)(^|\W)[‘’′`']", r'\1"', text)
text = re.sub(ur"(?u)[‘’`′'](\W|$)", r'"\1', text)
text = re.sub(ur'(?u)[«»“”]', '"', text)
if correct:
# tries to fix mistyped tokens (common in Wikipedia-pt) as ,, '' ..
text = re.sub(r'(?<!\.)\.\.(?!\.)', '.', text) # take care with ellipses
text = re.sub(r'([,";:])\1,', r'\1', text)
# inserts space after leading hyphen. It happens sometimes in cases like:
# blablabla -that is, bloblobloblo
text = re.sub(' -(?=[^\W\d_])', ' - ', text)
# replaces numbers with the 9's
text = re.sub(r'\d', '9', text)
# replaces special ellipsis character
text = text.replace(u'…', '...')
return text
_contractible_base = ur'''(?iux)
(
[ao]s?| # definite articles
um(as?)?|uns| # indefinite articles
is[st]o|aquilo| # demonstratives
es[st][ea]s?|
aquel[ea]s?|
el[ea]s?| # personal pronouns
outr[oa]s?
%s
)
$
'''
_contractible_de = re.compile(_contractible_base % u'|aqui|aí|ali|entre')
_contractible_em = re.compile(_contractible_base % '')
_contractible_art = re.compile('[oa]s?')
def contract(w1, w2):
    """
    Makes a contraction of two words.
    For example: contract('de', 'os') returns 'dos'

    If a contraction between the given words doesn't exist in Portuguese, a ValueError
    exception is thrown.

    The original capitalization of w1 is preserved on the result.
    """
    cap = attributes.get_capitalization(w1)
    w1 = w1.lower()
    w2 = w2.lower()
    contraction = None

    # Each branch builds the contracted form; the _contractible_* regexes
    # restrict which second words may contract with a given preposition.
    if w1 == 'de' and _contractible_de.match(w2):
        contraction = 'd' + w2
    elif w1 == 'em' and _contractible_em.match(w2):
        contraction = 'n' + w2
    elif w1 == 'por' and _contractible_art.match(w2):
        contraction = 'pel' + w2
    elif w1 == 'a':
        # 'a' + 'o(s)' -> 'ao(s)'; 'a' + a-initial word -> crasis 'à...'.
        # The 'o'/'os' check must come first: order matters here.
        if w2 in ['o', 'os']:
            contraction = 'a' + w2
        elif w2.startswith('a'):
            contraction = u'à' + w2[1:]
    elif w1 == 'para' and _contractible_art.match(w2):
        contraction = 'pr' + w2
    elif w1 == 'com':
        # 'com' only contracts with specific pronouns (irregular forms).
        if w2 == 'mim':
            contraction = 'comigo'
        elif w2 == 'ti':
            contraction = 'contigo'
        elif w2 == 'si':
            contraction = 'consigo'
        elif w2 == u'nós':
            contraction = 'conosco'
        elif w2 == u'vós':
            contraction = 'convosco'
    elif w1 == 'lhe' and _contractible_art.match(w2):
        contraction = 'lh' + w2
    elif w1 == "d'":
        # elided form already carries the apostrophe; just concatenate
        contraction = w1 + w2

    if contraction is None:
        raise ValueError('Unexpected contraction: "%s" and "%s"' % (w1, w2))

    return attributes.capitalize(contraction, cap)
def generate_feature_vectors(num_vectors, num_features, min_value=-0.1, max_value=0.1):
    """
    Generates vectors of real numbers, to be used as word features.
    Vectors are initialized randomly with values in [min_value, min_value + 2*max_value).
    Returns a 2-dim numpy array.
    """
    logger = logging.getLogger("Logger")
    span = max_value * 2
    vectors = span * np.random.random((num_vectors, num_features)) + min_value
    logger.debug("Generated %d feature vectors with %d features each." % (num_vectors,
                                                                          num_features))
    return vectors
def count_pos_tags():
    """Counts and returns how many POS tags there are (one tag per line)."""
    with open(config.FILES['pos_tags'], 'rb') as f:
        lines = f.read().split('\n')
    return len(lines)
def count_chunk_tags():
    """Counts and returns how many chunk tags there are (one tag per line)."""
    with open(config.FILES['chunk_tags']) as f:
        lines = f.read().split('\n')
    return len(lines)
def _create_affix_tables(affix, table_list, num_features):
    """
    Internal helper for appending suffix or prefix feature tables to the
    given list, one table per affix size.

    affix should be either 'suffix' or 'prefix'.
    """
    logger = logging.getLogger('Logger')
    logger.info('Generating %s features...' % affix)
    codes = getattr(attributes.Affix, '%s_codes' % affix)
    num_affixes_per_size = getattr(attributes.Affix, 'num_%ses_per_size' % affix)
    for size in codes:
        # use num_*_per_size because it accounts for special affix codes
        table = generate_feature_vectors(num_affixes_per_size[size], num_features)
        # the affix attribute is conceptually a 3-dim tensor: one 2-d table
        # appended per affix size
        table_list.append(table)
def create_feature_tables(args, md, text_reader):
    """
    Create the feature tables to be used by the network. If the args object
    contains the load_types option as true, the feature table for word types
    is loaded instead of being created. The actual number of
    feature tables will depend on the argument options.

    :param args: Parameters supplied to the program
    :param md: metadata about the network
    :param text_reader: The TextReader being used.
    :returns: all the feature tables to be used
    """
    logger = logging.getLogger("Logger")
    feature_tables = []

    if not args.load_types:
        logger.info("Generating word type features...")
        table_size = len(text_reader.word_dict)
        types_table = generate_feature_vectors(table_size, args.num_features)
    else:
        logger.info("Loading word type features...")
        types_table = load_features_from_file(config.FILES[md.type_features])

        if len(types_table) < len(text_reader.word_dict):
            # the type dictionary provided has more types than
            # the number of feature vectors. So, let's generate
            # feature vectors for the new types by replicating the vector
            # associated with the RARE word
            diff = len(text_reader.word_dict) - len(types_table)
            logger.warning("Number of types in feature table and dictionary differ.")
            logger.warning("Generating features for %d new types." % diff)
            num_features = len(types_table[0])
            new_vecs = generate_feature_vectors(diff, num_features)
            types_table = np.append(types_table, new_vecs, axis=0)
        elif len(types_table) > len(text_reader.word_dict):
            # Bug fix: this branch previously repeated the "<" comparison of
            # the branch above and was therefore unreachable; the warning
            # text clearly refers to the opposite case (more vectors than
            # dictionary entries).
            logger.warning("Number of features provided is greater than the number of tokens\
            in the dictionary. The extra features will be ignored.")

    feature_tables.append(types_table)

    # Capitalization
    if md.use_caps:
        logger.info("Generating capitalization features...")
        caps_table = generate_feature_vectors(attributes.Caps.num_values, args.caps)
        feature_tables.append(caps_table)

    # Prefixes
    if md.use_prefix:
        _create_affix_tables('prefix', feature_tables, args.prefix)

    # Suffixes
    if md.use_suffix:
        _create_affix_tables('suffix', feature_tables, args.suffix)

    # POS tags
    if md.use_pos:
        logger.info("Generating POS features...")
        num_pos_tags = count_pos_tags()
        pos_table = generate_feature_vectors(num_pos_tags, args.pos)
        feature_tables.append(pos_table)

    # chunk tags
    if md.use_chunk:
        logger.info("Generating chunk features...")
        num_chunk_tags = count_chunk_tags()
        chunk_table = generate_feature_vectors(num_chunk_tags, args.chunk)
        feature_tables.append(chunk_table)

    return feature_tables
def set_distance_features(max_dist=None,
                          num_target_features=None, num_pred_features=None):
    """
    Build the two distance feature tables used by a convolutional network:
    one for the relative distance to the target word, one for the relative
    distance to the predicate.

    :param max_dist: maximum distance that gets its own feature vector;
        values beyond it share the out-of-range vectors.
    :param num_target_features: vector size for target-distance features.
    :param num_pred_features: vector size for predicate-distance features.
    :returns: a list [target_distance_table, predicate_distance_table].
    """
    log = logging.getLogger("Logger")
    # Rows cover -max_dist..max_dist plus one out-of-range bucket per side.
    table_rows = 2 * (max_dist + 1) + 1
    log.info("Generating target word distance features...")
    target_table = generate_feature_vectors(table_rows, num_target_features)
    log.info("Generating predicate distance features...")
    predicate_table = generate_feature_vectors(table_rows, num_pred_features)
    return [target_table, predicate_table]
def make_contractions_srl(sentences, predicates):
    """
    Makes preposition contractions in the input data for SRL with Portuguese text.
    It will contract words likely to be contracted, but there's no way to be
    sure the contraction actually happened in the corpus.
    :param sentences: the sentences list used by SRLReader objects.
    :param predicates: the predicates list used by SRLReader objects.
    :returns: a tuple (sentences, predicates) after contractions have been made.
    """
    # Word classes that can fuse with a preceding preposition in Portuguese,
    # e.g. "de" + "o" -> "do", "em" + "a" -> "na", "a" + "aquele" -> "àquele".
    def_articles = ['a', 'as', 'o', 'os']
    adverbs = [u'aí', 'aqui', 'ali']
    pronouns = ['ele', 'eles', 'ela', 'elas', 'esse', 'esses',
                'essa', 'essas', 'isso', 'este', 'estes', 'esta',
                'estas', 'isto', ]
    pronouns_a = ['aquele', 'aqueles', 'aquela', 'aquelas', 'aquilo',]
    for (sent, props), preds in zip(sentences, predicates):
        for i, token in enumerate(sent):
            try:
                next_token = sent[i + 1]
                next_word = next_token.word
            except IndexError:
                # we are already at the last word.
                break
            # look at the arg types for this and the next token in all propostions
            arg_types = [prop[i] for prop in props]
            next_arg_types = [prop[i + 1] for prop in props]
            # store the type of capitalization to convert it back
            word = token.word.lower()
            cap = attributes.get_capitalization(token.word)
            # Redefined every iteration so it closes over the current
            # token/next_token/cap/i.  Merges the two tokens in place
            # (word, lemma, joined POS), deletes the second one, and shifts
            # predicate indices and proposition tags accordingly.
            # NOTE(review): this mutates `sent` while enumerate(sent) is
            # iterating it; it works because only positions > i are removed,
            # but confirm before reordering the loop.
            def contract(new_word, new_lemma):
                token.word = attributes.capitalize(new_word, cap)
                token.lemma = new_lemma
                token.pos = '%s+%s' % (token.pos, next_token.pos)
                sent[i] = token
                del sent[i + 1]
                # removing a token will change the position of predicates
                preds[preds > i] -= 1
                # NOTE(review): deletes the tag at i (not i+1); the two tags
                # were checked equal-or-O below, so either index works except
                # when the first tag was 'O' -- confirm intent.
                for prop in props: del prop[i]
            # check if the tags for this token and the next are the same in all propositions
            # if the first is O, however, we will merge them anyway.
            if all(a1 == a2 or a1 == 'O' for a1, a2 in zip(arg_types, next_arg_types)):
                if word == 'de' and next_word in (def_articles + pronouns + pronouns_a + adverbs):
                    contract('d' + next_word, 'd' + next_token.lemma)
                elif word == 'em' and next_word in (def_articles + pronouns + pronouns_a):
                    contract('n' + next_word, 'n' + next_token.lemma)
                elif word == 'por' and next_word in def_articles:
                    contract('pel' + next_word, 'pel' + next_token.lemma)
                elif word == 'a':
                    if next_word in pronouns_a:
                        contract(u'à' + next_word[1:], u'à' + next_token.lemma[1:])
                    elif next_word in ['o', 'os']:
                        contract('a' + next_word, 'ao')
                    elif next_word == 'a':
                        contract(u'à', 'ao')
                    elif next_word == 'as':
                        contract(u'às', 'ao')
    return (sentences, predicates)
def set_logger(level):
    """Configure the shared "Logger" instance used throughout the system."""
    logging.basicConfig(format='%(message)s')
    shared_logger = logging.getLogger("Logger")
    shared_logger.setLevel(level)
def load_features_from_file(features_file):
    """Load a feature table previously stored in NumPy binary format."""
    return np.load(features_file)
def save_features_to_file(table, features_file):
    """Persist a feature table to the given file in NumPy binary format."""
    np.save(features_file, table)
def convert_iobes_to_bracket(tag):
    """
    Convert a tag from the IOBES scheme to the CoNLL bracketing.

    Examples:
        B-A0 -> (A0*
        I-A0 -> *
        E-A0 -> *)
        S-A1 -> (A1*)
        O    -> *

    :raises ValueError: for a tag starting with any other letter.
    """
    prefix = tag[:1]
    if prefix in ('I', 'O'):
        return '*'
    if prefix == 'B':
        return '(%s*' % tag[2:]
    if prefix == 'E':
        return '*)'
    if prefix == 'S':
        return '(%s*)' % tag[2:]
    raise ValueError("Unknown tag: %s" % tag)
def boundaries_to_arg_limits(boundaries):
    """
    Converts a sequence of IOBES tags delimiting arguments to an array
    of argument boundaries, used by the network.

    :param boundaries: iterable of tags; 'S' marks a single-token argument,
        'B'/'E' mark the start/end of a multi-token one.
    :returns: an integer numpy array of shape (num_args, 2) holding
        inclusive [start, end] token indices.
    """
    limits = []
    start = None
    for i, tag in enumerate(boundaries):
        if tag == 'S':
            # Single-token argument: starts and ends at the same index.
            limits.append([i, i])
        elif tag == 'B':
            start = i
        elif tag == 'E':
            limits.append([start, i])
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` selects the same default integer dtype.
    return np.array(limits, int)
| [
"erickrfonseca@gmail.com"
] | erickrfonseca@gmail.com |
638d1060ff17dc040d47d964c661fab869e786ca | caf90dfe5eb1d9f5ad70b46def46346b06c7c651 | /lesson6.py | 4c2e3dafbee923f28b621a15b3a40eae7bc8f89d | [] | no_license | cheyangggg/cheyangdecangku | 1f9180993f4196db999076544852302be867847c | 85376712638d3cb76d62f76b4e3cfb9f5cc45f0e | refs/heads/master | 2022-11-17T11:10:49.475075 | 2020-07-16T11:08:46 | 2020-07-16T11:08:46 | 272,627,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,741 | py |
# 接口自动化步骤
# 1.excl测试用例准备ok,代码自动读取测试数据
# 2.发送接口请求,得到响应信息
# 3.断言:实际结果和预期结果比较——通过/不通过
# 4.写入通过/不通过到excl里
# 第三方库:操作excl表格————openpyxl库:实现excl读取测试数据,并写入数据
# 1.安装 pip install openpyxl 2.导入
# 注意:把文件拉到pycharm里,lesson6同级,方便读取
# EXcel中三大对象:
# 1.工作簿workbook
# 2.表单shell sheet = wb['register']获取表单
# 3.单元格cell cell = sheet.cell(row = 2 , column = 1)通过表单获取行号列号--单元格
# cell = sheet.cell(row = 2 , column = 1).vale获取单元格内元素
import openpyxl
import requests
# Reads the test cases from the workbook.
def read_data(filename,sheetname):
    """Read test cases from an Excel sheet.

    Data rows start at row 2 (row 1 is the header); columns 1/5/6/7 hold
    the case id, URL, request payload and expected result respectively.
    Returns a list with one dict per test case.
    """
    wb = openpyxl.load_workbook(filename)  # load the workbook by file name
    sheet = wb[sheetname]
    max_row = sheet.max_row  # number of rows in use; loop ends at max_row+1
    case_list = []  # accumulates one dict per test case
    for i in range(2,max_row+1):
        dict1 = dict(
            case_id = sheet.cell(row = i , column = 1).value,
            url = sheet.cell(row = i , column = 5).value,  # request URL
            data = sheet.cell(row = i ,column = 6).value,  # request payload (stringified dict)
            expect = sheet.cell(row=i, column=7).value
        )
        case_list.append(dict1)  # store the row just read
    return case_list  # all test cases
# cases = read_data('test_case_api.xlsx','register')
# print(cases)
# Writes a verdict back into the workbook.
def write_result(filename,sheetname,row,column,final_result):
    """Write a pass/fail verdict into the given cell and save the workbook."""
    wb = openpyxl.load_workbook(filename)
    sheet = wb[sheetname]
    sheet.cell(row = row ,column = column).value = final_result  # write the verdict
    # NOTE(review): saves to a hard-coded file name rather than `filename`.
    wb.save('test_case_api.xlsx')
# write_result('test_case_api.xlsx','login',3,8,"Failed")
# Executes one API request.
def api_fun(url,data):
    """POST `data` as JSON to `url` and return the decoded JSON response."""
    headers_log = {"X-Lemonban-Media-Type":"lemonban.v2","Content-Type":"application/json"}
    res = requests.post(url=url,json=data,headers=headers_log)
    response = res.json()
    return response
# Run every case, assert on `msg`, and write the verdict back to the workbook.
cases = read_data('test_case_api.xlsx','register')  # load the test data
for case in cases:
    case_id = case.get('case_id')  # same as case['case_id']
    url = case.get('url')
    data = eval(case.get('data'))
    expect = eval(case.get('expect'))  # expected result
    expect_msg = expect.get('msg')  # expected `msg` field
    real_result = api_fun(url = url,data= data) #call the API; cell values are strings,
    # so eval() above turns them back into dicts.
    # NOTE(review): eval() executes arbitrary code from the spreadsheet --
    # prefer ast.literal_eval for untrusted workbooks.
    real_msg = real_result.get('msg')  # actual `msg` field
    print('预期结果中的msg:{}'.format(expect_msg))
    print('实际结果中的msg:{}'.format(real_msg))
    if real_msg == expect_msg:
        print('第{}条用例执行通过!'.format(case_id))
        final_re = "Passed"
    else:
        print('第{}条用例测试不通过!'.format(case_id))
        final_re = "Failed"
    # Result column is 8; row is case_id + 1 to skip the header row.
    write_result('test_case_api.xlsx','register',case_id+1,8,final_re)
    print('*'*20)
# Resume note: familiar with Python; can write interface-automation scripts
# with requests and openpyxl on top of an existing framework.
| [
"cheyang1118@163.com"
] | cheyang1118@163.com |
52910eb5556673b244e2ed6934696591f10c0ddc | 98681f52c31debaf58c75202b945ec11426b1ff3 | /samples/py_sample/run_sample.py | 0e94c486b45305c4a9959f5c314931c84887b49e | [
"MIT"
] | permissive | whn09/EDCC-Palmprint-Recognition | d59c8fb92f2ddae315c62fd9aece11b042da4afe | 57f589f4d9f0bb522a9f8ca9d677b918a8761e16 | refs/heads/master | 2020-03-22T16:57:48.566914 | 2019-01-25T04:25:02 | 2019-01-25T04:25:02 | 140,363,523 | 1 | 0 | null | 2018-07-10T01:59:41 | 2018-07-10T01:59:41 | null | UTF-8 | Python | false | false | 6,074 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*
import os
from edcc_adapter import *
from PalmprintImageFactory import *
from PalmprintCodeRepository import *
import time
class PalmprintCodeDTO(object):
    """Pairs a palmprint's identity/image info with its EDCC coding."""

    def __init__(self, ID, instanceID, imagePath, code):
        """Build the DTO from raw DB columns plus the binary coding."""
        self.code = code
        self.palmprint = PalmprintDTO(ID, instanceID, imagePath)
class EDCCSample(object):
    """End-to-end EDCC palmprint-recognition demo.

    Builds a SQLite repository of EDCC codings for every training image,
    matches each predict image against the repository, and reports top-1
    accuracy plus per-match timing.
    """

    def __init__(self):
        self._edcc_api = EdccApi()
        self._factory = PalmprintImageFactory(4)
        self._dbPath = "./R_PALMPRINTCODE.db"
        # Start from a clean database on every run.
        if os.path.exists(self._dbPath):
            os.remove(self._dbPath)
        self._palmprintcodeRepository = PalmprintCodeRepo(self._dbPath)
        self._configPath = os.path.normpath(
            os.path.join(os.getcwd(), "edcc_config/config.json"))
        self._succNum = 0
        self._failNum = 0
        self._wrong_list = []

    def runSample(self):
        """Code the training set, match every predict image, print a summary."""
        self._initDB()
        self._readDB()
        predictGroup = self._factory.predictGroup
        total_cost_time = 0.0
        total_match_count = 0
        for predict in predictGroup:
            predictPalmprintCode, codingLen = self._edcc_api.GetEDCCCoding(
                predict.imagePath, self._configPath)
            K = 3  # keep the K best-scoring training samples
            topKMatchScore = []
            timeBegin = time.time()
            for trainPalmprintCode in self.palmprintcodelist:
                matchScore = self._edcc_api.GetTwoPalmprintCodingMatchScore(
                    predictPalmprintCode, trainPalmprintCode.code)
                dictTmp = {}
                dictTmp["instance"] = trainPalmprintCode
                dictTmp["score"] = matchScore
                topKMatchScore.append(dictTmp)
            timeEnd = time.time()
            costTime = (timeEnd - timeBegin) * 1000  # ms for one full sweep
            total_cost_time += costTime
            total_match_count += 1
            topKMatchScore = sorted(
                topKMatchScore, key=lambda p: p["score"], reverse=True)
            while len(topKMatchScore) > K:
                topKMatchScore.pop()
            self.statisticsResult(predict, topKMatchScore, costTime)
        print(
            "\n\n=========================================================================")
        print("Predict Over.\nTotal:%d\tPredictCorrect:%d\tAccuracy:%lf%%" % (
            len(predictGroup), self._succNum, float(self._succNum) / len(predictGroup) * 100))
        print("Total Cost Time:%lf ms\tMatch Count:%d\tPer Cost Time:%lf ms" % (
            total_cost_time, total_match_count, total_cost_time / total_match_count))
        if self._wrong_list:
            print("Wrong List:")
            for record in self._wrong_list:
                print(record)
        print(
            "=========================================================================\n\n")

    def statisticsResult(self, predict, topKMatchScore, costTime):
        """Pick the best-matching ID from the top-K scores and tally the outcome."""
        resultsDict = {}
        idCountDict = {}
        for result in topKMatchScore:
            palmprintCodeInstance = result["instance"]
            ID = palmprintCodeInstance.palmprint.id
            score = result["score"]
            if ID not in resultsDict.keys():
                resultsDict[ID] = 0.0
                idCountDict[ID] = 0
            if score > resultsDict[ID]:
                resultsDict[ID] = score
                idCountDict[ID] = idCountDict[ID] + 1
        # for ID in resultsDict.keys():
        #     resultsDict[ID] = resultsDict[ID] / float(idCountDict[ID])
        resultsDict = sorted(resultsDict.items(),
                             key=lambda r: r[1], reverse=True)
        bestMatchID = resultsDict[0][0]
        bestMatchScore = resultsDict[0][1]
        predictHeadStr = "Predict:"
        trainStr = "ID:%s InstanceID:%s" % (predict.id, predict.instanceID)
        predictStr = "BestMatch: ID:%s MatchScore:%lf CostTime:%lf ms" % (
            bestMatchID, bestMatchScore, costTime)
        # BUG FIX: the original took max(len(trainStr), len(predictStr),
        # len(predictStr)), repeating predictStr instead of predictHeadStr.
        resultLineMaxLen = max(len(predictHeadStr), len(trainStr), len(predictStr)) + 5
        print('-'*resultLineMaxLen)
        print('| '+predictHeadStr+(resultLineMaxLen-len(predictHeadStr)-3)*' '+'|')
        print('| '+trainStr+(resultLineMaxLen-len(trainStr)-3)*' '+'|')
        print('| '+predictStr+(resultLineMaxLen-len(predictStr)-3)*' '+'|')
        if bestMatchID == predict.id:
            self._succNum = self._succNum + 1
            print('| '+'Correct Match' +
                  (resultLineMaxLen-len('Correct Match')-3)*' '+'|')
        else:
            self._failNum = self._failNum + 1
            # BUG FIX: the original appended the undefined name `resultStr`,
            # raising NameError on the first mispredicted sample. Record the
            # ground truth together with the wrong best match instead.
            self._wrong_list.append(trainStr + ' | ' + predictStr)
            print('| '+'Error Match'+(resultLineMaxLen-len('Error Match')-3)*' '+'|')
        print('-'*resultLineMaxLen+'\n\n')

    def _initDB(self):
        """Code every training image and insert any missing rows into the DB."""
        print("Init PalmprintCodeDB")
        self._palmprintcodeRepository.startExecute()
        for train in self._factory.trainGroup:
            results = self._palmprintcodeRepository.selectPalmprintByIDInstanceID(
                train.id, train.instanceID)
            if len(results):
                continue  # already coded in a previous run
            codingBytes, codingLen = self._edcc_api.GetEDCCCoding(
                train.imagePath, self._configPath)
            self._palmprintcodeRepository.insertPalmprint(train, codingBytes)
            print("Insert ID:%s\tinstanceID:%s\tImagePath:%s" %
                  (train.id, train.instanceID, train.imagePath))
        self._palmprintcodeRepository.endExecute()

    def _readDB(self):
        """Load every stored coding into `self.palmprintcodelist`."""
        print("Read PalmprintCodeDB")
        self._palmprintcodeRepository.startExecute()
        allPalmprintCodeData = self._palmprintcodeRepository.selectAllPalmprint()
        self.palmprintcodelist = []
        for palmprint in allPalmprintCodeData:
            palmprintcode = PalmprintCodeDTO(
                palmprint[0], palmprint[1], palmprint[2], palmprint[3])
            self.palmprintcodelist.append(palmprintcode)
        self._palmprintcodeRepository.endExecute()
if __name__ == '__main__':
    # Script entry point: run the full EDCC recognition demo.
    EDCCSample().runSample()
| [
"513887568@qq.com"
] | 513887568@qq.com |
6760603b7754e9c68b36c7f7dd9d84a2f2b0021b | f4a01269d4c2ce3d6cebab5731620a343cd92337 | /tingweb/migrations/0010_userresetpassword.py | 5602776ef67acadf2dd15c8fba4bca92add32e14 | [] | no_license | irchriscott/Ting.com-Web | 12ca685208b14d09f95eb2dfe832fdff7fa0cf4c | 23121327a02bc437038fc507681a11a599640574 | refs/heads/master | 2023-01-07T19:22:35.435177 | 2021-04-04T13:15:34 | 2021-04-04T13:15:34 | 189,359,859 | 1 | 0 | null | 2023-01-07T06:27:32 | 2019-05-30T06:38:08 | JavaScript | UTF-8 | Python | false | false | 977 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-12 08:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Creates the UserResetPassword table: one row per password-reset
    request, holding the reset token, an active flag and an expiry time,
    linked to a tingweb.User (cascade on delete)."""

    dependencies = [
        ('tingweb', '0009_promotion_category'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserResetPassword',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=200)),
                ('token', models.TextField()),
                ('is_active', models.BooleanField(default=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('expired_at', models.DateTimeField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tingweb.User')),
            ],
        ),
    ]
| [
"irchristianscott@gmail.com"
] | irchristianscott@gmail.com |
d5a2904e70fa7cb4092b1e19025ce5bf4d415b44 | 39e1e256acae3fe9be4434024d42b9bb47bdd02f | /browser-request-logger.py | 46a0ec65b4fa7422e210f90bead13609251462a9 | [] | no_license | neulab/tranx-study | 9fb67b9a2181f0b362e4f97316c502eee4539b19 | e2a7089689f7f95e773e19c8f19513abe4fb8b9b | refs/heads/master | 2023-06-14T04:46:01.010892 | 2021-07-08T09:29:05 | 2021-07-08T09:29:05 | 250,357,553 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | import os
import time
import requests
from utils import read_current_user_task
"""
RequestsLogger is an addon for mitmdump that logs information about request
and response pairs to server. As it does not do any changes to the body of the
request/response, I advise to run it with `--set stream_large_bodies=1`,
which enables streaming of the request/response body.
"""
class RequestsLogger:
    """mitmdump addon that reports visited HTML pages to a logging server.

    Only request/response metadata is inspected, so run mitmdump with
    `--set stream_large_bodies=1` to stream large bodies instead of
    buffering them.
    """

    def __init__(self):
        # One session so keep-alive connections are reused across posts.
        self.s = requests.Session()
        # User/task are read once at startup; restart the addon if they change.
        self.user, self.task = read_current_user_task()

    def done(self):
        """mitmproxy shutdown hook: release the HTTP session."""
        self.s.close()

    def response(self, flow):
        """mitmproxy hook, called once per completed HTTP(S) exchange.

        Reports every successful GET whose response is an HTML document.
        """
        if flow.request.method != "GET" or flow.response.status_code != 200:
            return
        if not flow.response.headers:
            return
        for k, v in flow.response.headers.items():
            if k.lower() == 'content-type' and 'text/html' in v.lower():
                payload = {'url': flow.request.url,
                           'local_timestamp': int(time.time()),
                           'userid': self.user,
                           'task': self.task,
                           }
                try:
                    self.s.post('http://moto.clab.cs.cmu.edu:8081/browser_log', json=payload, timeout=2.0)
                except requests.RequestException as exc:
                    # BUG FIX: the original bare `except:` also swallowed
                    # KeyboardInterrupt/SystemExit and hid the cause.
                    # Logging stays best-effort: never break the proxied flow.
                    print('browser log post failed: %s' % exc)
                return
return
plugin = RequestsLogger()
addons = [
plugin
]
| [
"frankxu2004@gmail.com"
] | frankxu2004@gmail.com |
3d4c3eb3118985746e7b85d5db222f792fcbc570 | 6879753209a2c1641b143a18640d7e83c325b198 | /RankCompare.py | 73566ee602eddd5e0dc0005a795ff59a04afc83d | [] | no_license | EmileDHaene/WordFrequencyCounters | 7a74fa1e3d61efd277e469851663ac72ba386877 | cf3f0745c905e42c819955521b0e4d3cb9f33e56 | refs/heads/master | 2022-07-11T21:19:35.678548 | 2020-05-17T16:34:20 | 2020-05-17T16:34:20 | 264,710,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | import matplotlib.pyplot as plt
import sys
import operator
import argparse
import csv
import numpy
def main():
    """Parse CLI arguments, validate both input files, run the comparison.

    Exits with status 1 (message on stderr) if either file is missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "word",
        help="the word to be searched for in the text file."
    )
    parser.add_argument(
        "filename",
        help="the path to the text file to be searched through"
    )
    parser.add_argument(
        "filename2",
        help="The second file to be searched through to compare the word rank to"
    )
    args = parser.parse_args()
    # BUG FIX: the original left the probe file handle open (leak) and never
    # checked the second file, so a missing filename2 crashed later.
    for path in (args.filename, args.filename2):
        try:
            with open(path):
                pass
        except FileNotFoundError:
            # Custom error print
            sys.stderr.write("Error: " + path + " does not exist!")
            sys.exit(1)
    word_freq(args.word, args.filename, args.filename2)
def word_freq(word, filename, filename2):
    """Compare the frequency rank of `word` across two text files.

    Builds a case-insensitive frequency table for each file (tokens are
    split on single spaces), prints the rank and frequency differences of
    `word` between the files, dumps both ranked tables to Output1RANK.csv
    and Output2RANK.csv, and draws both rank-vs-frequency curves on a
    log-log plot.

    Exits with status 1 (message on stderr) if `word` is missing from
    either file.
    """
    def _count_words(path):
        # Frequency table: lowercased token -> occurrence count.
        counts = {}
        # BUG FIX: `with` closes the handle; the original leaked it.
        with open(path, encoding='utf-8', errors='ignore') as handle:
            for line in handle:
                # Assume each word is separated by a space
                for token in line.lower().split(' '):
                    counts[token] = counts.get(token, 0) + 1
        return counts

    doc = _count_words(filename)
    if word not in doc:
        sys.stderr.write("Error: " + word + " does not appear in " + filename)
        sys.exit(1)

    doc2 = _count_words(filename2)
    if word not in doc2:
        sys.stderr.write("Error: " + word + " does not appear in " + filename2)
        sys.exit(1)

    # Most-frequent token first.
    sorted_doc = sorted(doc.items(), key=operator.itemgetter(1))[::-1]
    sorted_doc2 = sorted(doc2.items(), key=operator.itemgetter(1))[::-1]

    def _ranks(ranked):
        # Returns (ranks 1..N, occurrence counts, rank of `word`, its count).
        rank_axis, occur_axis = [], []
        target_rank, target_freq = 0, 0
        for position, (token, count) in enumerate(ranked, start=1):
            if token == word:
                target_rank = position
                target_freq = count
            rank_axis.append(position)
            occur_axis.append(count)
        return rank_axis, occur_axis, target_rank, target_freq

    just_the_rank, just_the_occur, word_rank, word_frequency = _ranks(sorted_doc)
    just_the_rank2, just_the_occur2, word_rank2, word_frequency2 = _ranks(sorted_doc2)

    rank_difference = abs(word_rank - word_rank2)
    freq_difference = abs(word_frequency2 - word_frequency)
    if rank_difference == 0:
        print("They are the same rank!")
    else:
        print("The rank difference is " + str(rank_difference))
    if freq_difference == 0:
        print("They appear the same amount of times!")
    else:
        print("The frequency difference is " + str(freq_difference))

    def _dump_csv(path, ranked):
        # BUG FIX: the csv module requires newline='' (avoids blank rows on
        # Windows) and the original never closed these output files.
        with open(path, "w", encoding='utf-8', newline='') as out:
            writer = csv.writer(out)
            for token, count in ranked:
                writer.writerow([token, count])

    _dump_csv("Output1RANK.csv", sorted_doc)
    _dump_csv("Output2RANK.csv", sorted_doc2)

    plt.xlabel("Ranks of " + word + " are " + str(word_rank) + " and " + str(word_rank2))
    # NOTE(review): `basex` was renamed `base` in Matplotlib 3.3 and removed
    # in 3.5; kept as-is to preserve behavior on the pinned version. The
    # figure is never shown or saved here, matching the original.
    plt.loglog(just_the_rank, just_the_occur, basex=10)
    plt.loglog(just_the_rank2, just_the_occur2, basex=10)
# BUG FIX: the original guard compared the undefined name `_z_name__`,
# raising NameError when the script was run instead of calling main().
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | EmileDHaene.noreply@github.com |
3580f9914a3e50e1aa6916f722ff3c976dc3b7b7 | ba35b813b5a5e3eaf2eb1a017c1427fcb4b88978 | /python_stack/flask/flask_mysql/create_read_pets/server.py | 41f1f73fbc372cddff107d6a3167789704627a52 | [] | no_license | bluecrayon52/CodingDojo | 08d3b5d745ec898cf43e731aabc48bc676ce9f34 | 1cc53c8f05760b2c1860e39c58b68a61bb6b7cb9 | refs/heads/master | 2020-04-29T13:35:27.052435 | 2019-05-13T16:20:37 | 2019-05-13T16:20:37 | 176,172,794 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | from flask import Flask, render_template, request, redirect
# import the function that will return an instance of a connection
from mysqlconnection import connectToMySQL
app = Flask(__name__)  # WSGI application instance used by the route decorators below
@app.route("/")
def index():
    """Render the home page with every pet currently in the database."""
    db = connectToMySQL('pets')  # one connection per request, by db name
    all_pets = db.query_db('SELECT * FROM pets;')
    print(all_pets)  # debug aid: dump the fetched rows to the console
    return render_template("index.html", pets=all_pets)
@app.route("/create_pet", methods=["POST"])
def add_pet_to_db():
    """Insert a pet from the submitted form, then redirect to the index."""
    form_values = {
        'name': request.form['name'],
        'type': request.form['type']
    }
    insert_query = "INSERT INTO pets (name, type, created_at, updated_at) VALUES (%(name)s, %(type)s, Now(), Now());"
    # Parameterized query: the connector escapes the form values.
    connectToMySQL('pets').query_db(insert_query, form_values)
    return redirect("/")
if __name__ == "__main__":
    app.run(debug=True)  # development server only; debug enables the reloader
| [
"16687465+bluecrayon52@users.noreply.github.com"
] | 16687465+bluecrayon52@users.noreply.github.com |
4a17901fb414e1cfb8021a49ed8465d6f5d36c5c | bc22d49505eee02849e34970c2cade3c5f38ba25 | /dota-benchmark/tests/test_fbnet.py | 5382b92da51ca92b0a41ac83c66ec25283c5ab85 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Davis-love-AI/CV-2019-Fall-Course | 2937d74c2daa1eacb9f290f0d3d5e2f3f5769e43 | bfe46f7e004483f59299ea19ff41b9ad9b8d96bf | refs/heads/master | 2022-03-28T19:58:33.065889 | 2020-01-09T13:51:45 | 2020-01-09T13:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
import numpy as np
import torch
import maskrcnn_benchmark.modeling.backbone.fbnet_builder as fbnet_builder
TEST_CUDA = torch.cuda.is_available()
def _test_primitive(self, device, op_name, op_func, N, C_in, C_out, expand, stride):
    """Instantiate one FBNet primitive and check its output channel count.

    `self` is the calling TestCase (used for assertions); the input is a
    random N x C_in x 7 x 7 float32 tensor on `device`.
    """
    op = op_func(C_in, C_out, expand, stride).to(device)
    input = torch.rand([N, C_in, 7, 7], dtype=torch.float32).to(device)
    output = op(input)
    self.assertEqual(
        output.shape[:2], torch.Size([N, C_out]),
        'Primitive {} failed for shape {}.'.format(op_name, input.shape)
    )
class TestFBNetBuilder(unittest.TestCase):
    """Smoke tests for fbnet_builder primitives on CPU and CUDA,
    including zero-sized (empty) batches."""

    def test_identity(self):
        """Identity passes tensors through unchanged when in/out shapes match,
        and projects/downsamples when channels or stride differ."""
        id_op = fbnet_builder.Identity(20, 20, 1)
        input = torch.rand([10, 20, 7, 7], dtype=torch.float32)
        output = id_op(input)
        np.testing.assert_array_equal(np.array(input), np.array(output))

        id_op = fbnet_builder.Identity(20, 40, 2)
        input = torch.rand([10, 20, 7, 7], dtype=torch.float32)
        output = id_op(input)
        np.testing.assert_array_equal(output.shape, [10, 40, 4, 4])

    def test_primitives(self):
        ''' Make sures the primitives runs '''
        for op_name, op_func in fbnet_builder.PRIMITIVES.items():
            print('Testing {}'.format(op_name))
            _test_primitive(
                self, "cpu",
                op_name, op_func,
                N=20, C_in=16, C_out=32, expand=4, stride=1
            )

    @unittest.skipIf(not TEST_CUDA, "no CUDA detected")
    def test_primitives_cuda(self):
        ''' Make sures the primitives runs on cuda '''
        for op_name, op_func in fbnet_builder.PRIMITIVES.items():
            print('Testing {}'.format(op_name))
            _test_primitive(
                self, "cuda",
                op_name, op_func,
                N=20, C_in=16, C_out=32, expand=4, stride=1
            )

    def test_primitives_empty_batch(self):
        ''' Make sures the primitives runs '''
        for op_name, op_func in fbnet_builder.PRIMITIVES.items():
            print('Testing {}'.format(op_name))
            # test empty batch size
            _test_primitive(
                self, "cpu",
                op_name, op_func,
                N=0, C_in=16, C_out=32, expand=4, stride=1
            )

    @unittest.skipIf(not TEST_CUDA, "no CUDA detected")
    def test_primitives_cuda_empty_batch(self):
        ''' Make sures the primitives runs '''
        for op_name, op_func in fbnet_builder.PRIMITIVES.items():
            print('Testing {}'.format(op_name))
            # test empty batch size
            _test_primitive(
                self, "cuda",
                op_name, op_func,
                N=0, C_in=16, C_out=32, expand=4, stride=1
            )
if __name__ == "__main__":
    unittest.main()  # allow running this test module directly
| [
"XuehaiPan@pku.edu.cn"
] | XuehaiPan@pku.edu.cn |
18dba6e7cb63aa7c3ea62bc795d5f73a4039aa28 | 99b7f1a8bde483d38c808576d6794b9377c0409b | /mycommand_tests/acceptances/test_cli_logger.py | 227ccf5a2bdcc6a945b8cb4a3311726936817dd3 | [
"MIT"
] | permissive | FabienArcellier/spike_json_formatter_for_logging_python | 17249089a1fcd251d8f5b085c199c2423dbde7fd | e4e831d24039a5484af05654a30189ec658b1eac | refs/heads/master | 2020-05-19T07:46:15.439668 | 2019-05-04T14:21:46 | 2019-05-04T14:35:22 | 184,904,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | # coding=utf-8
import json
import sys
import unittest
from io import StringIO
from mycommand.cli_logger import get_logger
class CliLoggerTest(unittest.TestCase):
    """Acceptance tests for mycommand.cli_logger.get_logger:
    output stream and JSON record format."""

    def setUp(self):
        # Swap stderr for an in-memory buffer so log output can be inspected.
        self.stderr = sys.stderr
        self.stderr_mock = StringIO()
        sys.stderr = self.stderr_mock

    def tearDown(self):
        # Always restore the real stderr, even if the test failed.
        sys.stderr = self.stderr

    def test_logger_should_log_on_stdout(self):
        # NOTE(review): despite the name, this asserts the logger writes to
        # *stderr* (the stream captured in setUp).
        # Assign
        logger = get_logger('logger1')
        # Acts
        logger.info('hello world')
        # Assert
        logs = self.stderr_mock.getvalue().split('\n')
        # One log line plus the empty string after the trailing newline.
        self.assertEqual(2, len(logs))

    def test_logger_should_log_in_json(self):
        # Assign
        logger = get_logger('logger1')
        # Acts
        logger.info('hello world')
        # Assert
        logs = self.stderr_mock.getvalue().split('\n')
        log = logs[0]
        # Each record must parse as JSON and carry the message field.
        log_record = json.loads(log)
        self.assertEqual('hello world', log_record['message'])
| [
"farcellier@octo.com"
] | farcellier@octo.com |
5c77b58b960e7d80d32ffb0e8e9efb1cc1fe1979 | db903a5e99712d1f45e1d45c4d77537f811ae569 | /src/python/pants/backend/python/util_rules/interpreter_constraints.py | 1b8e67d19729242b40f6b2eb83105f17f2289258 | [
"Apache-2.0"
] | permissive | Hirni-Meshram2/pants | 777db8ea67c1fc66de46f0ab374ba4fff8597357 | e802d62cc68176aa66947a939c771b01f47d5425 | refs/heads/main | 2023-05-01T09:23:10.973766 | 2021-05-19T08:24:50 | 2021-05-19T08:24:50 | 366,021,656 | 0 | 2 | Apache-2.0 | 2021-05-10T11:38:07 | 2021-05-10T11:38:06 | null | UTF-8 | Python | false | false | 10,142 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import functools
import itertools
from collections import defaultdict
from typing import FrozenSet, Iterable, List, Sequence, Set, Tuple, TypeVar
from pkg_resources import Requirement
from typing_extensions import Protocol
from pants.backend.python.target_types import InterpreterConstraintsField
from pants.build_graph.address import Address
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.target import Target
from pants.python.python_setup import PythonSetup
from pants.util.frozendict import FrozenDict
from pants.util.ordered_set import FrozenOrderedSet
# This protocol allows us to work with any arbitrary FieldSet. See
# https://mypy.readthedocs.io/en/stable/protocols.html.
class FieldSetWithInterpreterConstraints(Protocol):
    """Structural type: any FieldSet exposing an address and an
    interpreter-constraints field qualifies, without inheriting from it."""

    @property
    def address(self) -> Address:
        ...

    @property
    def interpreter_constraints(self) -> InterpreterConstraintsField:
        ...
_FS = TypeVar("_FS", bound=FieldSetWithInterpreterConstraints)
# Normally we would subclass `DeduplicatedCollection`, but we want a custom constructor.
class InterpreterConstraints(FrozenOrderedSet[Requirement], EngineAwareParameter):
    def __init__(self, constraints: Iterable[str | Requirement] = ()) -> None:
        """Parse any string constraints and store them deterministically sorted."""
        super().__init__(
            v if isinstance(v, Requirement) else self.parse_constraint(v)
            # Sort by string form so equal constraint sets compare and hash equal.
            for v in sorted(constraints, key=lambda c: str(c))
        )
@staticmethod
def parse_constraint(constraint: str) -> Requirement:
"""Parse an interpreter constraint, e.g., CPython>=2.7,<3.
We allow shorthand such as `>=3.7`, which gets expanded to `CPython>=3.7`. See Pex's
interpreter.py's `parse_requirement()`.
"""
try:
parsed_requirement = Requirement.parse(constraint)
except ValueError:
parsed_requirement = Requirement.parse(f"CPython{constraint}")
return parsed_requirement
    @classmethod
    def merge_constraint_sets(cls, constraint_sets: Iterable[Iterable[str]]) -> List[Requirement]:
        """Given a collection of constraints sets, merge by ORing within each individual constraint
        set and ANDing across each distinct constraint set.

        For example, given `[["CPython>=2.7", "CPython<=3"], ["CPython==3.6.*"]]`, return
        `["CPython>=2.7,==3.6.*", "CPython<=3,==3.6.*"]`.
        """
        # Each element (a Set[ParsedConstraint]) will get ANDed. We use sets to deduplicate
        # identical top-level parsed constraint sets.
        if not constraint_sets:
            return []
        parsed_constraint_sets: Set[FrozenSet[Requirement]] = set()
        for constraint_set in constraint_sets:
            # Each element (a ParsedConstraint) will get ORed.
            parsed_constraint_set = frozenset(
                cls.parse_constraint(constraint) for constraint in constraint_set
            )
            parsed_constraint_sets.add(parsed_constraint_set)

        def and_constraints(parsed_constraints: Sequence[Requirement]) -> Requirement:
            # AND: union the version specs of every constraint; all must share
            # one interpreter type or the combination is meaningless.
            merged_specs: Set[Tuple[str, str]] = set()
            expected_interpreter = parsed_constraints[0].project_name
            for parsed_constraint in parsed_constraints:
                if parsed_constraint.project_name == expected_interpreter:
                    merged_specs.update(parsed_constraint.specs)
                    continue

                def key_fn(req: Requirement):
                    return req.project_name

                # NB: We must pre-sort the data for itertools.groupby() to work properly.
                sorted_constraints = sorted(parsed_constraints, key=key_fn)
                # Build a readable {interpreter: [constraints]} map for the error.
                attempted_interpreters = {
                    interp: sorted(
                        str(parsed_constraint) for parsed_constraint in parsed_constraints
                    )
                    for interp, parsed_constraints in itertools.groupby(
                        sorted_constraints, key=key_fn
                    )
                }
                raise ValueError(
                    "Tried ANDing Python interpreter constraints with different interpreter "
                    "types. Please use only one interpreter type. Got "
                    f"{attempted_interpreters}."
                )

            formatted_specs = ",".join(f"{op}{version}" for op, version in merged_specs)
            return Requirement.parse(f"{expected_interpreter}{formatted_specs}")

        def cmp_constraints(req1: Requirement, req2: Requirement) -> int:
            # Deterministic ordering: by interpreter name, then by specs.
            if req1.project_name != req2.project_name:
                return -1 if req1.project_name < req2.project_name else 1
            if req1.specs == req2.specs:
                return 0
            return -1 if req1.specs < req2.specs else 1

        # Cartesian product ANDs one member from every OR-set.
        return sorted(
            {
                and_constraints(constraints_product)
                for constraints_product in itertools.product(*parsed_constraint_sets)
            },
            key=functools.cmp_to_key(cmp_constraints),
        )
    @classmethod
    def create_from_targets(
        cls, targets: Iterable[Target], python_setup: PythonSetup
    ) -> InterpreterConstraints:
        """Merge the interpreter-constraints fields of `targets`, skipping
        targets that do not declare the field."""
        return cls.create_from_compatibility_fields(
            (
                tgt[InterpreterConstraintsField]
                for tgt in targets
                if tgt.has_field(InterpreterConstraintsField)
            ),
            python_setup,
        )
    @classmethod
    def create_from_compatibility_fields(
        cls, fields: Iterable[InterpreterConstraintsField], python_setup: PythonSetup
    ) -> InterpreterConstraints:
        """Resolve each field (falling back to the global default) and merge them."""
        constraint_sets = {field.value_or_global_default(python_setup) for field in fields}
        # This will OR within each field and AND across fields.
        merged_constraints = cls.merge_constraint_sets(constraint_sets)
        return InterpreterConstraints(merged_constraints)
    @classmethod
    def group_field_sets_by_constraints(
        cls, field_sets: Iterable[_FS], python_setup: PythonSetup
    ) -> FrozenDict["InterpreterConstraints", Tuple[_FS, ...]]:
        """Partition field sets by their resolved interpreter constraints.

        Both the groups and the members within each group are sorted so the
        result is deterministic (and therefore cacheable by the engine).
        """
        results = defaultdict(set)
        for fs in field_sets:
            constraints = cls.create_from_compatibility_fields(
                [fs.interpreter_constraints], python_setup
            )
            results[constraints].add(fs)
        return FrozenDict(
            {
                constraints: tuple(sorted(field_sets, key=lambda fs: fs.address))
                for constraints, field_sets in sorted(results.items())
            }
        )
def generate_pex_arg_list(self) -> List[str]:
args = []
for constraint in self:
args.extend(["--interpreter-constraint", str(constraint)])
return args
def _includes_version(self, major_minor: str, last_patch: int) -> bool:
patch_versions = list(reversed(range(0, last_patch + 1)))
for req in self:
if any(
req.specifier.contains(f"{major_minor}.{p}") for p in patch_versions # type: ignore[attr-defined]
):
return True
return False
def includes_python2(self) -> bool:
"""Checks if any of the constraints include Python 2.
This will return True even if the code works with Python 3 too, so long as at least one of
the constraints works with Python 2.
"""
last_py27_patch_version = 18
return self._includes_version("2.7", last_patch=last_py27_patch_version)
def minimum_python_version(self) -> str | None:
"""Find the lowest major.minor Python version that will work with these constraints.
The constraints may also be compatible with later versions; this is the lowest version that
still works.
"""
if self.includes_python2():
return "2.7"
max_expected_py3_patch_version = 15 # The current max is 3.6.12.
for major_minor in ("3.5", "3.6", "3.7", "3.8", "3.9", "3.10"):
if self._includes_version(major_minor, last_patch=max_expected_py3_patch_version):
return major_minor
return None
def _requires_python3_version_or_newer(
self, *, allowed_versions: Iterable[str], prior_version: str
) -> bool:
# Assume any 3.x release has no more than 15 releases. The max is currently 3.6.12.
patch_versions = list(reversed(range(0, 15)))
# We only need to look at the prior Python release. For example, consider Python 3.8+
# looking at 3.7. If using something like `>=3.5`, Py37 will be included.
# `==3.6.*,!=3.7.*,==3.8.*` is extremely unlikely, and even that will work correctly as
# it's an invalid constraint so setuptools returns False always. `['==2.7.*', '==3.8.*']`
# will fail because not every single constraint is exclusively 3.8.
prior_versions = [f"{prior_version}.{p}" for p in patch_versions]
allowed_versions = [
f"{major_minor}.{p}" for major_minor in allowed_versions for p in patch_versions
]
for req in self:
if any(
req.specifier.contains(prior) for prior in prior_versions # type: ignore[attr-defined]
):
return False
if not any(
req.specifier.contains(allowed) for allowed in allowed_versions # type: ignore[attr-defined]
):
return False
return True
def requires_python38_or_newer(self) -> bool:
"""Checks if the constraints are all for Python 3.8+.
This will return False if Python 3.8 is allowed, but prior versions like 3.7 are also
allowed.
"""
return self._requires_python3_version_or_newer(
allowed_versions=["3.8", "3.9", "3.10"], prior_version="3.7"
)
def __str__(self) -> str:
return " OR ".join(str(constraint) for constraint in self)
def debug_hint(self) -> str:
return str(self)
| [
"noreply@github.com"
] | Hirni-Meshram2.noreply@github.com |
9791cdad6d0536107ee110dc74e7810dcaa4262f | 5bcbd788faf57bc867aa365096ba9f9db51f64d6 | /pageDemo/pageDemo/wsgi.py | 126f3dbcaff28013ca17263b989a791604647a04 | [] | no_license | sunren123/Python | 8c2fbf6a1552cd6c3fb80048e2fd6718e9ca85e0 | ee80fc805d2517e1d69d3d1d55edbd370bf8fd9b | refs/heads/master | 2020-04-06T16:27:21.471948 | 2018-12-10T13:07:16 | 2018-12-10T13:13:30 | 157,620,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for pageDemo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings module before the app is built
# (only if DJANGO_SETTINGS_MODULE is not already set in the environment).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pageDemo.settings")

# Module-level WSGI callable imported by application servers
# (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
| [
"462227979@qq.com"
] | 462227979@qq.com |
18dbba319cb3ef28d2fcaa47fe2029c18352b524 | e7bc9bf2dfc3538c68cf0ae0b1e1ac396df87bbf | /stats-monitor/api/urls.py | 87c09031bf47ab0c02f9195cbcc9c29e6f43f24b | [
"MIT"
] | permissive | cryptobench/golemstats-monitor | 84d7677935333f2f7c5551718683de7c2644dfaa | affbb8ff3c428ed8bb437db92d0d869354931826 | refs/heads/master | 2023-05-04T08:47:36.106127 | 2021-05-28T12:37:16 | 2021-05-28T12:37:16 | 366,701,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.urls import path
from django.shortcuts import render
from . import views
# URL namespace for this app, used when reversing, e.g. reverse('api:...').
app_name = 'api'

urlpatterns = [
    # Root of the api/ prefix -> the full-log view.
    path('', views.FullLog),
]
| [
"phillip@golemgrid.com"
] | phillip@golemgrid.com |
f6afc5671b7cf400d2f9edd37587cd75cd30459e | 4443d08048f9980045e5f0541c69db0d756391d1 | /partner_ngos/programs_management/doctype/indicator_tool/test_indicator_tool.py | 28fd19f623e734f4cd2e5e8399ebdc359cfe0344 | [
"MIT"
] | permissive | mohsinalimat/partner_ngos | dea0db6e0f9718e7ffc69f7171bdb1603a055d72 | 4a345fb6989ff5a21db7fca07aa4e5174dca8f59 | refs/heads/master | 2023-03-15T13:15:40.571368 | 2020-07-09T07:22:59 | 2020-07-09T07:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestIndicatorTool(unittest.TestCase):
    # Auto-generated placeholder suite: no assertions exist yet for the
    # Indicator Tool doctype.
    pass
| [
"frappe@ubuntu.vm"
] | frappe@ubuntu.vm |
2cbf45b9287321db3c32577c13b6b844623a137f | 6b9714ab4d4d6c69259aa5f8641e11b92aa3389c | /setup.py | 9e59a4cd4f8e910d3e1999c643fb139e1d1d77bf | [] | no_license | anujgulati/repository1 | 676436699eb62828eae1549f1391093f84a30799 | 8c6604c982768c1cf00db7ae7367aa60c97ee441 | refs/heads/master | 2021-06-01T17:08:04.794706 | 2021-05-18T11:17:18 | 2021-05-18T11:17:18 | 18,926,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | print('Hello, world! this is my first test s2- file- anuj')
print('balle shawa balle shawa')
| [
"noreply@github.com"
] | anujgulati.noreply@github.com |
fbfbe35c0fb0bba849098594867e351a69ca1810 | 6f9426260fc668fe7478f172f1bb8cf080339759 | /code/jianshu.py | 026935d001ad1ebf4482babf48113a1361a55e7b | [] | no_license | DriverSong/Huaweiruantiao_2018 | bac3312b5196848462fcc3d493ad3ca11b487646 | 3d3a2b1b856bceee731550c75b3924dcc5fbf276 | refs/heads/master | 2021-03-31T00:37:06.808004 | 2018-03-29T10:13:15 | 2018-03-29T10:13:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,591 | py | # -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
from statsmodels.tsa.arima_model1 import ARMA
import sys
from dateutil.relativedelta import relativedelta
from copy import deepcopy
import matplotlib.pyplot as plt
class arima_model:
    """ARMA wrapper with (p, q) grid search and one-step-ahead forecasting.

    NOTE: this is Python 2 code (`sys.maxint`, print statements) and depends
    on a locally modified copy of statsmodels' arima_model (imported above as
    `arima_model1`) that exposes a `constant` attribute on the results object.
    """

    def __init__(self, ts, maxLag=9):
        # ts: the (already differenced / stationary) time series to fit.
        self.data_ts = ts
        self.resid_ts = None        # residual series of the fitted model
        self.predict_ts = None      # in-sample predictions of the fitted model
        self.maxLag = maxLag        # exclusive upper bound of the p/q grid search
        self.p = maxLag
        self.q = maxLag
        self.properModel = None     # best fitted ARMA results object so far
        # Best BIC found so far (lower is better).  Python 2 only:
        # sys.maxint does not exist on Python 3 (use sys.maxsize there).
        self.bic = sys.maxint

    # Compute the optimal ARMA model and store the results on the instance.
    def get_proper_model(self):
        self._proper_model()
        self.predict_ts = deepcopy(self.properModel.predict())
        self.resid_ts = deepcopy(self.properModel.resid)

    # Grid-search p, q in [0, maxLag) for the best-fitting ARMA model by BIC.
    # The input series is assumed to be differenced already, so d is always 0.
    def _proper_model(self):
        for p in np.arange(self.maxLag):
            for q in np.arange(self.maxLag):
                # print p,q,self.bic
                model = ARMA(self.data_ts, order=(p, q))
                try:
                    results_ARMA = model.fit(disp=-1, method='css')
                except:
                    # Bare except: skip any (p, q) whose fit fails to converge.
                    continue
                bic = results_ARMA.bic
                # print 'bic:',bic,'self.bic:',self.bic
                if bic < self.bic:
                    # New best model: remember its orders and outputs.
                    self.p = p
                    self.q = q
                    self.properModel = results_ARMA
                    self.bic = bic
                    self.resid_ts = deepcopy(self.properModel.resid)
                    self.predict_ts = self.properModel.predict()

    # Fit the model with explicitly chosen orders p and q.
    def certain_model(self, p, q):
        model = ARMA(self.data_ts, order=(p, q))
        try:
            self.properModel = model.fit( disp=-1, method='css')
            self.p = p
            self.q = q
            self.bic = self.properModel.bic
            self.predict_ts = self.properModel.predict()
            self.resid_ts = deepcopy(self.properModel.resid)
        except:
            print 'You can not fit the model with this parameter p,q, ' \
                  'please use the get_proper_model method to get the best model'

    # Forecast the next value (one step ahead) and append it to predict_ts.
    def forecast_next_day_value(self, type='day'):
        # The statsmodels arima_model source was modified locally to add a
        # `constant` attribute; forecast() must run first so `constant` is set.
        self.properModel.forecast()
        if self.data_ts.index[-1] != self.resid_ts.index[-1]:
            raise ValueError('''The index is different in data_ts and resid_ts, please add new data to data_ts.
            If you just want to forecast the next day data without add the real next day data to data_ts,
            please run the predict method which arima_model included itself''')
        if not self.properModel:
            raise ValueError('The arima model have not computed, please run the proper_model method before')
        para = self.properModel.params
        # print self.properModel.params
        # Build the lag vector (most recent values first) that multiplies the
        # AR and MA coefficients.
        if self.p == 0:  # It will get all the value series with setting self.data_ts[-self.p:] when p is zero
            ma_value = self.resid_ts[-self.q:]
            values = ma_value.reindex(index=ma_value.index[::-1])
        elif self.q == 0:
            ar_value = self.data_ts[-self.p:]
            values = ar_value.reindex(index=ar_value.index[::-1])
        else:
            ar_value = self.data_ts[-self.p:]
            ar_value = ar_value.reindex(index=ar_value.index[::-1])
            ma_value = self.resid_ts[-self.q:]
            ma_value = ma_value.reindex(index=ma_value.index[::-1])
            values = ar_value.append(ma_value)
        # Linear combination of lags plus the model constant gives the forecast.
        predict_value = np.dot(para[1:], values) + self.properModel.constant[0]
        self._add_new_data(self.predict_ts, predict_value, type)
        return predict_value

    # Append one new point to a series, advancing the index by a day or a month.
    def _add_new_data(self, ts, dat, type='day'):
        if type == 'day':
            new_index = ts.index[-1] + relativedelta(days=1)
        elif type == 'month':
            new_index = ts.index[-1] + relativedelta(months=1)
        ts[new_index] = dat

    def add_today_data(self, dat, type='day'):
        # Record today's real value, then append the forecast error
        # (real - predicted) to the residual series for the same date.
        self._add_new_data(self.data_ts, dat, type)
        if self.data_ts.index[-1] != self.predict_ts.index[-1]:
            raise ValueError('You must use the forecast_next_day_value method forecast the value of today before')
        self._add_new_data(self.resid_ts, self.data_ts[-1] - self.predict_ts[-1], type)
if __name__ == '__main__':
    df = pd.read_csv('data/csv/flavor8', encoding='utf-8', index_col='date')
    df.index = pd.to_datetime(df.index)
    ts = df['count']
    # Data preprocessing
    # ts_log = np.log(ts)
    # Moving-window smoothing: a rolling mean over every 7 days
    rol_mean = ts.rolling(window=7).mean()
    # rol_mean = ts_log.rolling(window=12).mean()
    # Drop the missing values introduced by the rolling window
    rol_mean.dropna(inplace=True)
    # Replace zeros so the log below is defined
    rol_mean.replace(0,1,inplace=True)
    ts_rol_mean = np.log(rol_mean)
    # First-order differencing
    ts_diff_1 = ts_rol_mean.diff(1)
    #print(ts_diff_1)
    ts_diff_1.dropna(inplace=True)
    # Second-order differencing (computed but unused below)
    ts_diff_2 = ts_diff_1.diff(1)
    ts_diff_2.dropna(inplace=True)
    print(ts)
    #ts.plot()
    #ts_diff_1.plot()
    #ts_log.plot()
    plt.show()
    # The block below (model fitting, forecast reconstruction and plotting) is
    # dead code, disabled by wrapping it in a bare string literal.
    """
    # 模型拟合
    model = arima_model(ts_diff_1)
    # 这里使用模型参数自动识别
    model.get_proper_model()
    print 'bic:', model.bic, 'p:', model.p, 'q:', model.q
    print model.properModel.forecast()[0]
    print model.forecast_next_day_value(type='month')
    # 预测结果还原
    predict_ts = model.properModel.predict()
    diff_shift_ts = ts_diff_1.shift(0)
    diff_recover_1 = predict_ts.add(diff_shift_ts)
    rol_shift_ts = rol_mean.shift(1)
    diff_recover = diff_recover_1.add(rol_shift_ts)
    rol_sum = ts.rolling(window=6).sum()
    rol_recover = diff_recover*7 - rol_sum.shift(1)
    # log_recover = np.exp(rol_recover)
    # log_recover.dropna(inplace=True)
    rol_recover.dropna(inplace=True)
    # 预测结果作图
    # ts = ts[log_recover.index]
    ts = ts[rol_recover.index]
    plt.figure(facecolor='white')
    # log_recover.plot(color='blue', label='Predict')
    rol_recover.plot(color='blue', label='Predict')
    ts.plot(color='red', label='Original')
    plt.legend(loc='best')
    # plt.title('RMSE: %.4f'% np.sqrt(sum((log_recover-ts)**2)/ts.size))
    plt.title('RMSE: %.4f'% np.sqrt(sum((rol_recover-ts)**2)/ts.size))
    plt.show()
    """
""" | [
"sijieqian@foxmail.com"
] | sijieqian@foxmail.com |
846bed4e9453324aa3a7fb1a5ce934bd477987c5 | 73c9537b3e2dd9c57e581d474b9e2daf7a8fb02a | /portalpetcc/atividades/migrations/0021_auto_20160423_1616.py | 8a121e57a2c618238c4a1245a4fc58b440f57782 | [] | no_license | pviniciusm/petcc | 8f6ec2966729051f11b482c4c7ed522df3f920ba | 30ccddce6d0e39ccea492ac73b2ddca855c63cee | refs/heads/master | 2021-01-21T13:29:52.835434 | 2016-04-23T18:06:07 | 2016-04-23T18:06:07 | 54,607,007 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Auto-generated (makemigrations) schema change for `atividades.Atividade`:
    drops the `slug` field and alters the defaults of the two time fields.
    """

    dependencies = [
        ('atividades', '0020_auto_20160423_1615'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='atividade',
            name='slug',
        ),
        migrations.AlterField(
            model_name='atividade',
            name='hora_final',
            # NOTE(review): the default is the fixed datetime captured when this
            # migration was generated, not "now" at insert time — confirm intended.
            field=models.TimeField(default=datetime.datetime(2016, 4, 23, 16, 16, 9, 671752)),
        ),
        migrations.AlterField(
            model_name='atividade',
            name='hora_inicial',
            field=models.TimeField(default=datetime.datetime(2016, 4, 23, 16, 16, 9, 671703)),
        ),
    ]
| [
"pvinics@gmail.com"
] | pvinics@gmail.com |
c4443b3628789cf496fd597c99da7d5c309b892e | 24946a607d5f6425f07d6def4968659c627e5324 | /HackerRank-30-Days-Challenges/d29.py | 070dc42ec9d10e85936c892e53021e6e0ad66aaf | [] | no_license | mmrubayet/HackerRank_solutions | 5d8acbb8fd6f305a006f147e6cb76dbfc71bbca5 | f1c72fbf730b6a79656d578f6c40a128a6f0ac5c | refs/heads/master | 2023-06-02T16:51:18.017902 | 2021-06-19T18:35:41 | 2021-06-19T18:35:41 | 233,853,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | t = int(input())
for _ in range(t):
n, k = map(int, input().split())
maximum = 0
for a in range(n-1, 1, -1):
for b in range(n, a, -1):
ab = a&b
if k > ab > maximum:
maximum = ab
if maximum == k-1:
break
if maximum == k-1:
break
print(maximum)
| [
"m.rubayet94@gmail.com"
] | m.rubayet94@gmail.com |
4ed69fad2fa0e115898e23a9601da84e556103dc | 933e9343194383d480d49126b2ffb1efe09cb0cd | /api/background/manipulators/position_manipulator.py | 61932fe4ce2bdf5b2a8c171e0bf2623b7c6f7e2c | [] | no_license | ostaninanastya/space_ship | e67b564ef53af2ad5fdbab5babd2ce0d4d42394b | 88ed6c173b3f10908cd8d2ee9ece25dac6a9361f | refs/heads/master | 2020-03-07T00:25:57.391503 | 2018-06-15T17:15:45 | 2018-06-15T17:15:45 | 127,157,140 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,327 | py | import sys, os
import configparser
import datetime
import graphene
sys.path.append(os.environ['SPACE_SHIP_HOME'] + '/api/background/mappers')
from position_mapper import PositionMapper
sys.path.append(os.environ['SPACE_SHIP_HOME'] + '/logbook')
import cassandra_mediator
config = configparser.ConfigParser()
config.read(os.environ['SPACE_SHIP_HOME'] + '/databases.config')
TIMESTAMP_PATTERN = os.environ.get('TIMESTAMP_PATTERN') or config['FORMATS']['timestamp']
class CreatePosition(graphene.Mutation):
    """GraphQL mutation that inserts a new position record into Cassandra."""

    class Arguments:
        timestamp = graphene.String()
        x = graphene.Float()
        y = graphene.Float()
        z = graphene.Float()
        speed = graphene.Float()
        attackangle = graphene.Float()
        directionangle = graphene.Float()

    ok = graphene.Boolean()
    position = graphene.Field(lambda: PositionMapper)

    def mutate(self, info, timestamp, x, y, z, speed, attackangle, directionangle):
        # Parse the textual timestamp with the configured pattern, then hand the
        # raw values to the Cassandra mediator and wrap the new row for GraphQL.
        parsed_timestamp = datetime.datetime.strptime(timestamp, TIMESTAMP_PATTERN)
        created = cassandra_mediator.create_position(
            parsed_timestamp, x, y, z, speed, attackangle, directionangle
        )
        return CreatePosition(position=PositionMapper.init_scalar(created), ok=True)
class RemovePosition(graphene.Mutation):
    """GraphQL mutation that deletes the position recorded at a timestamp."""

    class Arguments:
        timestamp = graphene.String()

    ok = graphene.Boolean()
    position = graphene.Field(lambda: PositionMapper)

    def mutate(self, info, timestamp):
        parsed_timestamp = datetime.datetime.strptime(timestamp, TIMESTAMP_PATTERN)
        removed = cassandra_mediator.remove_position(parsed_timestamp)
        return RemovePosition(position=PositionMapper.init_scalar(removed), ok=True)
class UpdatePositions(graphene.Mutation):
    """GraphQL mutation that bulk-updates position rows.

    The non-`set_*` arguments act as filters; the `set_*` arguments carry the
    new values.  NaN / empty-string defaults mean "not provided".
    """

    class Arguments:
        timestamp = graphene.String(default_value = '')
        x = graphene.Float(default_value = float('nan'))
        y = graphene.Float(default_value = float('nan'))
        z = graphene.Float(default_value = float('nan'))
        speed = graphene.Float(default_value = float('nan'))
        attackangle = graphene.Float(default_value = float('nan'))
        directionangle = graphene.Float(default_value = float('nan'))
        set_x = graphene.Float(default_value = float('nan'))
        set_y = graphene.Float(default_value = float('nan'))
        set_z = graphene.Float(default_value = float('nan'))
        set_speed = graphene.Float(default_value = float('nan'))
        set_attackangle = graphene.Float(default_value = float('nan'))
        set_directionangle = graphene.Float(default_value = float('nan'))

    ok = graphene.Boolean()

    def mutate(self, info, timestamp, x, y, z, speed, attackangle, directionangle, set_x, set_y, set_z, set_speed, set_attackangle, set_directionangle):
        parsed_timestamp = None if not timestamp else datetime.datetime.strptime(timestamp, TIMESTAMP_PATTERN)
        cassandra_mediator.update_positions(
            # BUG FIX: call .date()/.time() — the original passed the bound
            # methods themselves, not their values.
            date=None if not parsed_timestamp else parsed_timestamp.date(),
            time=None if not parsed_timestamp else parsed_timestamp.time(),
            x=x, y=y, z=z, speed=speed,
            attackangle=attackangle, directionangle=directionangle,
            # BUG FIX: set_z was accepted as an argument but silently dropped
            # from this call.  Assumes update_positions accepts a set_z kwarg,
            # mirroring set_x/set_y — verify against cassandra_mediator.
            set_x=set_x, set_y=set_y, set_z=set_z, set_speed=set_speed,
            set_attackangle=set_attackangle, set_directionangle=set_directionangle,
        )
        return UpdatePositions(ok=True)
"zeionara@gmail.com"
] | zeionara@gmail.com |
ee208ab4675636710f8c6b6961a514af75ee1208 | e32bb97b6b18dfd48760ed28553a564055878d48 | /source_py3/python_toolbox/caching/cached_property.py | b37f9dd0a1b3806a1c67e463d035432846e91edc | [
"MIT"
] | permissive | rfdiazpr/python_toolbox | 26cb37dd42342c478931699b00d9061aedcd924a | 430dd842ed48bccdb3a3166e91f76bd2aae75a88 | refs/heads/master | 2020-12-31T04:15:53.977935 | 2014-04-30T23:54:58 | 2014-04-30T23:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | # Copyright 2009-2014 Ram Rachum.
# This program is distributed under the MIT license.
'''
Defines the `CachedProperty` class.
See its documentation for more details.
'''
from python_toolbox import decorator_tools
from python_toolbox import misc_tools
class CachedProperty(misc_tools.OwnNameDiscoveringDescriptor):
    '''
    A property that is calculated only once for an object, and then cached.

    Usage:

        class MyObject:

            # ... Regular definitions here

            def _get_personality(self):
                print('Calculating personality...')
                time.sleep(5) # Time consuming process that creates personality
                return 'Nice person'

            personality = CachedProperty(_get_personality)

    You can also put in a value as the first argument if you'd like to have it
    returned instead of using a getter. (It can be a totally static value like
    `0`). If this value happens to be a callable but you'd still like it to be
    used as a static value, use `force_value_not_getter=True`.
    '''
    def __init__(self, getter_or_value, doc=None, name=None,
                 force_value_not_getter=False):
        '''
        Construct the cached property.

        `getter_or_value` may be either a function that takes the parent object
        and returns the value of the property, or the value of the property
        itself, (as long as it's not a callable.)

        You may optionally pass in the name that this property has in the
        class; this will save a bit of processing later.
        '''
        misc_tools.OwnNameDiscoveringDescriptor.__init__(self, name=name)
        if callable(getter_or_value) and not force_value_not_getter:
            self.getter = getter_or_value
        else:
            # A static value: wrap it in a getter that ignores its argument so
            # __get__ can treat both cases uniformly.
            self.getter = lambda thing: getter_or_value
        # Prefer an explicit doc, otherwise inherit the getter's docstring.
        self.__doc__ = doc or getattr(self.getter, '__doc__', None)


    def __get__(self, obj, our_type=None):

        if obj is None:
            # We're being accessed from the class itself, not from an object
            return self

        # Compute the value once, then cache it by writing it onto the instance
        # under this property's own attribute name.  Because this descriptor
        # defines no __set__, the instance attribute shadows the descriptor on
        # every later access, so the getter never runs again for this object.
        value = self.getter(obj)

        setattr(obj, self.get_our_name(obj, our_type=our_type), value)

        return value


    def __call__(self, method_function):
        '''
        Decorate method to use value of `CachedProperty` as a context manager.
        '''
        # `inner` runs the decorated method inside `with <cached value>`, i.e.
        # the cached property's value is entered as a context manager around
        # each call.
        def inner(same_method_function, self_obj, *args, **kwargs):
            with getattr(self_obj, self.get_our_name(self_obj)):
                return method_function(self_obj, *args, **kwargs)
        return decorator_tools.decorator(inner, method_function)
| [
"ram@rachum.com"
] | ram@rachum.com |
73e8cea08fed55cb41bafe7747f679e33f16c5e4 | 1a2ab983bd0043bced47a1d4c5b9184821f05c09 | /tensordiffeq/__init__.py | 177dc38dbf3418c168868e80f13ea56bc87078a4 | [] | no_license | tensordiffeq/TensorDiffEq | c447a0efe4e558f64565865f2c2ade7c5d7255eb | 7633927b8471a4150ea25972fbf41902af01070d | refs/heads/main | 2023-06-21T12:44:33.099745 | 2022-03-01T07:26:05 | 2022-03-01T07:26:05 | 300,818,380 | 92 | 35 | null | 2021-08-31T17:26:36 | 2020-10-03T07:05:08 | Python | UTF-8 | Python | false | false | 629 | py | from __future__ import absolute_import
from tensordiffeq import models, optimizers, networks, plotting, utils, domains, boundaries, fit, helpers, sampling
# from .models import CollocationSolverND, DiscoveryModel
# from .boundaries import dirichletBC, periodicBC, IC
# from .utils import constant, LatinHypercubeSample, tensor
# from .plotting import newfig, get_griddata
# from .helpers import find_L2_error
# from .optimizers import graph_lbfgs, eager_lbfgs
__all__ = [
"models",
"networks",
"plotting",
"utils",
"helpers",
"optimizers",
"boundaries",
"domains",
"fit",
"sampling"
]
| [
"levimcclenny@tamu.edu"
] | levimcclenny@tamu.edu |
64f802685d92208bb1dc54b2bf3fb8eb1ba4c7ba | 6e47d5c133dfc7158f97902081b1607e130b516a | /components/fighter.py | ba79439867e8aa202c0725199173a9af9b3d9724 | [] | no_license | commonguy356/Cavelike | b9c85b648fb2453b2f96dac400c75352f8b737a0 | 8ebeac5b0f9bcb7f07dd6b1ea25fefe69cdf9112 | refs/heads/master | 2022-11-10T04:04:55.479267 | 2020-07-02T20:20:40 | 2020-07-02T20:20:40 | 273,542,371 | 0 | 0 | null | 2020-07-02T20:05:46 | 2020-06-19T16:42:32 | Python | UTF-8 | Python | false | false | 915 | py | class Fighter:
def __init__(self, hp, defense, power):
self.max_hp = hp
self.hp = hp
self.defense = defense
self.power = power
def take_damage(self, amount):
results = []
self.hp -= amount
if self.hp <= 0:
results.append({'dead': self.owner})
return results
def attack(self, target):
results = []
damage = self.power - target.fighter.defense
if damage > 0:
results.append({'message': '{0} attacks {1} for {2} hit points.'.format(
self.owner.name.capitalize(), target.name, str(damage))})
results.extend(target.fighter.take_damage(damage))
else:
results.append({'message': '{0} attacks {1} but does no damage.'.format(
self.owner.name.capitalize(), target.name)})
return results
| [
"noreply@github.com"
] | commonguy356.noreply@github.com |
82e33027da58a104bd86d11192b73a336f7ad7f2 | 3c099a78896ca4b775d28fccf38c2bfdf6a1a555 | /CorePython/DataTypes/List/RotateString.py | 70094c27d723b5432c93d357b08a84132027dd71 | [] | no_license | anmolparida/selenium_python | db21215837592dbafca5cced7aecb1421395ed41 | 78aec8bf34d53b19fb723a124ad13342c6ce641c | refs/heads/master | 2022-12-03T23:52:32.848674 | 2020-08-30T19:26:30 | 2020-08-30T19:26:30 | 282,207,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py |
def rotate_string(vStr, step):
    """Left-rotate ``vStr`` by ``step`` characters; print and return the result.

    Improvements over the original: the rotated string is returned (it was
    only printed), ``step`` values beyond ``len(vStr)`` wrap around instead of
    yielding a non-rotation, and the empty string is handled explicitly.
    """
    offset = step % len(vStr) if vStr else 0  # guard against len 0
    rotated = vStr[offset:] + vStr[:offset]
    print(rotated)
    return rotated


# Demo calls (print "defabc" and "234561").
rotate_string('abcdef', 3)
rotate_string('123456', 1)
| [
"anmolparida@gmail.com"
] | anmolparida@gmail.com |
1d62494ced0747adf81d6714036dc881f234fbbb | 9df0c1c31db47e31703b316ac71cdebbe1934668 | /chapter/chapter8.py | 54d2f8bb3483cb94bbe078a3518ab604ae3f2a3b | [] | no_license | Parkduksung/study-python | 61f7d463e06eeb9a6771ea2f6521cb6b58e14f53 | b642df368931c8ae985795b791106b77f296871a | refs/heads/main | 2023-02-21T08:23:04.888549 | 2021-01-25T11:47:43 | 2021-01-25T11:47:43 | 320,812,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,663 | py | #8-1
def create_student(name, korean, math, english, science):
    """Pack one student's name and four subject scores into a dict record."""
    return dict(
        name=name,
        korean=korean,
        math=math,
        english=english,
        science=science,
    )
students = [
create_student("1",1,1,1,1),
create_student("2",2,2,2,2),
create_student("3",3,3,3,3),
create_student("4",4,4,4,4)
]
# print("이름","총점","평균",sep="\t")
import numpy as average
# 한번에 print 하기.. 별로 안이쁘네...
for student in students :
print(student["name"],
sum([student[i] for i in student if type(student[i]) == int]),
average.array([student[i] for i in student if type(student[i]) == int]).mean()
)
#여기서 __init__ 은 한개밖에 안됨.
class Student :
def __init__(self,name,korean,math,english,science) :
self.name = name
self.korean = korean
self.math = math
self.english = english
self.science = science
def sum(self) :
return self.korean + self.math + self.english + self.science
def average(self) :
return average.array([self.korean,self.math,self.english,self.science]).mean()
student1 = [
Student("1",1,1,1,1),
Student("3",3,3,3,3),
Student("5",5,5,5,5),
Student("7",7,7,7,7)
]
for student in student1 :
print(student.name, student.sum(), student.average())
# print(student.name,
# sum([i for i in student if type(i) == int]),
# average.array([i for i in student if type(i) == int]).mean()
# )
#8-2
#인스턴스가 어떤 클래스로 만들어졌는지 확인가능 파이썬은.
class Student2 :
    # Empty example class, used below only for the isinstance()/type() checks.
    def __init__(self) :
        pass
student2 = Student2()
print(isinstance(student2,Student2))
print(isinstance(student1[0],Student))
# 단순한 인스턴스 확인
print(type(student2)==Student2())
# 만약에 클래스의 변수가 접근한정자가 private 으로 하고 싶으면 앞에다 __ 두개만 넣으면 된다.
# getter-setter
# private 한 radius 를 간접적으로 접근.
# 여기서 안전하게 사용하려면 먼가 setter 할때의 value 에 대한 조건문을 걸고 그 조건문에 해당되지 않으면 raise 해서 오류 발생시키면된다.
# Example getter: indirect read access to a "private" double-underscore attribute.
# (These are written as methods and are meant to live inside a class.)
def get_radius(self):
    return self.__radius

# Example setter: for safety, validate `value` here and raise on bad input,
# as the surrounding notes describe.
def set_radius(self, value):
    self.__radius = value
#상속
class Parent:
    # Base class for the inheritance demo below; prints when initialized.
    def __init__(self):
        self.value = "테스트"
        print("__init__ 호출")

    def test(self):
        print("test()")
class Child(Parent):
    def __init__(self):
        # Explicitly run the parent's initializer so `self.value` is set.
        Parent.__init__(self)
        print("Child init")
child = Child()
child.test()
print(child.value)
| [
"duksung1234@naver.com"
] | duksung1234@naver.com |
d6dc2df3404943b96f01f73ff3255ecac887bc85 | 7ac1077baaf848a4bce7c54edf0719950bf5d1fb | /contact.py | 3d89f9754aae1f03b0e7cb64ac20af44f7da04e2 | [] | no_license | quanted/pisces_app | c78a86ccd465cd5a66c485f0d688e1f5cac8e32d | a04bb599cf54054a17015ce33aa43533ac9f08e4 | refs/heads/dev | 2023-08-31T12:36:35.738980 | 2022-04-05T13:39:29 | 2022-04-05T13:39:29 | 78,229,986 | 0 | 0 | null | 2020-04-03T13:42:31 | 2017-01-06T18:44:25 | Python | UTF-8 | Python | false | false | 5,479 | py | from django.template.loader import render_to_string
from django.http import HttpResponse, HttpResponseRedirect
import pisces_app.links_left as links_left
import sqlite3
import os
import logging
import datetime
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
db_name = "pisces_comments.sqlite3"
def create_connection():
    """Open a connection to the module-level SQLite database (`db_name`).

    :return: sqlite3.Connection object, or None when the connection fails.
    """
    # Fixes: the old docstring referenced a `db_file` parameter that does not
    # exist, and errors were printed to stdout rather than logged.
    try:
        return sqlite3.connect(db_name)
    except sqlite3.Error as e:
        logging.warning("Could not connect to %s: %s", db_name, e)
        return None
def create_tables(conn, create_sql):
    """Execute a CREATE TABLE statement on an open connection.

    :param conn: open sqlite3 Connection object
    :param create_sql: a CREATE TABLE statement
    :return: None; database errors are logged rather than raised
             (was previously printed to stdout).
    """
    try:
        conn.cursor().execute(create_sql)
    except sqlite3.Error as e:
        logging.warning("Failed to execute %r: %s", create_sql, e)
def add_comment(name, email, comment):
    """Store one contact-form comment with the current timestamp.

    Creates the `comments` table on first use.

    :param name: submitter's name
    :param email: submitter's email address
    :param comment: free-text comment body
    :return: True when the row was inserted, False on a database error.
    """
    conn = create_connection()
    sql_create_table = 'CREATE TABLE IF NOT EXISTS comments (name text NOT NULL, email text NOT NULL, comment text, timestamp text)'
    create_tables(conn, sql_create_table)
    new_comment = 'INSERT INTO comments VALUES (?,?,?,?)'
    try:
        parameters = (name, email, comment, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        conn.cursor().execute(new_comment, parameters)
        conn.commit()
        return True
    except sqlite3.Error:
        return False
    finally:
        # Close on failure as well — the original leaked the connection
        # whenever the INSERT raised.
        conn.close()
def handle_contact_post(request):
    """
    Persist a submitted contact-form comment, then render the thank-you page.

    :param request: Django HttpRequest carrying the POSTed form fields
    :return: HttpResponse from contacts_submission_view, or None on failure
    """
    post_data = request.POST  # NOTE(review): unused; fields are read directly below
    name = request.POST.get('name', "none") or "none" # additional or accounts for blank string
    from_email = request.POST.get('email', "none") or "none"
    comment = request.POST.get('comment')
    try:
        add_comment(name, from_email, comment)
    except Exception as e:
        logging.warning("Exception occurred handling contact submission: {}".format(e))
        # NOTE(review): this bare return yields None, which Django treats as an
        # invalid view response — confirm whether an error page was intended.
        return
    return contacts_submission_view(request)
def contacts_submission_view(request):
    """
    Page that displays after a comment has been submitted by
    the user on the contacts page.

    :param request: Django HttpRequest
    :return: HttpResponse containing the assembled "thank you" page
    """
    page_title = "Piscine Stream Community Estimation System"
    keywords = "PiSCES, Piscine Stream Community Estimation System, EPA"
    imports = render_to_string('hms_default_imports.html')
    # Read the disclaimer inside a context manager — the original opened the
    # file and never closed it.  (The text is currently unused: the header is
    # rendered with DISCLAIMER=None below.)
    with open(os.path.join(os.environ['PROJECT_PATH'], 'hms_app/views/disclaimer.txt'), 'r') as disclaimer_file:
        disclaimer_text = disclaimer_file.read()
    html = render_to_string('01epa18_default_header.html', {
        'TITLE': page_title,
        # BUG FIX: call get_host() — the original stringified the bound method.
        'URL': str(request.get_host()) + request.path,
        'KEYWORDS': keywords,
        'IMPORTS': imports,
        'NOTPUBLIC': False,
        'DISCLAIMER': None
    })  # Default EPA header
    html += links_left.ordered_list(model='pisces')
    html += render_to_string('05hms_body_start.html', {
        'TITLE': "Thank you for your comments!",
        'DESCRIPTION': """An email has been sent to the PiSCES team.<br>
            If a return email address was provided, we'll get back to you as soon as possible.<br><br>
            <!--Return to <a href="/pisces">homepage</a>.-->
            <form action="/pisces" method="get">
            <input type="submit" value="Go back PiSCES homepage" />
            </form>
            """
    })  # HMS Workflow main body start
    html += render_to_string('06hms_body_end.html')  # HMS Workflow main body end
    html += render_to_string('07hms_splashscripts.html')  # EPA splashscripts import
    html += render_to_string('10epa_drupal_footer.html')  # Default EPA footer
    response = HttpResponse()
    response.write(html)
    return response
def contact_page(request):
    """
    Render the PiSCES "Contact Us" page.

    :param request: Django HttpRequest
    :return: HttpResponse containing the assembled page HTML
    """
    page_title = "Piscine Stream Community Estimation System"
    keywords = "PiSCES, Piscine Stream Community Estimation System, EPA"
    imports = render_to_string('hms_default_imports.html')
    html = render_to_string('01epa18_default_header.html', {
        'TITLE': page_title,
        # NOTE(review): request.get_host is not called here — this stringifies
        # the bound method itself; probably meant request.get_host().
        'URL': str(request.get_host) + request.path,
        'KEYWORDS': keywords,
        'IMPORTS': imports,
        'NOTPUBLIC': False,
        'DISCLAIMER': None
    })  # Default EPA header
    html += links_left.ordered_list(model='pisces')
    # Body of the contact page; the form markup lives in the template.
    page_text = render_to_string("04pisces_contact_body.html", {}, request=request)
    html += render_to_string('05pisces_body_start.html', {
        'TITLE': "PiSCES Contact Us",
        'DESCRIPTION': page_text
    })  # HMS Workflow main body start
    html += render_to_string('06hms_body_end.html')  # HMS Workflow main body end
    html += render_to_string('07hms_splashscripts.html')  # EPA splashscripts import
    html += render_to_string('10epa_drupal_footer.html')  # Default EPA footer
    response = HttpResponse()
    response.write(html)
    return response
| [
"deron.brock@gmail.com"
] | deron.brock@gmail.com |
5be977d0c32e84398780b29822f3d723b46e0eb8 | 40a73c565b602bce4a2bf0117c80fdb3ce9aeaee | /MisPerris/urls.py | cf32d713c9e54a3424da1bf296ea49207a5d8cdb | [] | no_license | bastianGV23/Social_Perris | 9fb26976245475f0c0648b519fa4d7a0c4b9bf3f | 5e22f115430279986fb185a65689cb449bee8e71 | refs/heads/master | 2020-04-06T23:22:46.615819 | 2018-11-16T13:50:28 | 2018-11-16T13:50:28 | 157,866,893 | 0 | 0 | null | 2018-11-16T13:50:49 | 2018-11-16T12:49:32 | Python | UTF-8 | Python | false | false | 1,495 | py | """MisPerris URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from apps.formulario.views import formulario_view
from apps.principal.views import IndexView,LogOut
from django.contrib.auth.views import LoginView, LogoutView
from django.conf import settings
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Site home page.
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'^formulario/', formulario_view, name='contacto'),
    # App URL sets, each mounted under its own namespace.
    url(r'^rescate/', include(('apps.rescate.urls','rescate'),namespace='rescate')),
    url(r'^adopcion/', include(('apps.adopcion.urls','adopcion'),namespace='adopcion')),
    # python-social-auth login routes (no prefix).
    url('', include('social.apps.django_app.urls', namespace='social')),
    url(r'^salir/$', LogOut),
]

# Serve user-uploaded media from Django itself during development only.
if settings.DEBUG:
    from django.conf.urls.static import static
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"bastian.gv@outlook.com"
] | bastian.gv@outlook.com |
bbd0f23bcda4053e645882ffa2b31ede505b6e3d | f789f5e6d1e5c84dde4379dc59b39f279e2c6253 | /Unique Binary Search Trees/Accepted-6612953.py | 70c36c7afcd1fb6e9140446a6631975a37284a07 | [] | no_license | SakuraSa/MyLeetcodeSubmissions | ce46fe195742e43ab48444c953c10b68a7192fcd | 6c0952867bafe69e70ad08fbae088da995396e3b | refs/heads/master | 2016-09-03T06:30:02.792302 | 2014-11-13T09:17:11 | 2014-11-13T09:17:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #Author : sakura_kyon@hotmail.com
#Question : Unique Binary Search Trees
#Link : https://oj.leetcode.com/problems/unique-binary-search-trees/
#Language : python
#Status : Accepted
#Run Time : 152 ms
#Description:
#Given n, how many structurally unique ###BST's### (binary search trees) that store values 1...n?
#For example,
#Given n = 3, there are a total of 5 unique BST's.
#```
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
#```
#Code :
class Solution:
def __init__(self):
self.mem = {0:1, 1:1}
# @return an integer
def numTrees(self, n):
if n in self.mem:
return self.mem[n]
ret = 0
for i in range(n):
ret += self.numTrees(i) * self.numTrees(n - i - 1)
self.mem[n] = ret
return ret | [
"sakura_kyon@hotmail.com"
] | sakura_kyon@hotmail.com |
f00d10e19e12b9f68c9c87f8b7226e5333df3cd3 | 828ed7a5c35ce716e586b8b85ec2e882288a7eae | /tvdb_client/tests/client.py | d3fe898acc1ca43e15de8675814b64bdbd282506 | [
"Apache-2.0"
] | permissive | miigotu/tvdb_client | 9db857c25dfc9f2effe1bd47ff372fee0cb50fa0 | 9f7d90f987a9d1fb71c3a9aa47db7d1e5984813a | refs/heads/master | 2020-05-25T15:44:16.019858 | 2016-08-01T14:53:37 | 2016-08-01T14:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,061 | py | from unittest import TestCase
from tvdb_client.clients import ApiV1Client, ApiV2Client
from tvdb_client.exceptions import UserNotLoggedInException
__author__ = 'tsantana'
class LoginTestCase(TestCase):
def runTest(self):
pass
def test_001_01_api_v1_login_success(self):
pass
def test_001_02_api_v2_login_success(self):
api = ApiV2Client('thilux', '463B5371A1FCB382', 'F40C8DCCA265D3F3')
api.login()
self.assertTrue(api.is_authenticated)
def test_002_01_api_v1_login_invalid_api_key(self):
pass
def test_002_02_api_v2_login_invalid_api_key(self):
api = ApiV2Client('thilux', 'XXXXXXXXXXX', 'F40C8DCCA265D3F3')
api.login()
self.assertFalse(api.is_authenticated)
def test_003_01_api_v1_login_invalid_account_identifier(self):
pass
def test_003_02_api_v2_login_invalid_account_identifier(self):
api = ApiV2Client('thilux', '463B5371A1FCB382', 'XXIHWDIHWIHIE')
api.login()
self.assertFalse(api.is_authenticated)
def test_004_01_api_v1_login_invalid_username(self):
pass
def test_004_02_api_v2_login_invalid_username(self):
api = ApiV2Client('shambalalalallala', '463B5371A1FCB382', 'F40C8DCCA265D3F3')
api.login()
self.assertFalse(api.is_authenticated)
class SearchTestCase(TestCase):
def runTest(self):
pass
def test_001_01_api_v1_search_series_single_letter(self):
pass
def test_001_02_api_v2_search_series_single_letter(self):
api = ApiV2Client('thilux', '463B5371A1FCB382', 'F40C8DCCA265D3F3')
api.login()
resp = api.search_series(name='a')
self.assertIsNotNone(resp)
self.assertIn('data', resp)
self.assertGreater(len(resp['data']), 0)
def test_002_01_api_v1_search_series_single_show(self):
pass
def test_002_02_api_v2_search_series_single_show(self):
api = ApiV2Client('thilux', '463B5371A1FCB382', 'F40C8DCCA265D3F3', 'en')
api.login()
resp = api.search_series(name='Fear the walking dead')
self.assertIsNotNone(resp)
self.assertIn('data', resp)
self.assertEqual(len(resp['data']), 1)
def test_003_01_api_v1_search_series_nonexistent(self):
pass
def test_003_02_api_v1_search_series_nonexistent(self):
api = ApiV2Client('thilux', '463B5371A1FCB382', 'F40C8DCCA265D3F3', 'en')
api.login()
resp = api.search_series(name='Dosh Dosh Stoy Stoy Stoy')
self.assertIsNotNone(resp)
self.assertNotIn('data', resp)
self.assertEqual(api.__class__.__name__, resp['client_class'])
self.assertEqual(404, resp['code'])
def test_004_01_api_v1_search_series_not_logged_in(self):
pass
def test_004_02_api_v2_search_series_not_logged_id(self):
api = ApiV2Client('thilux', '463B5371A1FCB382', 'F40C8DCCA265D3F3', 'en')
self.assertRaises(UserNotLoggedInException, callableObj=api.search_series, name='Fear the walking dead')
| [
"tlsantana.un@gmail.com"
] | tlsantana.un@gmail.com |
a552bad7899811fcddc760b313e1d22e5f71cf6e | a44c4a543bd9b21f39526fbb5fd7f7b7162a525a | /catch2js.py | 5d77af36fdbd11690d2274f830d520415cd8a447 | [] | no_license | castro-miguel-1993/catch2js | 4dc30d595b7bb59d741c96fcb3ae97925889a766 | ac79b78a460be712cb3759a4a3f3b7f34ef33e7a | refs/heads/master | 2022-07-19T09:02:34.179648 | 2020-05-14T03:40:44 | 2020-05-14T03:40:44 | 263,801,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | import urllib.request
from urllib.request import urlopen, Request
import os
import sys
from bs4 import BeautifulSoup
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
try:
req = Request(url=sys.argv[1], headers=headers)
datos = urllib.request.urlopen(req).read().decode()
file = open("./"+sys.argv[2]+".txt", "w")
file.write("FILES .JS OF SITE: "+sys.argv[1]+ os.linesep+ os.linesep)
soup = BeautifulSoup(datos,features="html.parser")
tags = soup('script')
for tag in tags:
if tag.get('src'):
file.write(tag.get('src') + os.linesep)
print("Successfully completed")
file.close()
except:
print("ERROR: can't get files, check its parameters or contact the developer.")
| [
"noreply@github.com"
] | castro-miguel-1993.noreply@github.com |
f10d66837b8f7ebee62e48a4756ae0c6ec88875e | 53564ee0c9552e3ad4368fef9e2e2a65ac1b2952 | /scripts/distribui_palestras_es.py | 9cf8b68050f0c3d80e3ef4a74d5ea07bf79f15a1 | [] | no_license | alynnefs/pybr2021-org | 899a37d2cc2afec74d07c63a468097cada765ba5 | 81365949dcc6524f07f2d23fead4722ed8029c25 | refs/heads/main | 2023-08-27T12:44:41.632675 | 2021-10-14T23:52:54 | 2021-10-14T23:52:54 | 372,657,470 | 0 | 0 | null | 2021-06-01T00:15:24 | 2021-06-01T00:15:24 | null | UTF-8 | Python | false | false | 2,122 | py | import pandas as pd
def trata_entrada():
entrada = pd.read_excel("entrada-revisada.xlsx")
entrada = entrada[entrada[entrada.columns[1]]=="Español"].copy()
entrada = entrada.drop(columns=entrada.columns.values[:52])
entrada[entrada.columns[1]]=entrada[entrada.columns[1]].apply(lambda x : x.strip())
entrada = entrada.drop(columns=entrada.columns.values[2:20])
entrada = entrada.drop(columns=entrada.columns.values[3:6])
entrada = entrada.drop(columns=entrada.columns.values[7:10])
entrada = entrada.sort_values(by=[entrada.columns[1]], ignore_index=True)
entrada = entrada.reset_index()
entrada.to_excel("teste-saida.xlsx")
return entrada
def distribui_palestras():
""" Para cada posição na lista de entrada, ele atribuirá 3 palestras.
% len(entrada) é utilizado para atribuir as palestras circularmente."""
entrada = trata_entrada()
entrada["avaliador1"] = entrada.apply(lambda x: entrada[entrada.columns[1]].iloc[(x.name+(3*1))%len(entrada)], axis=1)
entrada["avaliador2"] = entrada.apply(lambda x: entrada[entrada.columns[1]].iloc[(x.name+(3*2))%len(entrada)], axis=1)
entrada["avaliador3"] = entrada.apply(lambda x: entrada[entrada.columns[1]].iloc[(x.name+(3*3))%len(entrada)], axis=1)
entrada["email1"] = entrada.apply(lambda x: entrada[entrada.columns[2]].iloc[(x.name+(3*1))%len(entrada)], axis=1)
entrada["email2"] = entrada.apply(lambda x: entrada[entrada.columns[2]].iloc[(x.name+(3*2))%len(entrada)], axis=1)
entrada["email3"] = entrada.apply(lambda x: entrada[entrada.columns[2]].iloc[(x.name+(3*3))%len(entrada)], axis=1)
aux1=entrada.copy().drop(columns=["email2","email3","avaliador2","avaliador3"]).rename(columns={"email1":"email","avaliador1":"avaliador"})
aux2=entrada.copy().drop(columns=["email1","email3","avaliador1","avaliador3"]).rename(columns={"email2":"email","avaliador2":"avaliador"})
aux3=entrada.copy().drop(columns=["email1","email2","avaliador1","avaliador2"]).rename(columns={"email3":"email","avaliador3":"avaliador"})
saida = pd.concat([aux1, aux2, aux3],ignore_index=True)
saida.to_excel("teste-saida.xlsx")
return saida
print(distribui_palestras())
| [
"izabela.cardoso.tw@naturapay.net"
] | izabela.cardoso.tw@naturapay.net |
01a67e7aa5fdbd2999b05e02fd301afcbe31c173 | 26e91aead18d0fad6f5ce8fc4adf7d8e05a2f07f | /byceps/events/snippet.py | 36bdd9c77f6f317d8d47c3faf09d1487650be7fa | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | leathe/byceps | 40c1f8a1aab3521fcac45d88eab6364d448d4e67 | cd0c618af63fed1cd7006bb67da46eac0ddbb1c7 | refs/heads/master | 2020-12-02T09:02:51.087511 | 2019-12-14T17:00:22 | 2019-12-14T17:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | """
byceps.events.snippet
~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from dataclasses import dataclass
from ..services.snippet.transfer.models import SnippetVersionID
from .base import _BaseEvent
@dataclass(frozen=True)
class _SnippetEvent(_BaseEvent):
snippet_version_id: SnippetVersionID
@dataclass(frozen=True)
class SnippetCreated(_SnippetEvent):
pass
@dataclass(frozen=True)
class SnippetUpdated(_SnippetEvent):
pass
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.