blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc0b891a842f0d95cbaa4fc90d61413db3782199 | 85c426913d63773c4802a4a3c354df909030654b | /python/FA3/Integration/Copy of Copy of PyFood me and rohita/Copy of Copy of PyFood me and rohita/validations/Validate.py | 02ad4496540e48daae0060d8a176a66e271c35b7 | [] | no_license | SensehacK/playgrounds | 17bf2a3133db6c0cafe185c4cc2c7b59862980aa | 3decd550cdb6034db8b497051acaaec8221073aa | refs/heads/master | 2023-05-11T20:05:31.680168 | 2023-04-30T00:01:58 | 2023-04-30T00:01:58 | 159,632,542 | 1 | 0 | null | 2023-03-05T11:34:34 | 2018-11-29T08:27:53 | Python | UTF-8 | Python | false | false | 5,566 | py | '''
Created on Mar 15, 2017
@author: kautilya.save
'''
from database import ViewDB,searchdb
from exceptions import CustomException2
def validate_search_category(city, area):
    """Return guest-search results for the given city/area.

    Raises CustomException2.Invalidcityareaname when no restaurant matches,
    so callers can distinguish "bad location" from an empty page.
    """
    results = searchdb.search_as_a_guest(city, area)
    if not results:
        raise CustomException2.Invalidcityareaname()
    return results
def validate_search_as_rating(city,area,rating_lower,rating_upper):
    """Return restaurants in city/area whose rating is within [rating_lower, rating_upper]."""
    list_of_search_categories=searchdb.search_as_rating(city,area,rating_lower,rating_upper)
    if(len(list_of_search_categories)==0):
        # An empty result set is treated as an invalid filter combination.
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_likes(city,area):
    """Return restaurants in city/area filtered/sorted by likes.

    Raises:
        CustomException2.Invalidfilter: when no restaurant matches.
    """
    list_of_search_categories=searchdb.search_as_likes(city,area)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    # Bug fix: unlike every sibling validator, the result list was dropped
    # (no return), so callers always received None.
    return list_of_search_categories
def validate_search_as_dislikes(city,area):
    """Return restaurants in city/area filtered/sorted by dislikes."""
    list_of_search_categories=searchdb.search_as_dislikes(city,area)
    if(len(list_of_search_categories)==0):
        # Empty result set means the filter matched nothing.
        raise CustomException2.Invalidfilter()
    return list_of_search_categories
def validate_search_as_type(city,area,var1):
    """Return restaurants in city/area of the given type (var1 is the type filter)."""
    list_of_search_categories=searchdb.search_as_type(city,area,var1)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
# The validators below all follow the same contract: delegate to the matching
# searchdb query for the requested filter combination (rating range, likes,
# dislikes, restaurant type, or any mix of them), raise
# CustomException2.Invalidfilter when nothing matches, otherwise return the
# result list unchanged.
def validate_search_as_rating_dislikes(city,area,rating_lower,rating_upper):
    list_of_search_categories=searchdb.search_as_rating_dislikes(city,area,rating_lower,rating_upper)
    if(len(list_of_search_categories)==0):
        raise CustomException2.Invalidfilter()
    return list_of_search_categories
def validate_search_as_rating_likes(city,area,rating_lower,rating_upper):
    list_of_search_categories=searchdb.search_as_rating_likes(city,area,rating_lower,rating_upper)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_rating_type(city,area,rating_lower,rating_upper,var):
    # var is the restaurant/cuisine type filter.
    list_of_search_categories=searchdb.search_as_rating_type(city,area,rating_lower,rating_upper,var)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_dislike_like(city,area):
    list_of_search_categories=searchdb.search_as_dislike_like(city,area)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_dislike_type(city,area,var1):
    list_of_search_categories=searchdb.search_as_dislike_type(city,area,var1)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_like_type(city,area,var1):
    list_of_search_categories=searchdb.search_as_like_type(city,area,var1)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_rating_dislike_like(city,area,rating_lower,rating_upper):
    list_of_search_categories=searchdb.search_as_rating_dislike_like(city,area,rating_lower,rating_upper)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_rating_dislike_type(city,area,rating_lower,rating_upper,var1):
    list_of_search_categories=searchdb.search_as_rating_dislike_type(city,area,rating_lower,rating_upper,var1)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_dislike_like_type(city,area,var1):
    list_of_search_categories=searchdb.search_as_dislike_like_type(city,area,var1)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_like_type_rating(city,area,rating_lower,rating_upper,var1):
    list_of_search_categories=searchdb.search_as_like_type_rating(city,area,rating_lower,rating_upper,var1)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_search_as_all(city,area,rating_lower,rating_upper,var1):
    # Applies every available filter at once.
    list_of_search_categories=searchdb.search_as_all(city,area,rating_lower,rating_upper,var1)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_hotel_name(city,area,restaurant_name):
    """Look up a specific restaurant by name within city/area."""
    list_of_search_categories=searchdb.hotel_name(city,area,restaurant_name)
    if(len(list_of_search_categories)==0):
        raise CustomException2 .Invalidfilter()
    return list_of_search_categories
def validate_view_category(restaurant_type):
    """Return menu categories for a restaurant type; raise if the type is unknown."""
    list_of_restaurant_categories=ViewDB.get_restaurant_categories(restaurant_type)
    if(len(list_of_restaurant_categories)==0):
        raise CustomException2 .InvalidCategoryException()
    return list_of_restaurant_categories
def validate_view_category_items(category):
    """Return the food items of a menu category.

    Raises:
        CustomException2.InvalidCatItemsException: when the category has no items.
    """
    list_of_restaurant_categories_items=ViewDB.get_categories_fooditems(category)
    if(len(list_of_restaurant_categories_items)==0):
        # Consistency fix: instantiate the exception like every sibling
        # validator does (previously the bare class was raised).
        raise CustomException2 .InvalidCatItemsException()
    return list_of_restaurant_categories_items
| [
"kautilyasave@gmail.com"
] | kautilyasave@gmail.com |
58cff43dd9b00a860369424cd66fd9750167eee5 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/python数据分析机器学习部分/机器学习/day06/tf.py | c6bbc842ae1c6bfc7f9cff4b546a67aee8307fc8 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # -*- coding: utf-8 -*-
# Demo script: sentence-tokenize a tiny corpus, build a bag-of-words count
# matrix, then L1-normalize the rows into term frequencies (TF).
from __future__ import unicode_literals
import nltk.tokenize as tk
import sklearn.feature_extraction.text as ft
import sklearn.preprocessing as sp
# Three-sentence sample document (one string literal split across lines).
doc = 'The brown dog is running. ' \
      'The black dog is in the black room. ' \
      'Running in the room is forbidden.'
print(doc)
# Split the document into sentences with NLTK's sentence tokenizer.
sentences = tk.sent_tokenize(doc)
for i, sentence in enumerate(sentences):
    print(i + 1, sentence)
# Bag-of-words: one row per sentence, one column per vocabulary word.
cv = ft.CountVectorizer()
bow = cv.fit_transform(sentences).toarray()
print(bow)
words = cv.get_feature_names()
print(words)
# L1 normalization turns raw counts into per-sentence term frequencies.
tf = sp.normalize(bow, norm='l1')
print(tf)
| [
"yabing_ji@163.com"
] | yabing_ji@163.com |
b093c8113f7bbff2923760d0f934c28f35a0c438 | 244e751aa882c6df1abb04db8a4de70a0e804ece | /Lesson 8/01 - Visualization 1.py | b8d7326fd3af2964c0f679a09c10a1aa3ab5f3de | [] | no_license | Mostafa-At-GitHub/Intro-to-Data-Science--Udacity | 0c0656234a26edee84b430745302f330b8857885 | 6519e60eb23df6568d64f47cfe7d8600acb8e933 | refs/heads/master | 2021-09-01T00:40:13.821220 | 2017-12-23T20:07:47 | 2017-12-23T20:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | import pandas as pd
from ggplot import *
def plot_weather_data(turnstile_weather):
    '''
    You are passed in a dataframe called turnstile_weather.
    Use turnstile_weather along with ggplot to make a data visualization
    focused on the MTA and weather data we used in assignment #3.
    You should feel free to implement something that we discussed in class
    (e.g., scatterplots, line plots, or histograms) or attempt to implement
    something more advanced if you'd like.
    Here are some suggestions for things to investigate and illustrate:
    * Ridership by time of day or day of week
    * How ridership varies based on Subway station (UNIT)
    * Which stations have more exits or entries at different times of day
    (You can use UNIT as a proxy for subway station.)
    If you'd like to learn more about ggplot and its capabilities, take
    a look at the documentation at:
    https://pypi.python.org/pypi/ggplot/
    You can check out:
    https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/turnstile_data_master_with_weather.csv
    To see all the columns and data points included in the turnstile_weather
    dataframe. However, due to the limitation of our Amazon EC2 server, we are giving you a random
    subset, about 1/3 of the actual data in the turnstile_weather dataframe.
    '''
    # Sum hourly entries/exits grouped by the rain flag (0/1).
    temp_df = turnstile_weather[['rain','ENTRIESn_hourly', 'EXITSn_hourly']]
    df = temp_df.groupby("rain", as_index=False).sum()
    # NOTE(review): chained assignment (df["rain"][0] = ...) may hit pandas'
    # SettingWithCopyWarning on newer versions; df.loc[0, "rain"] is the safe form.
    df["rain"][0] = "No Rain"
    df["rain"][1] = "Rain"
    # Python 2 print statement — this file predates Python 3.
    print df
    # Bar chart of total entries for rainy vs. non-rainy periods.
    plot = ggplot(df, aes(x="rain", weight="ENTRIESn_hourly"))
    plot += ggtitle("Density per Station")
    plot += geom_bar(stat="identity")
    plot += xlab('Station')
    plot += ylab("Denisty per day")
    return plot
| [
"mohamedanwarvic@gmail.com"
] | mohamedanwarvic@gmail.com |
51bafdf4e50b66a4ee9225e493ae81aad6aa8505 | c70ac4c4f159bf9d0d06870a8975143885b067de | /it_courses/wsgi.py | 7849650c3d17ba2337b969d120fbc654eb8a8652 | [] | no_license | assigdev/it_courses | b494856e43d02d4ba78018635c3b9fefb48c8522 | a2e80fdac0e0e8d43c0b32b7b00cf3ea3c8f7c26 | refs/heads/master | 2020-03-06T18:24:38.075774 | 2018-05-07T21:33:48 | 2018-05-07T21:33:48 | 127,006,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for it_courses project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "it_courses.settings")
# Module-level WSGI callable that servers (gunicorn/uWSGI/mod_wsgi) look up.
application = get_wsgi_application()
| [
"assigdev@gmail.com"
] | assigdev@gmail.com |
2e829c6947f7da5b59fd8d9fd9451aeeaf9e4329 | 917a27a988bd9617115368f549e26b8a265bf7f9 | /fb1/migrations/0023_auto_20200805_1246.py | 72f2afc5826d7dc5e6d2b996922193d2a812a4d3 | [] | no_license | anand0101/FbPost | 0a33ee710120d4c4b753aa87b04a9166605653fe | c649a44070572b286412369d4975cca82c55cdd8 | refs/heads/master | 2022-11-28T08:01:29.967598 | 2020-08-06T12:21:28 | 2020-08-06T12:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # Generated by Django 3.0.6 on 2020-08-05 07:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Product and ProductHistory.

    Do not hand-edit the operations; regenerate with `makemigrations` instead.
    """
    initial = True
    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_at', models.DateTimeField(auto_created=True)),
                ('modified_at', models.DateTimeField(auto_now_add=True)),
                ('title', models.CharField(max_length=255, verbose_name='Products')),
                ('url', models.URLField(verbose_name='Product URL')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ProductHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_at', models.DateTimeField(auto_created=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=19)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='chat.Product')),
            ],
        ),
    ]
"ansarihasnain3598@gmail.com"
] | ansarihasnain3598@gmail.com |
e29d72cdd89552c4900a47d83ec088569c38a8d6 | 0f5f6ff75cef3e81419216ba0191bb69a26c9068 | /aws/debugging.py | b4f37180a4ccbd3532904ea44281823bfc7115e0 | [] | no_license | daddyawesome/CodingP | 1c7bbb2edd30333f7cb1b78ec6a0428854c4fa2b | 66ab4e452c23f3f770d6ad1e32f604c65e1dcbd3 | refs/heads/master | 2022-10-13T18:36:23.068195 | 2022-10-04T07:01:58 | 2022-10-04T07:01:58 | 220,047,911 | 0 | 0 | null | 2020-07-07T20:49:07 | 2019-11-06T17:01:44 | Python | UTF-8 | Python | false | false | 430 | py | # Ask the user for a value and confirm the supplied value is greater than 0
def checkvalue(valuetocheck):
assert (type(valuetocheck) is int), "You must enter a number."
assert (valuetocheck > 0), "Value entered must be greater than 0"
if valuetocheck > 4:
print("Value is greater than 4")
else:
print("Value is lesser than 4")
var = int(input("Enter a number greater than 0: "))
checkvalue(var) | [
"sablay296@gmail.com"
] | sablay296@gmail.com |
f2740e448fe9f797da84617de0a145d889873a4b | 197ad5eecd8d5fb46e75dff67bab3be96dd961b0 | /graphene_mongoengine/fields.py | 3865af262e84bcb967d8544f664515fb44cf19c2 | [] | no_license | tomasgarzon/graphene-mongoengine | c18a5b51e411e905a8890bdc542898673d0280a7 | ecc2116739f56d065c07024c3082958f490307f5 | refs/heads/master | 2021-01-20T09:20:10.272424 | 2017-05-04T08:25:44 | 2017-05-04T08:25:44 | 90,237,184 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | from functools import partial
from django.db.models.query import QuerySet
from graphene.types import Field, List
from graphene.relay import ConnectionField, PageInfo
from graphql_relay.connection.arrayconnection import connection_from_list_slice
from graphene_django.utils import maybe_queryset
from .utils import get_type_for_document
class MongoEngineListField(Field):
    """graphene Field that wraps its type in a List and resolves via MongoEngine."""
    def __init__(self, _type, *args, **kwargs):
        super(MongoEngineListField, self).__init__(List(_type), *args, **kwargs)
    @property
    def model(self):
        # The MongoEngine document class backing this field's node type.
        return self.type.of_type._meta.node._meta.document
    @staticmethod
    def list_resolver(resolver, root, args, context, info):
        # maybe_queryset materializes a queryset result into a plain list.
        return maybe_queryset(resolver(root, args, context, info))
    def get_resolver(self, parent_resolver):
        # Bind the wrapped resolver so list_resolver post-processes its output.
        return partial(self.list_resolver, parent_resolver)
class MongoEngineConnectionField(ConnectionField):
    """Relay ConnectionField backed by a MongoEngine document manager.

    The optional `on` kwarg names an alternate manager attribute on the
    document class to use as the default iterable.
    """
    def __init__(self, *args, **kwargs):
        # Pop `on` before the base class sees the kwargs.
        self.on = kwargs.pop('on', False)
        super(MongoEngineConnectionField, self).__init__(*args, **kwargs)
    @property
    def model(self):
        return self.type._meta.node._meta.document
    def get_manager(self):
        if self.on:
            return getattr(self.model, self.on)
        else:
            return None
    @staticmethod
    def connection_resolver(resolver, connection, default_manager, root, args, context, info):
        iterable = resolver(root, args, context, info)
        if iterable is None:
            # Fall back to the configured manager when the resolver yields nothing.
            iterable = default_manager
        iterable = maybe_queryset(iterable)
        if isinstance(iterable, QuerySet):
            # count() avoids materializing the whole queryset just for its length.
            _len = iterable.count()
        else:
            _len = len(iterable)
        # Build the Relay connection over the full slice [0, _len).
        connection = connection_from_list_slice(
            iterable,
            args,
            slice_start=0,
            list_length=_len,
            list_slice_length=_len,
            connection_type=connection,
            edge_type=connection.Edge,
            pageinfo_type=PageInfo,
        )
        connection.iterable = iterable
        connection.length = _len
        return connection
    def get_resolver(self, parent_resolver):
        return partial(self.connection_resolver, parent_resolver, self.type, self.get_manager())
class MongoEngineDocumentField(Field):
    """Field whose GraphQL type is looked up from a MongoEngine document class."""
    def __init__(self, document, *args, **kwargs):
        self.document = document
        super(MongoEngineDocumentField, self).__init__(*args, **kwargs)
    def internal_type(self, schema):
        _type = self.get_object_type(schema)
        # Fail loudly when the document type was never registered with the schema.
        if not _type and self.parent._meta.only_fields:
            raise Exception(
                "Collection %r is not accessible by the schema. "
                "You can either register the type manually "
                "using @schema.register. "
                "Or disable the field in %s" % (
                    self.document,
                    self.parent,
                )
            )
        return schema.T(_type)
    def get_object_type(self, schema):
        return get_type_for_document(schema, self.document)
    @property
    def List(self):
        # Convenience: a List field of this same document type.
        return List(self, *self.args, **self.kwargs)
def get_connection_field(*args, **kwargs):
    """Factory helper: build a MongoEngineConnectionField with the given args."""
    return MongoEngineConnectionField(*args, **kwargs)
| [
"tomasgarzonhervas@gmail.com"
] | tomasgarzonhervas@gmail.com |
fb81536c36d1bcf3197847f734633a588c8236d2 | 84db91ca8e14687251eca5d4ffe1a50fde89e4d9 | /parsbot/chat/migrations/0001_initial.py | 449f59a6375bd31f545fba965f0a53fd9338819d | [
"MIT"
] | permissive | aodarc/project009 | 426fba89f7978dec5605eadf5b72932f8866201d | 3fcd795cd936223442cf09a0a3494fc0fb54ceb3 | refs/heads/master | 2020-03-28T14:33:40.728781 | 2018-09-24T20:12:15 | 2018-09-24T20:12:15 | 148,499,017 | 0 | 1 | MIT | 2018-09-24T15:52:58 | 2018-09-12T15:08:02 | Python | UTF-8 | Python | false | false | 1,583 | py | # Generated by Django 2.1.1 on 2018-09-24 17:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_at', models.DateTimeField(auto_created=True)),
('modified_at', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(max_length=255, verbose_name='Products')),
('url', models.URLField(verbose_name='Product URL')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_at', models.DateTimeField(auto_created=True)),
('price', models.DecimalField(decimal_places=2, max_digits=19)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='chat.Product')),
],
),
]
| [
"odarchenko@ex.ua"
] | odarchenko@ex.ua |
72a8fb5dde9f4d106f9351a271aa41bde1edf893 | 97062249c6eb04069c6fb01e71d06bc334c828e1 | /apps/sqoop/src/sqoop/api/submission.py | 44009836ab097b7541ac0a0f966c8dd8f1f024fe | [
"Apache-2.0"
] | permissive | Albertsss/hue | 1c8b31c64cc420a029f5b5b80712fb3d0c6cbd6e | 454d320dd09b6f7946f3cc05bc97c3e2ca6cd485 | refs/heads/master | 2021-07-08T17:21:13.237871 | 2018-05-30T06:03:21 | 2018-05-30T06:03:21 | 135,386,450 | 0 | 1 | Apache-2.0 | 2020-07-25T13:36:58 | 2018-05-30T04:06:18 | Python | UTF-8 | Python | false | false | 2,077 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import socket
from django.utils.translation import ugettext as _
from sqoop import client, conf
from decorators import get_submission_or_exception
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from exception import handle_rest_exception
from utils import list_to_dict
from django.views.decorators.cache import never_cache
__all__ = ['get_submissions', 'submissions']
LOG = logging.getLogger(__name__)
@never_cache
def get_submissions(request):
    """Return all Sqoop submissions as JSON: {status, errors, submissions}."""
    response = {
        'status': 0,
        'errors': None,
        'submissions': []
    }
    # NOTE(review): `status` is parsed from the query string but never used below.
    status = request.GET.get('status', 'submissions').split(',')
    try:
        # One client per request, bound to the requesting user and locale.
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        submissions = c.get_submissions()
        response['submissions'] = list_to_dict(submissions)
    except RestException, e:
        # Translate REST failures into the structured error payload (Python 2 except syntax).
        response.update(handle_rest_exception(e, _('Could not get submissions.')))
    return JsonResponse(response)
@never_cache
def submissions(request):
    """Dispatch for /submissions: only GET is supported; other verbs get HTTP 405."""
    if request.method == 'GET':
        return get_submissions(request)
    else:
        raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405)
| [
"540227148@qq.com"
] | 540227148@qq.com |
43e61b6c59561aa8e5347f9f382de8b3bafcb311 | c46becf6497484e4f0a904ad0104a3e971982481 | /upbit.py | 9cd7ccc8e666e7e4d0f154d05560fef830082c31 | [] | no_license | damoa-recommend/time-series-ARIMA | 4cfa8213cda0bb4843c583aad94185f27540372a | cb11f7bc0e98c96d0cc2bf532ac46da8ef586240 | refs/heads/master | 2023-03-26T20:38:18.728032 | 2021-03-28T06:12:58 | 2021-03-28T06:12:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | from websocket import WebSocketApp
import json, ssl, asyncio
from datetime import datetime
from model import add_data, fit, forecast
try:
import thread
except ImportError:
import _thread as thread
import time
def on_message(ws, message):
    """Handle one trade tick: record it, refit the model, print forecast vs actual."""
    msg = json.loads(message.decode('utf-8'))
    # Hard-coded KRW-per-USD conversion rate — presumably to work in USD;
    # TODO confirm and refresh, exchange rates drift.
    KRW_RATE = 1129
    price = msg["trade_price"] / KRW_RATE
    # Upbit trade timestamps are in milliseconds.
    ts = datetime.fromtimestamp(int(msg["trade_timestamp"]) / 1000)
    add_data({
        "ts": ts,
        "price": price,
        "index": int(msg["trade_timestamp"])
    })
    # Refit with the new observation, then forecast the next price.
    fit()
    forecast_price = forecast()
    print('[%s] 실제가격: %10.2f, 예측가격: %10.2f, 예측가격 대비 실제가격: %10.2f'%(ts, price, forecast_price, (forecast_price-price) * KRW_RATE))
def on_error(ws, error):
    # Surface websocket errors on stdout; the connection object is unused.
    print(error)
def on_close(ws):
    # Announce that the websocket connection has been closed.
    print("close")
def on_open(ws):
    """On connect, subscribe to the KRW-BTC ticker stream from a helper thread."""
    def run(*args):
        # See https://docs.upbit.com/docs/upbit-quotation-websocket
        # ticker: current price, trade: executed trades, orderbook: order book
        originData = [
            { "ticket": "UNIQUE_TICKET" },
            # { "type": "orderbook", "codes": ["KRW-MTL"], "isOnlyRealtime": True },
            { "type": "ticker", "codes": ["KRW-BTC"] },
            # { "type": "trade", "codes": ["KRW-MTL"] }
        ]
        ws.send(json.dumps(originData))
    # Send the subscription off the callback thread.
    thread.start_new_thread(run, ())
if __name__ == "__main__":
    # Warm up the ARIMA model before streaming begins.
    fit()
    ws = WebSocketApp(
        "wss://api.upbit.com/websocket/v1",
        on_message = on_message,
        on_error = on_error,
        on_close = on_close,
    )
    ws.on_open = on_open
    # NOTE(review): CERT_NONE disables TLS certificate verification — fine for a
    # local experiment, insecure for anything else.
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
"pjt3591oo@gmail.com"
] | pjt3591oo@gmail.com |
61bbf79465067d63c29ee60dc6d48f4dca794443 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/object_attr_get-105.py | 667ce2fd6395edeaed0eb054576d151fe61bfa6f | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | class A(object):
a:int = 42
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
a:A = None
b:B = None
a = b = B()
print(a.a)
print($Exp.a)
print(b.b)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
9c1c5043ec4afa52876e8466255b46d56f6a2136 | 3716e91c0a18a2cf0b5807cc673d95a7539b008c | /DungeonsKitgard/DeadlyDungeonRescue.py | 284f1afc14a5fc11ad02ee961f1401bbc26d1654 | [] | no_license | kiwiapple87/CodeCombat-1 | 47f0fa6d75d6d3e9fb9c28feeb6fe2648664c1aa | ce0201e5ed099193ca40afd3b7abeee5a3732387 | refs/heads/master | 2021-05-01T16:38:03.575842 | 2016-08-25T11:13:26 | 2016-08-25T11:13:26 | 66,552,813 | 1 | 0 | null | 2016-08-25T11:39:20 | 2016-08-25T11:39:18 | null | UTF-8 | Python | false | false | 1,397 | py | # http://codecombat.com/play/level/deadly-dungeon-rescue
# Сбегите из подземелья после спасения измученного крестьянина.
# Вы можете спрятаться за горгульями.
# Убийство охранников может привести к нежелательным последствиям.
# Если вы сможете собрать все сокровища, вы можете получить дополнительную награду.
self.moveUp(5)
self.moveRight(6)
self.moveDown(4)
self.moveRight(6)
self.moveDown(4)
self.attack('Torture Room Door')
self.attack('Torture Room Door')
self.attack('Torture Master')
self.attack('Torture Master')
self.moveRight(2)
self.moveDown(2)
self.moveLeft(8)
self.attack('South Vault Door')
self.attack('South Vault Door')
self.moveUp(2)
self.moveRight()
self.moveDown()
self.moveLeft(3)
self.moveRight()
self.moveUp(6)
self.moveLeft(2)
self.moveRight(2)
self.moveDown(4)
self.moveUp(4)
self.moveRight(2)
self.moveLeft(2)
self.moveDown(4)
self.moveUp(4)
self.moveDown(7)
self.moveLeft(3)
# self.moveRight(4)
# self.moveLeft(2)
self.moveUp(9)
self.moveRight(6)
self.moveDown(2)
self.attack('Exit Door')
self.attack('Exit Door')
self.moveRight(7)
# self.moveDown(4)
# self.attack('Torture Room Door')
# self.attack('Torture Room Door')
# self.moveLeft(2)
# self.moveRight(2)
| [
"vadim-job-hg@yandex.ru"
] | vadim-job-hg@yandex.ru |
b8c3b6c0035aae5cda585026ddf1459337697870 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_shifted.py | f91e9b4d9a75c8d13f05fdca4c6f193c33d00954 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._shift import _SHIFT
# class header
class _SHIFTED(_SHIFT, ):
    """Vocabulary entry for the verb form "shifted" (base form: "shift")."""
    def __init__(self,):
        # Initialize the base word entry, then set this form's identity fields.
        _SHIFT.__init__(self)
        self.name = "SHIFTED"
        self.specie = 'verbs'
        self.basic = "shift"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e61e59a67298c69be410f34ad18bf220e57a3d6c | 92e8c0b58c3b005c5f74f770d2e91c7fc91cf181 | /tests/scraper/scraper_processor_run_test.py | ff2bb27705e37508ecc9a1eb61114d289785e23e | [] | no_license | xhijack/django-dynamic-scraper | d1e8ab6e68a34fdea810d84aa312f176610289af | 7b09960e66f7029f50266033848eaba81352b212 | refs/heads/master | 2021-01-16T18:09:35.081262 | 2016-07-15T23:45:32 | 2016-07-15T23:45:32 | 62,983,247 | 0 | 0 | null | 2016-07-10T05:28:29 | 2016-07-10T05:28:29 | null | UTF-8 | Python | false | false | 7,715 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging, os.path, unittest
from twisted.internet import reactor
from scrapy.exceptions import CloseSpider
from scraper.models import Event
from scraper.scraper_test import EventSpider, ScraperTest
from dynamic_scraper.models import SchedulerRuntime, Log
class ScraperProcessorRunTest(ScraperTest):
def setUpProcessorTest(self):
self.se_url.processors = 'pre_url'
self.se_url.proc_ctxt = "'pre_url': 'http://localhost:8010/static/site_with_processor/'"
self.se_url.save()
self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_processor/event_main.html')
self.event_website.save()
def setUpProcessorTestWithDetailPageUrlPlaceholder(self):
self.se_url.processors = 'pre_url'
self.se_url.proc_ctxt = "'pre_url': 'http://localhost:8010/static/{title}/'"
self.se_url.save()
self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_processor/event_main_placeholder.html')
self.event_website.save()
def test_processor(self):
self.setUpProcessorTest()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.all()), 2)
@unittest.skip("Skipped due to unresolved problem that order of processor execution can not clearly determined.")
def test_multiple_processors_use(self):
self.setUpProcessorTest()
self.se_desc.processors = 'pre_string, post_string '
self.se_desc.proc_ctxt = "'pre_string': 'before_', 'post_string': '_after',"
self.se_desc.save()
self.run_event_spider(1)
self.assertEqual(Event.objects.get(id=1).description, 'before_Event 2 description_after')
def test_replace_processor_wrong_x_path(self):
self.setUpProcessorTest()
self.se_title.x_path = '/div[@class="class_which_is_not_there"]/text()'
self.se_title.processors = 'replace'
self.se_title.proc_ctxt = "'replace': 'This text is a replacement'"
self.se_title.save()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.all()), 0)
def test_replace_processor_correct_x_path(self):
self.setUpProcessorTest()
self.se_title.processors = 'replace'
self.se_title.proc_ctxt = "'replace': 'This text is a replacement'"
self.se_title.save()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.all()), 2)
def test_replace_processor_unicode_replace(self):
self.setUpProcessorTest()
self.se_title.processors = 'replace'
self.se_title.proc_ctxt = "'replace': 'Replacement with beautiful unicode ❤ ☀ ★ ☂ ☻ ♞ ☯ ☭ ☢'"
self.se_title.save()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.all()), 2)
def test_static_processor_wrong_x_path(self):
self.setUpProcessorTest()
self.se_title.x_path = '/div[@class="class_which_is_not_there"]/text()'
self.se_title.processors = 'static'
self.se_title.proc_ctxt = "'static': 'This text should always be there'"
self.se_title.save()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.all()), 2)
def test_static_processor_empty_x_path(self):
self.setUpProcessorTest()
self.se_title.x_path = ''
self.se_title.processors = 'static'
self.se_title.proc_ctxt = "'static': 'This text should always be there'"
self.se_title.save()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.filter(title='This text should always be there')), 2)
def test_static_processor_correct_x_path(self):
self.setUpProcessorTest()
self.se_title.processors = 'static'
self.se_title.proc_ctxt = "'static': 'This text should always be there'"
self.se_title.save()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.all()), 2)
def test_static_processor_unicode_text(self):
self.setUpProcessorTest()
self.se_title.processors = 'static'
self.se_title.proc_ctxt = "'static': 'This text should always be there ❤ ☀ ★ ☂ ☻ ♞ ☯ ☭ ☢'"
self.se_title.save()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.filter(title='This text should always be there ❤ ☀ ★ ☂ ☻ ♞ ☯ ☭ ☢')), 2)
def test_reg_exp(self):
self.se_desc.reg_exp = '(\d{6})'
self.se_desc.save()
self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_reg_exp/event_main.html')
self.event_website.save()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.all()), 2)
self.assertEqual(Event.objects.get(title='Event 1').description, '563423')
def test_processor_with_detail_page_url_placeholder(self):
self.setUpProcessorTestWithDetailPageUrlPlaceholder()
self.run_event_spider(1)
self.assertEqual(len(Event.objects.all()), 1)
self.assertEqual(
Event.objects.get(title='site_with_processor').url,
'http://localhost:8010/static/site_with_processor/event1.html')
def test_processor_with_placeholder_mp_to_dp(self):
self.setUpProcessorTest()
self.se_desc.processors = 'post_string'
self.se_desc.proc_ctxt = "'post_string': '_START_{title}_END'"
self.se_desc.save()
self.run_event_spider(1)
self.assertEqual(Event.objects.filter(description='Event 1 description_START_Event 1_END').count(), 1)
def test_processor_with_placeholder_mp_to_dp_unicode(self):
self.event_website.url = os.path.join(self.SERVER_URL, 'site_unicode/event_main.html')
self.event_website.save()
self.se_desc.processors = 'post_string'
self.se_desc.proc_ctxt = "'post_string': '_START_{title}_END'"
self.se_desc.save()
self.run_event_spider(1)
self.assertEqual(Event.objects.filter(description='Event 1 description ♖ ☦ ✝ ❖ ➎ ♠ ♣ ♥_START_Event 1 ❤ ☀ ★ ☂ ☻ ♞ ☯ ☭ ☢_END').count(), 1)
def test_processor_with_placeholder_dp_to_mp(self):
    # Reverse direction: a {description} value from the detail page is
    # substituted into a processor on a main-page element.
    self.setUpProcessorTest()
    self.se_title.processors = 'post_string'
    self.se_title.proc_ctxt = "'post_string': '_START_{description}_END'"
    self.se_title.save()
    self.run_event_spider(1)
    self.assertEqual(Event.objects.filter(title='Event 1_START_Event 1 description_END').count(), 1)
def test_processor_with_placeholder_tmp_to_mp(self):
    # A temporary element ({extra_standard_1}) feeds a processor on a
    # main-page element.
    self.setUpProcessorTest()
    self.se_title.processors = 'post_string'
    self.se_title.proc_ctxt = "'post_string': '_START_{extra_standard_1}_END'"
    self.se_title.save()
    self.run_event_spider(1)
    self.assertEqual(Event.objects.filter(title='Event 1_START_Event 1_END').count(), 1)
def test_processor_with_placeholder_tmp_with_placeholder_to_mp(self):
    # The temporary element itself runs a processor first (remove_chars
    # strips digits and spaces), and only then is substituted into the
    # title processor -- hence 'Event 1' becomes just 'Event'.
    self.setUpProcessorTest()
    self.se_title.processors = 'post_string'
    self.se_title.proc_ctxt = "'post_string': '_START_{extra_standard_1}_END'"
    self.se_title.save()
    self.se_es_1.processors = 'remove_chars'
    self.se_es_1.proc_ctxt = "'remove_chars': '[0-9 ]+'"
    self.se_es_1.save()
    self.run_event_spider(1)
    self.assertEqual(Event.objects.filter(title='Event 1_START_Event_END').count(), 1)
| [
"Holger.Drewes@googlemail.com"
] | Holger.Drewes@googlemail.com |
7e2b34df685708489aa33c5b08b40994f15d6866 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/parity_20200821124709.py | 072436c7d2907623d847dd852d9c36fed3b03bda | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def parity(A):
# we can have two arrays one to store even and other to store odd
# that would cost memory
even = []
odd = []
for i in range(len(A)):
if A[i]%2 == 0:
parity([3,1,2,4]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
823499cb9328a7731d079ae7523ae00c2b7341b3 | e71ecfe679dd8c800e8b0960d4ba68e19401a4fc | /PyGithub_examples/search_by_code.py | a86993ecc3ded7531443e407581a721b05c40738 | [] | no_license | igizm0/SimplePyScripts | 65740038d36aab50918ca5465e21c41c87713630 | 62c8039fbb92780c8a7fbb561ab4b86cc2185c3d | refs/heads/master | 2021-04-12T10:48:17.769548 | 2017-06-15T18:53:04 | 2017-06-15T18:53:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
LOGIN = None
PASSWORD = None
# http://user:password@proxy_host:proxy_port
PROXY = None
if PROXY:
import os
os.environ['http_proxy'] = PROXY
from github import Github
gh = Github(LOGIN, PASSWORD)
# print(list(gh.search_code('requests auth github filename:.py language:python')[:5]))
search_query = 'requests auth github filename:.py language:python'
# print(gh.search_code(search_query).totalCount)
# The Search API has a custom rate limit. For requests using Basic Authentication, OAuth, or client ID and
# secret, you can make up to 30 requests per minute. For unauthenticated requests, the rate limit allows
# you to make up to 10 requests per minute.
#
# Если авторизован, то каждые 2 секунды можно слать запрос, иначе каждые 6
timeout = 2 if LOGIN and PASSWORD else 6
# Немного добавить на всякий
timeout += 0.5
import time
search_result = gh.search_code(search_query)
total_count = search_result.totalCount
page = 0
data = search_result.get_page(page)
print(data[0])
print(dir(data[0]))
print(data[0].url)
print(data[0].content)
from base64 import b64decode as base64_to_text
print(base64_to_text(data[0].content.encode()).decode())
print(data[0].html_url)
# get user from repo url
user = data[0].html_url.split('/')[3]
print(user)
# i = 1
# while total_count > 0:
# data = search_result.get_page(page)
# for result in data:
# print(i, result)
# i += 1
#
# print('page: {}, total: {}, results: {}'.format(page, total_count, len(data)))
# page += 1
# total_count -= len(data)
#
# # Задержка запросов, чтобы гитхаб не блокировал временно доступ
# time.sleep(timeout)
# i = 1
# for match in gh.search_code(search_query):
# print(i, match)
# i += 1
#
# time.sleep(timeout)
#
# # print(dir(match))
# # break
| [
"gil9red@gmail.com"
] | gil9red@gmail.com |
da700d1576d08b4500612bcf1e824f1dee1cd1a6 | ffe2e0394c3a386b61e0c2e1876149df26c64970 | /cal.py | 922ab0f65f531f1c5d5749bfb43c69c5da379a0d | [] | no_license | garethpaul/WillBeOut | 202e0ad7a12800c6008ec106c67ee7d23d256a07 | c8c40f2f71238c5a5ac6f5ce0cfb3a07e166b341 | refs/heads/master | 2016-09-05T14:02:15.648358 | 2013-01-16T17:26:43 | 2013-01-16T17:26:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | import tornado.auth
import tornado.web
import base
import json
from cgi import escape
class CalHandler(base.BaseHandler):
    """Tornado handler that stores and returns a user's calendar time slots."""

    @tornado.web.authenticated
    def post(self):
        # Persist one calendar slot for the logged-in user; all values come
        # from the request arguments and are cast before hitting the DB.
        _user_id = self.get_current_user()['id']
        _user_name = self.get_current_user()['name']
        _hour = self.get_argument('hour')
        _day = self.get_argument('day')
        _date = self.get_argument('d')
        _month = self.get_argument('month')
        _week = self.get_argument('week')
        _string = self.get_argument('string')
        # check if vote exists
        # NOTE(review): the query itself is parameterized (%s placeholders);
        # escape() here HTML-escapes the user name -- presumably defensive
        # for later display. Confirm it is not double-escaped on output.
        c = self.db.execute(
            """INSERT INTO willbeout_times (user_id, user_name, hour, day, month, week, string, d) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)""",
            int(_user_id), escape(_user_name), str(_hour), str(_day), int(
                _month), int(_week), str(_string), int(_date))
        self.write('OK')

    @tornado.web.authenticated
    def get(self):
        # Return the current user's entries for the requested week ('wk'
        # query argument) as a JSON list of slot dicts.
        _json = []
        _user_id = self.get_current_user()['id']
        _wk = self.get_argument('wk')
        for i in self.db.query(
                "SELECT * FROM willbeout_times WHERE user_id = %s AND week = %s",
                escape(_user_id), _wk):
            _json.append({'day': str(i.day), 'month': i.month, 'hour':
                          i.hour, 'date': i.d, 'string': str(i.string)})
        self.write(json.dumps(_json))
| [
"gareth@garethpaul.com"
] | gareth@garethpaul.com |
714c50c40ca01e9b0f9ed55c904b6094b746454f | 8c8c56dfd72f3de4c2637050d113a58193ee848a | /scripts/create_span_concept_dict.py | b97a17717f886df57237336e76b318c773e027cd | [] | no_license | raosudha89/amr_emnlp | 9eb83d68e4c81cd257e2f0d0ed2ac29440563ca4 | 91b3ca6526c6872fed2cdfff59ff83342353ae07 | refs/heads/master | 2021-01-10T04:59:47.845470 | 2016-02-01T00:14:31 | 2016-02-01T00:14:31 | 50,605,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | import sys
import cPickle as pickle
from collections import OrderedDict
# Python 2 script: build a span -> [(concept, count), ...] lookup from a
# pickled concept training dataset and dump it to span_concept_dict.p.
argv = sys.argv[1:]
if len(argv) < 1:
    print "usage: create_span_concept_dict.py <concept_training_dataset.p>"
    sys.exit()

# {sentence id: [[span, pos, concept, name, ner], ...]}
concept_training_dataset = pickle.load(open(argv[0], "rb"))

# Count how many times each concept was produced for each span.
span_concept_dict = {}
for id, concept_training_data in concept_training_dataset.iteritems():
    for [span, pos, concept, name, ner] in concept_training_data:
        if span_concept_dict.has_key(span):
            if span_concept_dict[span].has_key(concept):
                span_concept_dict[span][concept] += 1
            else:
                span_concept_dict[span][concept] = 1
        else:
            span_concept_dict[span] = {concept:1}

#Sort the concepts for each span by their frequency
for span, concepts in span_concept_dict.iteritems():
    span_concept_dict[span] = OrderedDict(sorted(concepts.items(), key=lambda concepts: concepts[1], reverse=True)).items()

# Optional debug dump of the table to stdout.
print_to_file = 1
if print_to_file:
    for span, concepts in span_concept_dict.iteritems():
        print span, concepts

pickle.dump(span_concept_dict, open("span_concept_dict.p", "wb"))
| [
"raosudha@umiacs.umd.edu"
] | raosudha@umiacs.umd.edu |
7069511abb46a5cbe8b75a04015b09471f7bea14 | 9ac90488fa5ccd5100e2593e34031d4692664e28 | /data/input_files_new_philly_dta/DTA_interface.py | 0b53ff74280c6a95b274b48b9cc2ab396d8f0bfd | [
"MIT"
] | permissive | AlanPi1992/MAC-POSTS | 18a2c9f25a06d6512a96de986c4b2d38f9f9fa32 | 4e4ed3bb6faa5ebd0aa5059b2dfff103fe8f1961 | refs/heads/master | 2021-06-27T06:18:58.116114 | 2019-05-22T21:54:45 | 2019-05-22T21:54:45 | 105,393,332 | 0 | 1 | MIT | 2018-11-26T04:45:20 | 2017-09-30T18:57:17 | Jupyter Notebook | UTF-8 | Python | false | false | 4,077 | py | import numpy as np
import os
class Link:
    """One road link parsed from a whitespace-separated .lin record.

    Record layout: ID linkType name from to length FFS cap RHOJ lanes.
    """

    # Class-level defaults; real values are assigned per instance in
    # __init__. np.float was a deprecated alias of the builtin float and
    # was removed in NumPy 1.24, so plain float() is used here -- the
    # values are identical.
    from1 = None            # upstream node id
    to1 = None              # downstream node id
    ID = None               # integer link id
    linkType = None         # e.g. 'LWRLK' for ordinary LWR links
    name = None
    length = float(0)       # link length (units per input file -- confirm)
    FFS = float(0)          # free-flow speed
    cap = float(0)          # capacity
    RHOJ = float(0)         # jam density
    lane = None             # number of lanes
    hasCounts = False
    hasSpeed = False
    volume = float()
    lambda_plus = float()
    lambda_minus = float()
    v = float()
    u = float()

    def __init__(self, re):
        """Parse one record string `re` (whitespace separated) into fields."""
        words = re.split()
        self.ID = int(words[0])
        self.linkType = words[1]
        self.name = words[2]
        self.from1 = int(words[3])
        self.to1 = int(words[4])
        self.length = np.float32(words[5])
        self.FFS = np.float64(words[6])
        self.cap = np.float64(words[7])
        self.RHOJ = np.float64(words[8])
        self.lane = int(words[9])

    def isConnector(self):
        """Return 1 if this link is a connector, flagged by an unrealistic RHOJ."""
        return int(self.RHOJ > 9999)
def read_output(total_inverval, path):
    # Parse <path>record/MNM_output_record_interval_volume. The first line
    # lists the link ids; each subsequent line gives one interval's volume
    # for every link, in the same column order.
    # Returns {link_id: np.array of length total_inverval}.
    # NOTE: Python 2 code (file() builtin, print statement); np.float is
    # the removed pre-1.24 alias of builtin float.
    output = dict()
    link_id_list = list()
    f = file(path + "record/MNM_output_record_interval_volume", 'r')
    line = f.readline()
    words = line.split()
    num_link = len(words)
    for str_link_id in words:
        link_id = int(str_link_id)
        output[link_id] = np.zeros(total_inverval)
        link_id_list.append(link_id)
    line = f.readline()
    counter = 0
    while line:
        words = line.split()
        for idx, str_link_volume in enumerate(words):
            output[link_id_list[idx]][counter] = np.float(str_link_volume)
        counter = counter + 1
        line = f.readline()
    # Sanity check: the file should contain exactly total_inverval rows.
    if (counter != total_inverval):
        print "Potential error"
    f.close()
    return output
def get_link_dic(path):
    # Build {link id: Link} from <path>Philly.lin, skipping the header line
    # and keeping only 'LWRLK' records (ordinary LWR road links).
    linkDic = dict()
    link_log = file(path + "Philly.lin", "r").readlines()[1:]
    for line in link_log:
        e = Link(line)
        if e.linkType == "LWRLK":
            linkDic[e.ID] = e
    return linkDic
def get_matrix(link_dict, output_dict, total_inverval):
    # One row per link: column 0 holds the link id, columns 1..N hold the
    # per-interval volume normalised by the link's storage
    # (jam density * lanes * length), i.e. an occupancy-like ratio.
    output_matrix = np.zeros((len(link_dict), total_inverval + 1))
    for idx, link_id in enumerate(link_dict.keys()):
        output_matrix[idx][0] = link_id
        output_matrix[idx, 1:total_inverval+1] = output_dict[link_id] / (link_dict[link_id].RHOJ * np.float(link_dict[link_id].lane) * link_dict[link_id].length)
    return output_matrix
def read_results(total_inverval, path):
    # Convenience wrapper: parse the network and the simulator output, then
    # return the normalised link/interval matrix from get_matrix().
    link_dict = get_link_dic(path)
    output_dict = read_output(total_inverval, path)
    results = get_matrix(link_dict, output_dict, total_inverval)
    return results
def rewrite_conf(request, conf_name):
    # Copy conf_name to conf_name + "new", replacing every line that starts
    # with a key from `request` by "key = value"; all other lines are kept.
    # NOTE(review): the output handle is never closed explicitly -- it
    # relies on interpreter shutdown to flush.
    f = file(conf_name + "new", "w")
    conf_log = file(conf_name).readlines()
    for line in conf_log:
        change_flag = False
        for trigger, value in request.iteritems():
            if line.startswith(trigger):
                print "changing:", trigger
                change_flag = True
                f.write(str(trigger) + " = " + str(value).strip("\n\t") + "\n")
        if not change_flag:
            f.write(line)
def replace_conf(conf_name):
    # Swap the rewritten config into place: delete the original file and
    # rename the "<name>new" version over it (no-op if it does not exist).
    new_conf_name = conf_name + "new"
    if os.path.exists(new_conf_name):
        os.remove(conf_name)
        os.rename(new_conf_name, conf_name)
def modify_conf(request, path):
    # Apply the key/value overrides in `request` to <path>config.conf
    # in place (rewrite to a temp file, then swap it in).
    conf_name = path + "config.conf"
    rewrite_conf(request, conf_name)
    replace_conf(conf_name)
def run_MNM(path):
    # Launch the DTA simulator binary; with path == "/" this executes
    # "./dta_response" in the current working directory.
    os.system("." + path + "dta_response")
##################################################
####### main ##########
##################################################
def get_DNL_results(params):
    # Run one dynamic network loading: override the config's horizon and
    # start-assignment interval, execute the simulator, and parse its
    # output into the normalised matrix.
    # NOTE(review): `params` is currently unused -- the values below are
    # hard-coded (see the commented line).
    # total_inverval = params["total_inverval"]
    total_inverval = 60
    start_interval = 20
    request = dict()
    path = "/"
    request["max_interval"] = total_inverval
    request["start_assign_interval"] = start_interval
    modify_conf(request, path)
    run_MNM(path)
    results = read_results(total_inverval, path)
    return results
# Script entry: run one loading with the (currently unused) defaults.
params_local = dict()
# # params_local["total_inverval"] = 167
# # params_local["start_assign_interval"] = 0
a = get_DNL_results(params_local)
# np.savetxt("a.txt", a)
# # linkDic = dict()
# match_file = file("match_file", "w")
# link_log = file("Philly.lin", "r").readlines()[1:]
# for line in link_log:
#     e = Link(line)
#     if e.linkType == "LWRLK":
#         linkDic[e.ID] = e
# for link in linkDic.itervalues():
#     print link.ID
#     match_file.write(" ".join([str(e) for e in [link.ID, link.name]]) + "\n")
| [
"lemma171@gmail.com"
] | lemma171@gmail.com |
2a60db08d08a074061f2c691d287318b4559dc11 | 7839d009f3ae0a0c1bc360b86756eba80fce284d | /build/rostest/catkin_generated/pkg.installspace.context.pc.py | 7235d889406793f9882e29d9c47b2428384ccc19 | [] | no_license | abhat91/ros_osx | b5022daea0b6fdaae3489a97fdb1793b669e64f5 | 39cd8a79788d437927a24fab05a0e8ac64b3fb33 | refs/heads/master | 2021-01-10T14:43:41.047439 | 2016-03-13T23:18:59 | 2016-03-13T23:18:59 | 53,812,264 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/opt/ros/jade/include;/usr/local/include".split(';') if "/opt/ros/jade/include;/usr/local/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-l:/usr/local/lib/libboost_system-mt.dylib;-l:/usr/local/lib/libboost_thread-mt.dylib;-l:/usr/local/lib/libboost_chrono-mt.dylib;-l:/usr/local/lib/libboost_date_time-mt.dylib;-l:/usr/local/lib/libboost_atomic-mt.dylib".split(';') if "-l:/usr/local/lib/libboost_system-mt.dylib;-l:/usr/local/lib/libboost_thread-mt.dylib;-l:/usr/local/lib/libboost_chrono-mt.dylib;-l:/usr/local/lib/libboost_date_time-mt.dylib;-l:/usr/local/lib/libboost_atomic-mt.dylib" != "" else []
PROJECT_NAME = "rostest"
PROJECT_SPACE_DIR = "/opt/ros/jade"
PROJECT_VERSION = "1.11.16"
| [
"abhat@wpi.edu"
] | abhat@wpi.edu |
d24d9b532639b9ddb03b9b8f313d705c1a0aa4d2 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/find_the_difference.py | 8ac2f6c88a73e8e3e625f600b9cab2ad1558bb5c | [] | no_license | hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | class Solution(object):
def findTheDifference(self, s, t):
ret = 0
for c in s:
ret ^= ord(c)
for c in t:
ret ^= ord(c)
return chr(ret) | [
"31617695+hayeonk@users.noreply.github.com"
] | 31617695+hayeonk@users.noreply.github.com |
992b1d21729bf38534b92b9c117474340f6d1e7c | da0a0045d4e7b0de12a9459e5546332ccc7cce0e | /Design_Patterns/Structural/Adapter/example1/third_party_billing_system.py | 91d45cfb3c8a513e62f49f5e13c1c9b9a26457f8 | [] | no_license | git4rajesh/python-learnings | d64e7c76698b7f2255a77a8233a90774db78f030 | ee6b7f7844079e94801c19a1dd80921e1741e58e | refs/heads/master | 2022-11-01T08:47:45.097034 | 2019-04-28T02:14:34 | 2019-04-28T02:14:34 | 172,183,208 | 0 | 1 | null | 2022-10-12T08:35:43 | 2019-02-23T07:04:59 | Python | UTF-8 | Python | false | false | 432 | py | class Third_Party_Billing_System:
@staticmethod
def process_salary(lst_emp_obj):
for emp in lst_emp_obj:
if emp.designation == 'Mgr':
emp.salary = 1000
elif emp.designation == 'QA':
emp.salary = 2000
elif emp.designation == 'Engr':
emp.salary = 3000
else:
emp.salary = 5000
return lst_emp_obj
| [
"rvenkataraman"
] | rvenkataraman |
d7366b50353afa370e0f073c0930672676fc801a | 81efabfbef513ba9d45f28c2fce5e9ab5eb19eec | /Example_Buzzer.py | 0ad2adf9df635f5f17d95419322dbe1895b509b1 | [] | no_license | ncdcommunity/Raspberry_Pi_PCA9536_Input_Output_Module_Python_library | e219f89ab1e5de748f4db804250d9962a41c2cbf | e910ad2a8bbb4c492179bd593d3c7f31ef92d368 | refs/heads/master | 2021-03-24T13:21:48.969471 | 2018-02-08T07:01:09 | 2018-02-08T07:01:09 | 120,723,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# PCA9536_BZ
# This code is designed to work with the PCA9536_I2CBZ I2C Mini Module available from ControlEverything.com.
# https://shop.controleverything.com/products/digital-3-channel-input-output-with-buzzer?variant=25687479179#tabs-0-product_tabset-2
import time
from PCA9536 import PCA9536
# Poll the PCA9536 I/O expander forever: select the device/pins, apply the
# I/O configuration, then read the pin state back twice per second.
# (Python 2 script; stop with Ctrl-C.)
pca9536 = PCA9536()
while True :
    pca9536.select_io()
    pca9536.select_pin()
    pca9536.input_output_config()
    time.sleep(0.5)
    pca9536.read_data()
    print " ******************************** "
    time.sleep(0.5)
| [
"ryker1990@gmail.com"
] | ryker1990@gmail.com |
0871e550193f28e9c243723ca06cb964eb1e0256 | a98c455a318ab2d47b10ef1aa195b7dfd1b5449c | /codes/fashionmnist_tobf.py | 31f7ae4dc70bc7816376311fda79de25e325bbae | [] | no_license | WanliXue/BF_implemation | ddd463ed906e1f4ee0de492da48bc6de3574bfd0 | 211aa963f3be755858daf03fca5690d3c9532053 | refs/heads/main | 2022-12-26T07:04:05.280651 | 2020-10-13T02:08:55 | 2020-10-13T02:08:55 | 303,561,823 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | import numpy as np
# import os
# print(os.getcwd())
import math
import time
from BF_TS import BF_TS
def conver_data_to_bf(data):
    """Encode a 2-D sample matrix into Bloom-filter feature vectors.

    data: array of shape (num_samples, num_features); each row is inserted
    into a Bloom filter of `length` bits via BF_TS, and the resulting bit
    vectors are converted back into a dense training matrix of shape
    (num_samples, length), which is returned.
    """
    com_to_len = data.shape[1]
    # print(data.shape)
    #
    compressed_data = data[:, :]
    #
    # ------------Put into compressed and BF data---------
    length = 10000        # bits per Bloom filter
    b = 5                 # neighbourhood half-width used by the BF_TS grid
    num_hash = 10         # hash functions per inserted element
    dis = float(5)
    g = (2 * b + 1) * com_to_len  # total insertions per sample
    # Theoretical false-positive rate (1 - e^{-kg/m})^k. The float() cast
    # keeps the division exact under Python 2 as well; the previous
    # integer-division variant of this line was a dead store (overwritten
    # before use) and has been removed.
    # print ('lenth:',length,'b:',b,'num_hash:',num_hash,'dis:',dis ,'false_positive: ', false_positive)
    false_positive = math.pow(1 - math.exp(-(float)(num_hash * g) / length), num_hash)
    print ('lenth:', length, 'num_hash:', num_hash, 'false_positive: ', false_positive)
    ## # generate the npy with the bf and data
    bf_ts = BF_TS(length, num_hash, b, dis / (2 * b), dis)
    ##
    # ---------------------
    print ('BF filter')
    # print 'start'
    start_time = time.time()
    bf_train = bf_ts.convert_set_to_bf(compressed_data)  # the result it a list and hard to convert to np array
    print ('BF filter done')
    cifar_batch = bf_ts.convert_bitarray_to_train_data(bf_train, len(bf_train), length)
    print('bf done using time: {} mins'.format((time.time() - start_time) / 60))
    # cifar_bfed = np.stack([cifar_batch, cifar_batch2, cifar_batch3], axis=2)
    return cifar_batch
# ---------------------
# Script body: Bloom-filter-encode the Fashion-MNIST train and test splits
# and write the encoded matrices into ../data/.
train_path = '/Users/wanli/Dropbox/ppml_code_with_dataset/CIFAR_mnist/Fashion_train_random60_full.npy'
test_path = '/Users/wanli/Dropbox/ppml_code_with_dataset/CIFAR_mnist/Fashion_test_random60_full.npy'

# NOTE(review): the shape comment below claims a 3-D array, but
# conver_data_to_bf() indexes data.shape[1] and data[:, :] as if 2-D --
# confirm the actual layout of the saved .npy files.
data = np.load(train_path) # (9000,300,3)
bfed = conver_data_to_bf(data)
save_path = '../data/fashion_bfed_train_random60.npy'
np.save(save_path, bfed)

data_test = np.load(test_path) # (9000,300,3)
bfed_test = conver_data_to_bf(data_test)
save_path = '../data/fashion_bfed_test_random60.npy'
np.save(save_path, bfed_test) | [
"xuewanli.lee@gmail.com"
] | xuewanli.lee@gmail.com |
b019c18b74461fd6a01e93019d9a39a9681330c1 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Anscombe/trend_ConstantTrend/cycle_7/ar_12/test_artificial_1024_Anscombe_ConstantTrend_7_12_100.py | 3e21083297c74f56fbd8826c3dc7a03e1537c962 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 275 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 12); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
654de6277b4e8c96c01f3c4013478da037b626d2 | 36ac195ecceb868e78372bc8e976066cc9ff0fae | /torch_glow/tests/nodes/batchnorm3d_test.py | 6fed650f00e7de8b34a67493a89a151fbdc61b73 | [
"Apache-2.0"
] | permissive | jeff60907/glow | d283d65bc67e0cc9836854fa7e4e270b77023fff | 34214caa999e4428edbd08783243d29a4454133f | refs/heads/master | 2021-09-23T07:30:29.459957 | 2021-09-14T01:47:06 | 2021-09-14T01:48:00 | 216,199,454 | 0 | 0 | Apache-2.0 | 2019-10-19T12:00:31 | 2019-10-19T12:00:31 | null | UTF-8 | Python | false | false | 2,270 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestBatchNorm3D(utils.TorchGlowTestCase):
    """Checks that aten::batch_norm (3D) is fused and matches eager PyTorch."""

    def test_batchnorm_basic(self):
        """
        Basic test of the PyTorch 3D batchnorm Node on Glow.
        """

        class SimpleBatchNorm(nn.Module):
            def __init__(self, num_channels, running_mean, running_var):
                super(SimpleBatchNorm, self).__init__()
                self.batchnorm = nn.BatchNorm3d(num_channels)
                # Inject fixed running statistics so that eval-mode output
                # is deterministic for the comparison.
                self.batchnorm.running_mean = running_mean
                self.batchnorm.running_var = running_var

            def forward(self, x):
                return self.batchnorm(x)

        num_channels = 4
        running_mean = torch.rand(num_channels)
        running_var = torch.rand(num_channels)
        model = SimpleBatchNorm(num_channels, running_mean, running_var)
        model.eval()  # eval mode: use the injected running stats, not batch stats
        inputs = torch.randn(1, num_channels, 4, 5, 5)
        utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})

    def test_batchnorm_with_weights(self):
        """
        Test of the PyTorch 3D batchnorm Node with weights and biases on Glow.
        """

        class SimpleBatchNorm(nn.Module):
            def __init__(self, num_channels, weight, bias, running_mean, running_var):
                super(SimpleBatchNorm, self).__init__()
                self.batchnorm = nn.BatchNorm3d(num_channels)
                # Explicit affine parameters in addition to running stats.
                self.batchnorm.weight = torch.nn.Parameter(weight)
                self.batchnorm.bias = torch.nn.Parameter(bias)
                self.batchnorm.running_mean = running_mean
                self.batchnorm.running_var = running_var

            def forward(self, x):
                return self.batchnorm(x)

        num_channels = 4
        weight = torch.rand(num_channels)
        bias = torch.rand(num_channels)
        running_mean = torch.rand(num_channels)
        running_var = torch.ones(num_channels)
        inputs = torch.randn(1, num_channels, 4, 5, 5)
        model = SimpleBatchNorm(num_channels, weight, bias, running_mean, running_var)
        model.eval()
        utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
59d1905f4c830c16a12da935f6d105de60ee67ef | 15dfa10d195fb6187765aa1d6a42f6152e8cd4e1 | /sof_parser/sof_parser/pipelines.py | 6a2cdf0a24fac33c26e82f57d0c644bf88554a77 | [] | no_license | didoogan/sof_parser | d790e12d5290dd110ddc1511a74a02876dba607b | c3b9064425e74ebb67e34319a462b5401732990c | refs/heads/master | 2020-12-29T02:06:59.956070 | 2016-09-21T14:45:55 | 2016-09-21T14:45:55 | 68,790,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class SofParserPipeline(object):
    """Default (pass-through) Scrapy item pipeline for the sof_parser spider."""

    def process_item(self, item, spider):
        # No processing yet: return the item unchanged so it continues
        # through any later pipelines.
        return item
| [
"tzd0409@gmail.com"
] | tzd0409@gmail.com |
a9ada2f135bdc88b9aaf30c65ca4588856fbeb5f | 84b05857cbe74d190bdbee18d442d0c720b1b84d | /Coderbyte_algorithms/Easy/MovingMedian/MovingMedian.py | be21ca848c07cd1ea8955337db4131d680066937 | [] | no_license | JakubKazimierski/PythonPortfolio | 1c8c7e7b0f1358fc42a2295b807d0afafd8e88a3 | 3aa62ad36c3b06b2a3b05f1f8e2a9e21d68b371f | refs/heads/master | 2023-06-01T01:16:22.897097 | 2023-05-15T01:05:22 | 2023-05-15T01:05:22 | 311,473,524 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | '''
Moving Median from Coderbyte
December 2020 Jakub Kazimierski
'''
import statistics
def MovingMedian(arr):
'''
Have the function MovingMedian(arr)
read the array of numbers stored in arr
which will contain a sliding window size, N,
as the first element in the array and the rest
will be a list of numbers. Your program should
return the Moving Median for each element based
on the element and its N-1 predecessors, where N
is the sliding window size. The final output should
be a string with the moving median corresponding
to each entry in the original array separated by commas.
Note that for the first few elements
(until the window size is reached), the median is computed
on a smaller number of entries.
For example: if arr is [3, 1, 3, 5, 10, 6, 4, 3, 1]
then your program should output "1,2,3,5,6,6,4,3"
'''
try:
median_list = []
for i in range(1, len(arr)):
if i < arr[0]:
median_list.append(int(statistics.median(arr[1:i+1])))
else:
# n=arr[0], (n-1)th element before i, starts from index i+1-n
start = i+1-arr[0]
median_list.append(int(statistics.median(arr[start:i+1])))
return ",".join(str(median) for median in median_list)
except(TypeError):
return -1 | [
"j.m.kazimierski@gmail.com"
] | j.m.kazimierski@gmail.com |
302781dcebe0e1f90f184cdf719806f48bc0785d | f199898334653e32d6a13922063e98f6cc477db5 | /tests/test_air.py | 8f734e633fc83fe54da3b9d9da5639cd14145903 | [
"MIT"
] | permissive | wptree/akshare | c31bb822d806974be951c3b2258312abdec09a6e | 7697506d277f14d1719e60c3d19e73ff7d69e6af | refs/heads/master | 2021-02-07T22:30:00.957989 | 2020-02-29T10:29:27 | 2020-02-29T10:29:27 | 244,083,276 | 1 | 0 | MIT | 2020-03-01T03:59:01 | 2020-03-01T03:59:00 | null | UTF-8 | Python | false | false | 1,314 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2019/12/12 18:16
contact: jindaxiang@163.com
desc: To test intention, just write test code here!
"""
# from akshare.air.aqi_study import air_hourly
from akshare.index.index_weibo import weibo_index
from akshare.event.franchise import franchise_china
# from akshare.fortune.fortune_500 import fortune_rank
def test_franchise_china():
    # Live-network smoke test: the franchise ranking interface should
    # return a non-empty DataFrame.
    franchise_china_df = franchise_china()
    assert franchise_china_df.shape[0] > 0
# def test_air_hourly():
# """
# test air_hourly interface
# :return: air_hourly_df
# :rtype: pandas.DataFrame
# """
# air_hourly_df = air_hourly("成都", "2019-12-10")
# assert air_hourly_df.shape[0] > 0
def test_weibo_index():
    """
    test weibo_index interface
    :return: weibo_index_df
    :rtype: pandas.DataFrame
    """
    # Live-network smoke test: query the Weibo index for a keyword over a
    # three-month window and expect at least one row back.
    weibo_index_df = weibo_index(word="口罩", time_type="3month")
    assert weibo_index_df.shape[0] > 0
# def test_fortune():
# """
# test fortune_rank interface
# :return: fortune_rank_df
# :rtype: pandas.DataFrame
# """
# fortune_rank_df = fortune_rank(year=2011) # 2010 不一样
# assert fortune_rank_df.shape[0] > 0
# Manual entry point: run the live-network check directly (the other
# interfaces stay commented out together with their imports above).
if __name__ == "__main__":
    # test_air_hourly()
    test_weibo_index()
    # test_fortune()
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
6f90146523ad83c22bd917cb55bcb3196765cb28 | d3f680630426ff3a63d564e78bb1480863a7f0f6 | /services/web__aps_dz.py | 4c103effdaa88ee80facaa3ba0bd5fd20edee578 | [] | no_license | JesseWeinstein/NewsGrabber | 09f9d567449e99ba211e4ba61b42c53276de235b | b431dc5f313d4718c6328aaaa97da1bc8e136023 | refs/heads/master | 2020-12-31T02:32:48.359448 | 2016-01-23T14:20:26 | 2016-01-23T14:20:26 | 48,966,133 | 1 | 0 | null | 2016-01-04T00:33:41 | 2016-01-04T00:33:41 | null | UTF-8 | Python | false | false | 456 | py | refresh = 5
# NewsGrabber service definition for aps.dz (Algerie Presse Service).
version = 20160122.01
# RSS feeds polled for new article URLs.
urls = ['http://www.aps.dz/algerie?format=feed',
        'http://www.aps.dz/economie?format=feed',
        'http://www.aps.dz/sport?format=feed',
        'http://www.aps.dz/monde?format=feed',
        'http://www.aps.dz/societe?format=feed',
        'http://www.aps.dz/regions?format=feed',
        'http://www.aps.dz/culture?format=feed',
        'http://www.aps.dz/sante-sciences-tech?format=feed']
# Only URLs on the aps.dz domain are in scope for grabbing.
regex = [r'^https?:\/\/[^\/]*aps\.dz']
videoregex = []  # no site-specific video URL patterns
liveregex = [] | [
"Arkiver@hotmail.com"
] | Arkiver@hotmail.com |
86ab4c0936eb638ea5a4cdc51d104fe7ae8991a0 | bf92a619b9b850678bb691915e45c39cd740fa63 | /apps/freeway/main.py | de0fdd6c91d2e76ff7289f891c78993f55fdaea0 | [] | no_license | jrecuero/jc2cli | a045f1efa431f53351dfac968852fd82e8c963b6 | c97615828880021b3965756aed939e39bac949b6 | refs/heads/master | 2021-05-10T10:16:34.698398 | 2018-11-06T17:43:53 | 2018-11-06T17:43:53 | 118,377,662 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | import ehandler
import race
import freeway
import section
import device
from cursor import Cursor
if __name__ == '__main__':
    # Clear the terminal and home the cursor before drawing.
    Cursor.print(Cursor.clear_entire_screen())
    Cursor.print(Cursor.move_upper_left(0))
    _ehdlr = ehandler.EHandler()
    _race = race.Race()
    # Closed circuit: two straights joined by two turns, one lane each.
    _freeway = freeway.Freeway()
    _freeway.add_section(section.Section(100, 1, section.Spec.Straight))
    _freeway.add_section(section.Section(50, 1, section.Spec.Turn))
    _freeway.add_section(section.Section(100, 1, section.Spec.Straight))
    _freeway.add_section(section.Section(50, 1, section.Spec.Turn))
    _race.freeway = _freeway
    # Five competing devices; the trailing number looks like a speed or
    # skill rating -- TODO confirm against device.Device's signature.
    _devices = [device.Device('dev-80', 'dev-class', 'dev-sub', 80),
                device.Device('dev-50', 'dev-class', 'dev-sub', 50),
                device.Device('dev-90', 'dev-class', 'dev-sub', 90),
                device.Device('dev-60', 'dev-class', 'dev-sub', 60),
                device.Device('dev-70', 'dev-class', 'dev-sub', 70), ]
    # _devices = [device.Device('dev-80', 'dev-class', 'dev-sub', 80), ]
    for dev in _devices:
        _race.add_device(dev)
    _race.laps = 5
    _ehdlr.race = _race
    _ehdlr.setup()
    _ehdlr.delay = 100
    _ehdlr.start()
| [
"jose.recuero@gmail.com"
] | jose.recuero@gmail.com |
b15f080badda883ba9ec4368b1e34032afe7f2a8 | 06fec21ab6be610d7e491eaa55f776587ed6fadd | /hubapp/migrations/0007_add_price.py | 988a6501030a2dbba177e0b2c561af43c2b411c3 | [] | no_license | rcoffie/ehub | 6e6e493d252b4d8a5360616ea64dd85fdc3b15f8 | 28213469c612088acb3a62ca9bf1f3c2a0dd5756 | refs/heads/master | 2022-09-29T09:05:49.705294 | 2020-06-07T09:26:16 | 2020-06-07T09:26:16 | 263,087,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.0.6 on 2020-06-06 13:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `price` column to the hubapp `Add` model."""

    dependencies = [
        ('hubapp', '0006_auto_20200606_1302'),
    ]

    operations = [
        migrations.AddField(
            model_name='add',
            name='price',
            # NOTE(review): default=True is suspicious for a DecimalField --
            # it was most likely meant to be a numeric default (or
            # preserve_default=False from makemigrations). If this
            # migration has already been applied, fix forward with a new
            # migration rather than editing this one.
            field=models.DecimalField(decimal_places=2, default=True, max_digits=6),
        ),
    ]
| [
"rcoffie22@yahoo.com"
] | rcoffie22@yahoo.com |
8ceae620a6ca4aa42c19252edc4912bc71ab0105 | 9afbcb367de9bf055d531d285bc299a9ca3040fe | /django_session/django_session/settings.py | 8d2cce87371602bcd47e87324c303b6c4fded580 | [] | no_license | mysqlplus163/aboutPython | a41a5bc2efd43b53d4acf96e7477e80c022cf657 | fa7c3e6f123158011d8726b28bfcd0dee02fa853 | refs/heads/master | 2020-03-21T05:06:19.949902 | 2018-03-14T16:04:54 | 2018-03-14T16:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,177 | py | """
Django settings for django_session project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any non-local deployment.
SECRET_KEY = 'ga+l=(++6huhfamtls@f_qt-^mufus0ios8074=38ttx=)js7c'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app01',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_session.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_session.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"liwenzhou7@gmail.com"
] | liwenzhou7@gmail.com |
378d8d6243e4eb15707ac59dc6e7bf93f80452e7 | 67572ef7c6ac88a335dd884ac19dd8f5519145fa | /4_Recursion/recursive_sierspinski.py | 29161fc863bc4e448ba111480df36cefa2bc8fd9 | [
"MIT"
] | permissive | ZoroOP/Problem-Solving-With-Algorithms-And-Data-Structures | ccb2eb306229097dd8c930523e20ed7115a1e8ef | be29b46b9f4e579644ca2d44675c0ce7dcb29b3b | refs/heads/master | 2021-10-17T00:59:53.654643 | 2019-02-13T05:40:27 | 2019-02-13T05:40:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | """
Draw a sierpinski triangle recursively with the turtle module.
"""
import turtle
def drawTriangle(points,color,myTurtle):
myTurtle.fillcolor(color)
myTurtle.up()
myTurtle.goto(points[0][0],points[0][1])
myTurtle.down()
myTurtle.begin_fill()
myTurtle.goto(points[1][0],points[1][1])
myTurtle.goto(points[2][0],points[2][1])
myTurtle.goto(points[0][0],points[0][1])
myTurtle.end_fill()
def getMid(p1,p2):
return ( (p1[0]+p2[0]) / 2, (p1[1] + p2[1]) / 2)
def sierpinski(points,degree,myTurtle):
colormap = ['blue','red','green','white','yellow',
'violet','orange']
drawTriangle(points,colormap[degree],myTurtle)
if degree > 0:
sierpinski([points[0],
getMid(points[0], points[1]),
getMid(points[0], points[2])],
degree-1, myTurtle)
sierpinski([points[1],
getMid(points[0], points[1]),
getMid(points[1], points[2])],
degree-1, myTurtle)
sierpinski([points[2],
getMid(points[2], points[1]),
getMid(points[0], points[2])],
degree-1, myTurtle)
def main():
myTurtle = turtle.Turtle()
myWin = turtle.Screen()
myPoints = [[-100,-50],[0,100],[100,-50]]
sierpinski(myPoints,3,myTurtle)
myWin.exitonclick()
main()
| [
"anthony.r.chao@gmail.com"
] | anthony.r.chao@gmail.com |
7988c3051318b94f342323a66c7309ca285ec7a3 | e8215b98dcf46417e720cc6ef4a0329474ae9b82 | /PHYS210/Project 2-Animation - Backup/ising.py | 17cd108c5c818627b3c2906932314eaf85891cf0 | [] | no_license | rgkaufmann/PythonCodes | 2d47bab84ec851fc962598f613b1e666a14c8efd | a5d5cd993beabdb79897a05b35420ad82f438f51 | refs/heads/master | 2021-06-13T23:19:09.109162 | 2021-03-03T06:00:04 | 2021-03-03T06:00:04 | 162,771,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,839 | py | # PHYS 210 - Project 2 - The Ising Model Ryan Kaufmann
# Bonus: Add a second animation showing the individual spins of each electron
# In each of the desired temperatures (0.1K, 2.5K, 100K)
import numpy as np # Importing all important function, numpy
import matplotlib.pyplot as plt # for arrays, plt for plotting, anim for
import matplotlib.animation as anim # animations, warnings to clear warnings
import warnings # and sys to increase recursion limit
import cProfile, pstats, io
import sys
warnings.simplefilter(action='ignore', category=UserWarning) # Ignores warning
plt.rcParams['animation.ffmpeg_path']='C:/Users/ryank/Downloads/ffmpeg-20171102-d5995c5-win64-static/bin/ffmpeg.exe'
# When using the MovieWriter in the animation saving
plt.rcParams['image.cmap'] = 'Paired'
pr2 = cProfile.Profile()
pr2.enable()
# Declaring all variables used in the program and increases recursion limit
sys.setrecursionlimit(4000)
SigmaSpins = np.random.choice((-1, 1), (50, 50))
Zeros = np.zeros((50, 50))
Temperatures = np.array([0.01, 0.1, 1, 2, 2.5, 3, 4, 5, 10, 100])
SpinDataTemp001 = [SigmaSpins]
SpinDataTemp025 = [SigmaSpins]
SpinDataTemp100 = [SigmaSpins]
# Convergence calculates the electron spin configuration that the original
# configuration reaches after 600000 iterations. It takes an original state
# the current iteration of recusion, and the temperature of the system
def Convergence(sigma, count, temperature):
for indx in range(500):
# For loop calculates a random coordinate on the configuration
# Then calculates the energy difference between the original state
# And the new state (Derivation at bottom)
coors = np.random.randint(-1, 49, 2)
Energydif = (sigma[(coors[0]+1), coors[1]] +
sigma[coors[0]-1, coors[1]] +
sigma[coors[0], (coors[1]+1)] +
sigma[coors[0], coors[1]-1])
Energydif = -2*sigma[coors[0], coors[1]]*Energydif
# Finally find whether or not the electron spin should be switched
# And switches it or not. If the probability needs to be calculated,
# It is compared to a random number and determined to be switched
if Energydif >= 0:
sigma[coors[0], coors[1]] = -1*sigma[coors[0], coors[1]]
else:
probability = np.exp(Energydif/temperature)
if np.random.random() < probability:
sigma[coors[0], coors[1]] = -1*sigma[coors[0], coors[1]]
# After 500 iterations, it checks if it has been 1000 iterations since the
# Last recording of a electron spin. If it has been 1000 iterations, it
# Records it to be used with the animation segment.
if temperature == 0.1:
global SpinDataTemp001
SpinDataTemp001.append(sigma.tolist())
elif temperature == 2.5:
global SpinDataTemp025
SpinDataTemp025.append(sigma.tolist())
elif temperature == 100:
global SpinDataTemp100
SpinDataTemp100.append(sigma.tolist())
# Then it decides if it should be through another iteration or returned
if count >= 1199:
return sigma
else:
return Convergence(sigma, count+1, temperature)
# ConvergenceSet goes through a set of spin configurations and gets the
# Magnetic moment for each using the same temperature. It adds them
# To one array and then returns the complete array
def ConvergenceSet(setsigmaspins, temperature):
if setsigmaspins.size == SigmaSpins.size:
return np.sum(Convergence(setsigmaspins[:, :, 0], 0, temperature))
else:
return np.append(np.sum(Convergence(setsigmaspins[:, :, 0],
0, temperature)),
ConvergenceSet(setsigmaspins[:, :, 1:], temperature))
# TemperatureSet goes through a set of temperatures and gets five magnetic
# Moments for each using each temperature. It then adds them to one
# Array and then returns the complete array.
def TemperatureSet(temperatureset):
FiveTimesSigmaSpins = np.repeat(SigmaSpins[:, :, np.newaxis], 5, axis=2)
if temperatureset.size == 1:
return ConvergenceSet(FiveTimesSigmaSpins,
temperatureset[0])[:, np.newaxis]
else:
return np.append(ConvergenceSet(FiveTimesSigmaSpins,
temperatureset[0])[:, np.newaxis],
TemperatureSet(temperatureset[1:]),
axis=1)
# UpdateHeat replaces the data in the heat map with a 'newer' data set
def updateHeat(num, spins):
Heat.set_data(spins[num])
# UpdateQuiver replaces the data in the vector field with a 'newer' data set
def updateQuiver(num, spins):
Color = np.arctan2(Zeros, spins[num])
Quiver.set_UVC(Zeros, spins[num], Color)
# Animate takes in various parameters to construct a figure and form the
# Animation. Then it saves the animation to a file.
def Animate(Temp, File, Type, SpinData):
fig = plt.figure()
fig.suptitle('Electron Spins at {}K'.format(Temp))
if Type == 'Heat':
global Heat
Heat = plt.imshow(SigmaSpins, cmap='inferno')
animation = anim.FuncAnimation(fig, updateHeat, frames=1200,
repeat=False, fargs=(SpinData, ))
animation.save(File, fps=20)
elif Type == 'Quiver':
global Quiver
Quiver = plt.quiver(Zeros, SigmaSpins, np.arctan2(Zeros, SigmaSpins),
pivot='middle')
animation = anim.FuncAnimation(fig, updateQuiver, frames=1200,
repeat=False, fargs=(SpinData, ))
animation.save(File, fps=20)
# Gathers data on the convergence configurations given initial spin
MagMoments = TemperatureSet(Temperatures).transpose()
MaxMagMoments = np.amax(np.abs(MagMoments), axis=1)
# Constructs the plot for the magnetic moments versus the temperature
title = 'Magnetic Moment Against Temperature'
title = title + ' As Calculated by the Ising Model'
plt.semilogx(Temperatures, MaxMagMoments)
plt.title(title)
plt.xlabel('Temp (K)')
plt.ylabel('Magnetic Moment')
plt.savefig('Tcurie.pdf')
# Animates each of the required temperatures using both Heat and Quiver funcs
Animate(0.1, 'temp_0.1.mp4', 'Heat', SpinDataTemp001)
Animate(0.1, 'temp_0.1Quiver.mp4', 'Quiver', SpinDataTemp001)
Animate(2.5, 'temp_2.5.mp4', 'Heat', SpinDataTemp025)
Animate(2.5, 'temp_2.5Quiver.mp4', 'Quiver', SpinDataTemp025)
Animate(100, 'temp_100.mp4', 'Heat', SpinDataTemp100)
Animate(100, 'temp_100Quiver.mp4', 'Quiver', SpinDataTemp100)
pr2.disable()
file = open('FullIsingStats.txt', 'w')
s = io.StringIO()
sortby = 'tottime'
ps = pstats.Stats(pr2, stream=s).sort_stats(sortby)
ps.print_stats()
file.write(s.getvalue())
file.close() | [
"ryankaufmannprof@gmail.com"
] | ryankaufmannprof@gmail.com |
dc3d0bbf003d9e703385315bc3b4b2710809e86f | 4c1da0c18482031ea650b32b1ee19cd8e16338fb | /exit_window_v0.py | 34b21c5da8185119ea480f48991bdc8f863aff28 | [] | no_license | adamjaro/irview | 03dbb4c4a3e2a1082e246552b104602108bfd44a | a47c6a49af5411bc167e35fdee961e27243b49f3 | refs/heads/master | 2021-06-19T22:52:27.242434 | 2021-06-17T22:11:32 | 2021-06-17T22:11:32 | 211,398,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py |
import math
import ROOT as rt
from ROOT import TVector2, TGraph, TText
#_____________________________________________________________________________
class exit_window_v0(object):
#photon exit window, part of electron beam pipe, rear side
#_____________________________________________________________________________
def __init__(self, geom):
#input points, in electron beamline frame
if geom == "flat":
self.pos = [(-21.7, -5), (-21.7, 5)] # flat geometry
if geom == "tilt":
self.pos = [(-18.8, -5), (-21.7, 5)] # tilted geometry
#print the geometry
self.print_position()
#points in z and x, both in m
self.zx_pos = []
for i in self.pos:
# z at 0 and x at 1, converted to cm
self.zx_pos.append( TVector2(i[0], 0.01*i[1]) )
#angle of initial rotation
self.rotate(-0.008)
#_____________________________________________________________________________
def rotate(self, theta):
#rotate by angle theta about the origin
for i in xrange(len(self.zx_pos)):
self.zx_pos[i] = self.zx_pos[i].Rotate(theta)
#_____________________________________________________________________________
def draw_2d(self):
#draw the exit window
self.geom = TGraph(len(self.zx_pos))
self.geom.SetLineColor(rt.kGreen+1)
self.geom.SetLineWidth(4)
ipoint = 0
for i in self.zx_pos:
self.geom.SetPoint(ipoint, i.X(), 100*i.Y())
ipoint += 1
self.geom.Draw("lsame")
#label
zpos = (self.zx_pos[0].X() + self.zx_pos[1].X())/2.
self.label = TText(zpos, (self.zx_pos[0].Y())*100-6, "Exit window")
self.label.SetTextSize(0.03)
#self.label.SetTextAngle(90)
#self.label.SetTextAlign(32)
self.label.SetTextAlign(23)
#self.label.Draw("same")
#_____________________________________________________________________________
def print_position(self):
#show position and angle of the exit window
z1 = self.pos[0][0]*1e3 # to mm
z2 = self.pos[1][0]*1e3
x1 = self.pos[0][1]*10. # to mm
x2 = self.pos[1][1]*10.
print("z_mid:", (z1 + z2)/2., "mm")
print("x_mid:", (x1 + x2)/2., "mm")
#length in x-z plane
dl = math.sqrt((z1-z2)**2 + (x1-x2)**2)
print("len:", dl, "mm")
#angle in x-z plane
dz = abs(z2-z1)
dx = abs(x2-x1)
#theta = math.atan( dx/dz )
theta = math.asin( dx/dl )
print("dz:", dz, "mm")
print("dx:", dx, "mm")
print("theta:", theta, "rad")
print("pi/2 - theta:", math.pi/2. - theta, "rad")
| [
"jaroslav.adam@cern.ch"
] | jaroslav.adam@cern.ch |
1ee2a6377c7360321e648ff0ee8fd16d7f80d533 | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/py2/toontown/safezone/DLPlayground.pyc.py | 5e65850004a6b36058f664b5dfb7c98d89449186 | [] | no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # 2013.08.22 22:24:38 Pacific Daylight Time
# Embedded file name: toontown.safezone.DLPlayground
from pandac.PandaModules import *
import Playground
import random
class DLPlayground(Playground.Playground):
__module__ = __name__
def __init__(self, loader, parentFSM, doneEvent):
Playground.Playground.__init__(self, loader, parentFSM, doneEvent)
def showPaths(self):
from toontown.classicchars import CCharPaths
from toontown.toonbase import TTLocalizer
self.showPathPoints(CCharPaths.getPaths(TTLocalizer.Donald))
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\safezone\DLPlayground.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:24:38 Pacific Daylight Time
| [
"sweep14@gmail.com"
] | sweep14@gmail.com |
8433d0ea23babf77d16a0856540607257f5e75b6 | abdab481e9c4f64fc3d2f84539c58b51fd66f198 | /numpy/core/arrayprint.pyi | b8c7b1604647f74d163548109b0a7e327c99aef6 | [] | no_license | depixusgenome/linting | aa068d9b5dd393c668429a1fed2e0dfc5d675125 | 4e3398fab98f873f77f8e8ab81eaeb7df215e7f7 | refs/heads/master | 2020-09-24T08:39:02.645467 | 2019-10-11T20:33:20 | 2019-10-11T20:33:20 | 225,716,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | pyi | # Stubs for numpy.core.arrayprint (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any, Optional
from . import numerictypes as _nt
def set_printoptions(precision: Optional[Any] = ..., threshold: Optional[Any] = ..., edgeitems: Optional[Any] = ..., linewidth: Optional[Any] = ..., suppress: Optional[Any] = ..., nanstr: Optional[Any] = ..., infstr: Optional[Any] = ..., formatter: Optional[Any] = ...): ...
def get_printoptions(): ...
def array2string(a, max_line_width: Optional[Any] = ..., precision: Optional[Any] = ..., suppress_small: Optional[Any] = ..., separator: str = ..., prefix: str = ..., style: Any = ..., formatter: Optional[Any] = ...): ...
class FloatFormat:
precision = ... # type: Any
suppress_small = ... # type: Any
sign = ... # type: Any
exp_format = ... # type: bool
large_exponent = ... # type: bool
max_str_len = ... # type: int
def __init__(self, data, precision, suppress_small, sign: bool = ...) -> None: ...
special_fmt = ... # type: Any
format = ... # type: Any
def fillFormat(self, data): ...
def __call__(self, x, strip_zeros: bool = ...): ...
class IntegerFormat:
format = ... # type: Any
def __init__(self, data) -> None: ...
def __call__(self, x): ...
class LongFloatFormat:
precision = ... # type: Any
sign = ... # type: Any
def __init__(self, precision, sign: bool = ...) -> None: ...
def __call__(self, x): ...
class LongComplexFormat:
real_format = ... # type: Any
imag_format = ... # type: Any
def __init__(self, precision) -> None: ...
def __call__(self, x): ...
class ComplexFormat:
real_format = ... # type: Any
imag_format = ... # type: Any
def __init__(self, x, precision, suppress_small) -> None: ...
def __call__(self, x): ...
class DatetimeFormat:
timezone = ... # type: Any
unit = ... # type: Any
casting = ... # type: Any
def __init__(self, x, unit: Optional[Any] = ..., timezone: Optional[Any] = ..., casting: str = ...) -> None: ...
def __call__(self, x): ...
class TimedeltaFormat:
format = ... # type: Any
def __init__(self, data) -> None: ...
def __call__(self, x): ...
| [
"pol.davezac@depixus.com"
] | pol.davezac@depixus.com |
96d191f3d7de629e749b76de9c56cc154bb389f5 | 5b5aee20bf23dfe1a6314fb524c4cc31e140ee64 | /thuoclao/check/migrations/0001_initial.py | 5b551428b8fbec05aa632d32ed1e93ba3ac8071c | [] | no_license | locvx1234/ThuoclaoPing | 7f75aa658a7b97d4b8c8982b2477e93463861cf2 | 8c821c0adf50a93a17c29255905bcf32101fe171 | refs/heads/master | 2023-08-17T05:29:38.392255 | 2019-10-22T02:44:41 | 2019-10-22T02:44:41 | 128,077,290 | 0 | 1 | null | 2023-09-09T02:36:36 | 2018-04-04T14:50:33 | JavaScript | UTF-8 | Python | false | false | 4,370 | py | # Generated by Django 2.0.4 on 2018-07-20 02:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Alert',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('email_alert', models.EmailField(blank=True, max_length=100)),
('telegram_id', models.CharField(blank=True, help_text='Telegram ID', max_length=10)),
('webhook', models.URLField(blank=True, help_text='URL to send message into Slack.')),
('delay_check', models.IntegerField(default=10, help_text='Interval time to check status host. - unit: second')),
],
options={
'ordering': ('user',),
},
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group_name', models.CharField(max_length=45)),
('description', models.TextField(blank=True, null=True)),
('ok', models.IntegerField(blank=True, null=True)),
('warning', models.IntegerField(blank=True, null=True)),
('critical', models.IntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Group_attribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('attribute_name', models.CharField(max_length=45)),
('value', models.CharField(max_length=100)),
('type_value', models.IntegerField(help_text='0: integer, 1: bool, 2: date, 3: string, 4: ip-domain, 5: URL', null=True)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='check.Group')),
],
),
migrations.CreateModel(
name='Host',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hostname', models.CharField(max_length=45)),
('description', models.TextField(blank=True, null=True)),
('status', models.IntegerField(default=-1, help_text='0: ok, 1: warning, 2: critical')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='check.Group')),
],
options={
'ordering': ('hostname',),
},
),
migrations.CreateModel(
name='Host_attribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('attribute_name', models.CharField(max_length=45)),
('value', models.CharField(max_length=100)),
('type_value', models.IntegerField(help_text='0: integer, 1: bool, 2: date, 3: string, 4: ip-domain, 5: URL', null=True)),
('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='check.Host')),
],
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('service_name', models.CharField(max_length=45)),
],
options={
'ordering': ('service_name',),
},
),
migrations.AddField(
model_name='group',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='check.Service'),
),
migrations.AddField(
model_name='group',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"locvx1234@gmail.com"
] | locvx1234@gmail.com |
0c4e1416b8bf147efb68a6aa8f6cd1d073d05606 | 64cd09628f599fe18bf38528309349f7ac0df71e | /Introduction/01_Introduction_python/10 Python functions/zip.py | 9752853642d820a77fb69fc2a92b8d95daa4c8cf | [] | no_license | JunyoungJang/Python | 958c057b2fd37c03876d3cf566ee27ee637bb020 | 76d4cd441deff8061e10608e0848360bc4f34490 | refs/heads/master | 2021-01-19T21:54:42.208469 | 2020-02-14T09:54:17 | 2020-02-14T09:54:17 | 83,768,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | x = [1, 2, 3]
y = [4, 5, 6]
print zip(x, y) # [(1, 4), (2, 5), (3, 6)]
x = [1, 2, 3]
y = [4, 5]
print zip(x, y) # [(1, 4), (2, 5)]
x = [1, 2]
y = [4, 5, 6]
print zip(x, y) # [(1, 4), (2, 5)]
| [
"lakino@yonsei.ac.kr"
] | lakino@yonsei.ac.kr |
fd276b40e39dc6d6a2c51ea5ff00896c701319db | ded3109fc9a05b60c36da2c41017f799fb887f07 | /moose_nerp-1/moose_nerp/graph/plot_channel.py | c5f38765ede441d9f83cf23a57ba64812dc2569b | [] | no_license | ModelDBRepository/245563 | f5f0d48213a9deb9b07ea694136e008cbcfdeff0 | 97cd40113230c4ddadc77725bb5148fcc2f5b9a7 | refs/heads/master | 2020-04-24T12:26:02.905630 | 2019-02-21T22:23:39 | 2019-02-21T22:23:39 | 171,954,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,160 | py | import numpy as np
import matplotlib.pyplot as plt
import moose
def plot_gate_params(chan,plotpow, VMIN=-0.1, VMAX=0.05, CAMIN=0, CAMAX=1):
#print "PLOT POWER", plotpow, chan.path,chan.Xpower
"""Plot the gate parameters like m and h of the channel."""
if chan.className == 'HHChannel':
cols=1
#n=range(0,2,1)
if chan.Zpower!=0 and (chan.Xpower!=0 or chan.Ypower!=0) and chan.useConcentration == True:
fig,axes=plt.subplots(3,cols,sharex=False)
axes[1].set_xlabel('voltage')
axes[2].set_xlabel('Calcium')
else:
fig,axes=plt.subplots(2,cols,sharex=True)
axes[1].set_xlabel('voltage')
plt.suptitle(chan.name)
if chan.Xpower > 0:
gate=moose.element(chan.path + '/gateX')
ma = gate.tableA
mb = gate.tableB
varray = np.linspace(gate.min, gate.max, len(ma))
axes[0].plot(varray, 1e3 / mb, label='mtau ' + chan.name)
if plotpow:
label = '(minf)**{}'.format(chan.Xpower)
inf = (ma / mb) ** chan.Xpower
else:
label = 'minf'
inf = ma / mb
axes[1].plot(varray, inf, label=label)
axes[1].axis([gate.min, gate.max, 0, 1])
if chan.Ypower > 0:
gate=moose.element(chan.path + '/gateY')
ha = gate.tableA
hb = gate.tableB
varray = np.linspace(gate.min, gate.max, len(ha))
axes[0].plot(varray, 1e3 / hb, label='htau ' + chan.name)
axes[1].plot(varray, ha / hb, label='hinf ' + chan.name)
axes[1].axis([gate.min, gate.max, 0, 1])
#
if chan.Zpower!=0:
gate=moose.element(chan.path + '/gateZ')
za = gate.tableA
zb = gate.tableB
xarray=np.linspace(gate.min,gate.max,len(za))
if (chan.Xpower==0 and chan.Ypower==0) or chan.useConcentration == False:
axes[0].plot(xarray,1e3/zb,label='ztau ' + chan.name)
axes[1].plot(xarray, za / zb, label='zinf' + chan.name)
if chan.useConcentration == True:
axes[1].set_xlabel('Calcium')
else:
axes[2].set_xscale("log")
axes[2].set_ylabel('ss, tau (s)')
axes[2].plot(xarray,1/zb,label='ztau ' + chan.name)
axes[2].plot(xarray, za / zb, label='zinf ' + chan.name)
axes[2].legend(loc='best', fontsize=8)
axes[0].set_ylabel('tau, ms')
axes[1].set_ylabel('steady state')
axes[0].legend(loc='best', fontsize=8)
axes[1].legend(loc='best', fontsize=8)
else: #Must be two-D tab channel
plt.figure()
ma = moose.element(chan.path + '/gateX').tableA
mb = moose.element(chan.path + '/gateX').tableB
ma = np.array(ma)
mb = np.array(mb)
plt.subplot(211)
plt.title(chan.name+'/gateX top: tau (ms), bottom: ss')
plt.imshow(1e3/mb,extent=[CAMIN,CAMAX,VMIN,VMAX],aspect='auto',origin='lower')
plt.colorbar()
plt.subplot(212)
if plotpow:
inf = (ma/mb)**chan.Xpower
else:
inf = ma/mb
plt.imshow(inf,extent=[CAMIN,CAMAX,VMIN,VMAX],aspect='auto',origin='lower')
plt.xlabel('Ca [mM]')
plt.ylabel('Vm [V]')
plt.colorbar()
if chan.Ypower > 0:
ha = moose.element(chan.path + '/gateY').tableA
hb = moose.element(chan.path + '/gateY').tableB
ha = np.array(ha)
hb = np.array(hb)
plt.figure()
plt.subplot(211)
plt.suptitle(chan.name+'/gateY tau')
plt.imshow(1e3/hb,extent=[CAMIN,CAMAX,VMIN,VMAX],aspect='auto')
plt.colorbar()
plt.subplot(212)
if plotpow:
inf = (ha/hb)**chan.Ypower
else:
inf = ha/hb
plt.imshow(inf,extent=[CAMIN,CAMAX,VMIN,VMAX],aspect='auto')
plt.xlabel('Ca [nM]')
plt.ylabel('Vm [V]')
plt.colorbar()
return
| [
"tom.morse@yale.edu"
] | tom.morse@yale.edu |
356f6d97d06b3df049dac75184bda584cee875f5 | 10f0d2f60b67e1bd45af82f1af0044b7ce1c8843 | /Questões do URI/Exercicio 7 (Reduce).py | 1ae64595fff54a5b2521678cd19aa78917b46155 | [] | no_license | vikvik98/Algoritmos_2017.1 | 184bce5fb6e155076253359e49f73fafb28dbc8d | 219fa2cfdf7320df34d282136c025c6c19be09d6 | refs/heads/master | 2021-08-24T04:15:29.550879 | 2017-12-08T02:08:34 | 2017-12-08T02:08:34 | 113,519,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | def main():
temperaturas = [0] * 121
menor = 0
maior = 0
media = 0
cont = 0
total_temp = 0
dias = 0
#Geração de vetor
for i in range(len(temperaturas)):
temperatura_dia = float(input())
temperaturas[i] = temperatura_dia
#Search
for i in range(len(temperaturas)):
menor = temperaturas[0]
if temperaturas[i] < menor:
menor = temperaturas[i]
print("A menor temperatura neste periodo foi de %.2f graus" % menor)
for i in range(len(temperaturas)):
maior = temperaturas[0]
if temperaturas[i] < maior:
maior = temperaturas[i]
print("A maior temperatura neste periodo foi de %.2f graus" % menor)
#Reduce
for i in range(len(temperaturas)):
total_temp += temperaturas[i]
cont += 1
media = total_temp / cont
print("A media das temperaturas é de %.2f" % media)
for i in range(len(temperaturas)):
if temperaturas[i] < media:
dias += 1
print("Em %d dias a temperatura foi abaixo da media." % dias)
if __name__ == '__main__':
main() | [
"vinicius.c.mascarenhas@hotmail.com"
] | vinicius.c.mascarenhas@hotmail.com |
166470769a122a0a59d76a1b0de5948e91f65b00 | 06b2a4f8dc27b5177bfd782386b59b47eafe6556 | /Django-level-3/project_exercise/users/admin.py | d703d0074246d644276dd261368797c5c0376557 | [] | no_license | mishrakeshav/Django-Tutorials | d81eb736f3d21f207147e1dd7c5132a54523feca | 3757c40e975fa782996f4968752d0b41da1cc96c | refs/heads/master | 2023-07-07T08:45:46.264661 | 2021-03-17T15:09:08 | 2021-03-17T15:09:08 | 275,316,311 | 1 | 0 | null | 2021-08-09T21:02:45 | 2020-06-27T06:49:23 | JavaScript | UTF-8 | Python | false | false | 129 | py | from django.contrib import admin
# Register your models here.
from users.models import User
admin.site.register(User)
| [
"keshav.sm@somaiya.edu"
] | keshav.sm@somaiya.edu |
eed1bf04dd38dc87166f710aeaa9a078af06b58e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03565/s696347515.py | d570956493fddbee98c2576a04e3d91b8bd5bc84 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | import copy
S_dash = input()
T = input()
len_S = len(S_dash)
len_T = len(T)
candidate0 = list(S_dash)
ans = "z"*51
if len_S >= len_T:
for s in range(len_S):
cha_S = S_dash[s]
if cha_S != "?" and cha_S in list(T):
for t in range(len_T):
cha_T = T[t]
if cha_S == cha_T and 0<= s-t < len_S and 0< s-t+len_T <= len_S:
S_cut = S_dash[s-t:s-t+len_T]
candidate1 = copy.deepcopy(candidate0)
for x in range(len_T):
if S_cut[x] == "?":
candidate1[s-t+x] = T[x]
elif S_cut[x] != T[x]:
break
else:
if "".join(candidate1[s-t:s-t+len_T]) == T:
if ans > "".join(candidate1).replace('?', 'a'):
ans = "".join(candidate1).replace('?', 'a')
for u in range(len_S-len_T+1):
cut_S = S_dash[u:u+len_T]
if cut_S.count("?") == len_T:
candidate1 = copy.deepcopy(candidate0)
for t in range(len_T):
candidate1[u+t] = T[t]
if ans > "".join(candidate1).replace('?', 'a'):
ans = "".join(candidate1).replace('?', 'a')
if ans == "z"*51:
ans = "UNRESTORABLE"
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
afb650659264e114dd31253efb373c7806e49a31 | 68728961294d360d26e8149e7e0a4816adf20842 | /utils/file_utils.py | 6754798f0e5a5d92d67229d23ed3d1bf4d5cd4e1 | [] | no_license | Dawn-Flying/text_summarization | d334fe884aa3a6341dd7bc381b03c1ab3e2c057e | ab68555c6f455c4f14fead5fc1c49420cdef8dc4 | refs/heads/master | 2023-07-17T07:49:21.995004 | 2021-08-26T15:46:19 | 2021-08-26T15:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # coding=utf-8
import time
import os
from utils.config import results_dir
def get_result_filename(batch_size, epochs, max_length_inp, embedding_dim, commit=''):
"""获取时间
:return:
"""
now_time = time.strftime('%Y_%m_%d_%H_%M_%S')
filename = now_time + f'_batch_size_{batch_size}_epochs_{epochs}_max_length_inp_{max_length_inp}_embedding_dim_{embedding_dim}{commit}.csv'
# result_save_path = os.path.join(results_dir, filename)
return filename
| [
"184419810@qq.com"
] | 184419810@qq.com |
7fbababfe191ed000fffba22a9dbb5a9a39a966b | 4f923bec0048a74177b31ed5261f1e1df0c98c73 | /pytorch_example.py | 9b5bc3aa4755c3d49d8cbf1bd2c7ee35eece8986 | [] | no_license | EngineerKhan/Equi-RC | 33c6c16873ba87620e861d1af7bd3ee3e380976c | e2228c8946b1f0f10639f593dc5b0c074744cacb | refs/heads/main | 2023-06-23T19:27:07.991533 | 2021-07-16T10:19:19 | 2021-07-16T10:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,934 | py | import torch
import torch.nn as nn
from pytorch_rclayers import RegToRegConv, RegToIrrepConv, IrrepToIrrepConv, IrrepActivationLayer, \
IrrepConcatLayer, IrrepBatchNorm, RegBatchNorm, RegConcatLayer, ToKmerLayer
class CustomRCPS(nn.Module):
def __init__(self,
filters=(16, 16, 16),
kernel_sizes=(15, 14, 14),
pool_size=40,
pool_strides=20,
out_size=1,
placeholder_bn=False,
kmers=1):
"""
This is an example of use of the equivariant layers :
The network takes as inputs windows of 1000 base pairs one hot encoded and outputs a binary prediction
The architecture follows the paper of Avanti Shrikumar : Reverse Complement Parameter Sharing
We reimplement everything with equivariant layers and add the possibility to start the encoding with
a K-Mer encoding layer.
"""
super(CustomRCPS, self).__init__()
self.kmers = int(kmers)
self.to_kmer = ToKmerLayer(k=self.kmers)
reg_in = self.to_kmer.features // 2
filters = [reg_in] + list(filters)
# Now add the intermediate layer : sequence of conv, BN, activation
self.reg_layers = nn.ModuleList()
self.bn_layers = nn.ModuleList()
self.activation_layers = nn.ModuleList()
for i in range(len(filters) - 1):
prev_reg = filters[i]
next_reg = filters[i + 1]
self.reg_layers.append(RegToRegConv(
reg_in=prev_reg,
reg_out=next_reg,
kernel_size=kernel_sizes[i],
))
self.bn_layers.append(RegBatchNorm(reg_dim=next_reg, placeholder=placeholder_bn))
# Don't add activation if it's the last layer
placeholder = (i == len(filters) - 1)
self.activation_layers.append(nn.ReLU())
self.concat = RegConcatLayer(reg=filters[-1])
self.pool = nn.MaxPool1d(kernel_size=pool_size, stride=pool_strides)
self.flattener = nn.Flatten()
self.dense = nn.Linear(in_features=752, out_features=out_size)
def forward(self, inputs):
x = self.to_kmer(inputs)
for reg_layer, bn_layer, activation_layer in zip(self.reg_layers, self.bn_layers, self.activation_layers):
x = reg_layer(x)
x = bn_layer(x)
x = activation_layer(x)
# Average two strands predictions, pool and go through Dense
x = self.concat(x)
x = self.pool(x)
x = self.flattener(x)
x = self.dense(x)
outputs = torch.sigmoid(x)
return outputs
class EquiNetBinary(nn.Module):
    """Equivariant binary classifier over one-hot encoded 1000 bp windows.

    The input regular representation is first mapped into an irrep feature
    space, then passed through a stack of irrep-to-irrep convolution blocks.
    The two strand predictions are merged, pooled and fed to a dense layer
    with a sigmoid output.
    """

    def __init__(self,
                 filters=((16, 16), (16, 16), (16, 16)),
                 kernel_sizes=(15, 14, 14),
                 pool_size=40,
                 pool_length=20,
                 out_size=1,
                 placeholder_bn=False,
                 kmers=1):
        super(EquiNetBinary, self).__init__()

        # K-mer encoding of the raw one-hot input.
        self.kmers = int(kmers)
        self.to_kmer = ToKmerLayer(k=self.kmers)

        # First block maps the regular representation onto irreps.
        reg_channels_in = self.to_kmer.features // 2
        a_first, b_first = filters[0]
        self.last_a, self.last_b = filters[-1]
        self.reg_irrep = RegToIrrepConv(reg_in=reg_channels_in,
                                        a_out=a_first,
                                        b_out=b_first,
                                        kernel_size=kernel_sizes[0])
        self.first_bn = IrrepBatchNorm(a=a_first, b=b_first, placeholder=placeholder_bn)
        self.first_act = IrrepActivationLayer(a=a_first, b=b_first)

        # Intermediate irrep blocks: conv -> batch-norm -> activation.
        self.irrep_layers = nn.ModuleList()
        self.bn_layers = nn.ModuleList()
        self.activation_layers = nn.ModuleList()
        successive = zip(filters[:-1], filters[1:], kernel_sizes[1:])
        for (a_in, b_in), (a_out, b_out), ksize in successive:
            self.irrep_layers.append(IrrepToIrrepConv(
                a_in=a_in,
                b_in=b_in,
                a_out=a_out,
                b_out=b_out,
                kernel_size=ksize,
            ))
            self.bn_layers.append(IrrepBatchNorm(a=a_out, b=b_out, placeholder=placeholder_bn))
            self.activation_layers.append(IrrepActivationLayer(a=a_out, b=b_out))

        # Strand merging, pooling and the classification head.
        self.concat = IrrepConcatLayer(a=self.last_a, b=self.last_b)
        self.pool = nn.MaxPool1d(kernel_size=pool_size, stride=pool_length)
        self.flattener = nn.Flatten()
        # NOTE(review): 1472 input features are hard-coded for the default
        # window/pooling geometry — confirm before changing those settings.
        self.dense = nn.Linear(in_features=1472, out_features=out_size)
        self.final_activation = nn.Sigmoid()

    def forward(self, inputs):
        """Run the equivariant pipeline and return sigmoid probabilities."""
        x = self.first_act(self.first_bn(self.reg_irrep(self.to_kmer(inputs))))
        for conv, bn, act in zip(self.irrep_layers, self.bn_layers, self.activation_layers):
            x = act(bn(conv(x)))
        # Average the two strand predictions, pool and classify.
        x = self.concat(x.float())
        x = self.flattener(self.pool(x))
        return self.final_activation(self.dense(x))
if __name__ == '__main__':
    # Smoke test: push one double-precision dummy batch (batch=1, 4 channels,
    # 1000 bp) through a 2-mer model with asymmetric (24, 8) irrep filters.
    inputs = torch.ones(size=(1, 4, 1000)).double()
    model = EquiNetBinary(kmers=2, filters=((24, 8), (24, 8), (24, 8)))
    outputs = model(inputs)
| [
"vincent.mallet96@gmail.com"
] | vincent.mallet96@gmail.com |
dde2cf28a3ea139ec0d626a79fe3807ac95e2aa5 | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/GitHub/GistsAPI/Comments/DeleteComment.py | 605046638edf1fa9b4c894ae8181868343c2aca5 | [] | no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# DeleteComment
# Deletes a specified comment.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteComment(Choreography):
    """Choreo wrapper for GitHub's "delete gist comment" operation."""

    def __init__(self, temboo_session):
        """Bind this Choreo to *temboo_session*, which must carry valid Temboo credentials."""
        Choreography.__init__(self, temboo_session, '/Library/GitHub/GistsAPI/Comments/DeleteComment')

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return DeleteCommentInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in the matching result-set class."""
        return DeleteCommentResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution in the matching execution class."""
        return DeleteCommentChoreographyExecution(session, exec_id, path)
class DeleteCommentInputSet(InputSet):
    """Inputs for DeleteComment: the OAuth token and the id of the comment to remove."""

    def set_AccessToken(self, value):
        """AccessToken (required, string): the access token retrieved during the OAuth process."""
        InputSet._set_input(self, 'AccessToken', value)

    def set_ID(self, value):
        """ID (required, string): the id of the comment to delete."""
        InputSet._set_input(self, 'ID', value)
class DeleteCommentResultSet(ResultSet):
    """Typed accessors over the values returned by a DeleteComment execution."""

    def getJSONFromString(self, str):
        """Deserialize *str* (a JSON document) into Python objects."""
        return json.loads(str)

    def get_Limit(self):
        """"Limit" output: the account's rate limit (integer, from the GitHub response header)."""
        return self._output.get('Limit', None)

    def get_Remaining(self):
        """"Remaining" output: API requests still available (integer, from the GitHub response header)."""
        return self._output.get('Remaining', None)

    def get_Response(self):
        """"Response" output: the raw JSON response from GitHub."""
        return self._output.get('Response', None)
class DeleteCommentChoreographyExecution(ChoreographyExecution):
    """Execution handle for DeleteComment; builds the matching result set."""

    def _make_result_set(self, response, path):
        return DeleteCommentResultSet(response, path)
| [
"cedric.warny@gmail.com"
] | cedric.warny@gmail.com |
8909c7579436ee650bba7657c438776d878afd26 | e2fb6865a02573709d26e8d8b4c52f2cd687da10 | /utils/__init__.py | 91bd60a419e4d2c09e9b69adff0b69d0b86ee136 | [] | no_license | tbarbugli/saleor | 0af6e21c540ce0af64b06a44821b82373c96d028 | caf9b245c35611c34094f59443da51a4e9657bfd | refs/heads/master | 2020-12-25T03:20:45.574256 | 2013-06-02T20:43:06 | 2013-06-02T20:43:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from compressor.templatetags.compress import CompressorNode
from django.template.base import Template
def seizaki_compress(context, data, name):
    """
    Run ``data`` through django-compressor as if it were wrapped in a
    {% compress %} template tag.

    ``data`` is the raw template string (in this project, the list of js
    files collected by sekizai) and ``name`` is the sekizai namespace,
    either 'js' or 'css'.  Returns the compressor's rendered output in
    'file' mode.
    """
    # The stray debug ``print data`` that used to live here has been removed.
    return CompressorNode(nodelist=Template(data).nodelist, kind=name, mode='file').render(context=context)
"tbarbugli@gmail.com"
] | tbarbugli@gmail.com |
5d51dcf1897c071819e438b377e20325171f356a | 40b1db3dbd327b0979749812a7388958afd0892c | /Django_practice/DjangoDay3/register_app/views.py | 7395203915c298117737fd8fd9b40ae26156c8a3 | [] | no_license | lucool/project | 91da0255a739b8464c415347c30d5aea69588dee | f2136f7435e817e057403d968b8eb70ddad889be | refs/heads/master | 2023-03-09T01:27:03.633168 | 2021-02-27T10:37:33 | 2021-02-27T10:37:33 | 340,904,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | from django.shortcuts import render, redirect
from django.urls import reverse
def register_view(request):
    """Show the registration form on GET; on POST, redirect to the success
    page for the submitted user name.

    NOTE(review): the posted password field ("regpwd") was read but never
    used, so the dead read was dropped; nothing is persisted here.
    """
    if request.method == "GET":
        return render(request, 'register_app/register.html')
    elif request.method == "POST":
        regname = request.POST.get("regname")
        # Redirect + reverse resolution: the "reg" URL namespace stays the
        # single source of truth for the success page's path.
        return redirect(reverse("reg:suc", kwargs={"username": regname}))
def success_view(request, username):
    # locals() hands both ``request`` and ``username`` to the template
    # context; the success template reads ``username``.
    return render(request, 'register_app/success.html', locals())
"lu2015594025@163.com"
] | lu2015594025@163.com |
00be56f3a2d4c9c95afe72cadc2bad2c4cbd94a1 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /23htQEtZobC8cfwcm_2.py | 360e158c04b47b25a149be2dda61466f3f503056 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py |
def canConcatenate(lst, target):
    """Return True if the sublists of ``lst`` together contain exactly the
    elements of ``target`` (order-insensitive, multiplicity-sensitive).

    Replaces the original hand-rolled nested append loop with a flattening
    comprehension; the sorted-comparison semantics are unchanged.
    """
    flattened = [item for sub in lst for item in sub]
    return sorted(flattened) == sorted(target)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
1e6cbbc5dc6cb0a08716478087f3334a5456edb6 | dff19df8f651fcac7afa53cccd83f90aeef85772 | /account/migrations/0001_initial.py | fd72fc22d8d8b791d84243bd40bb47ee901e9057 | [] | no_license | felipefoc/Django-ToDo | fb6631625fe8d8189841fc43519a727b3fd69ee5 | c15509c22fa7dc9c01ec7b760535becfa0d21b75 | refs/heads/master | 2023-01-23T09:54:56.980485 | 2020-11-22T21:00:01 | 2020-11-22T21:00:01 | 311,521,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | # Generated by Django 3.1.3 on 2020-11-16 04:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Task model for the
    # account app.

    initial = True

    dependencies = [
        # Task.user points at the project's configurable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('tasktext', models.TextField(max_length=200)),
                ('create_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('is_active', models.BooleanField(default=True)),
                ('ended_date', models.DateTimeField(blank=True)),
                ('obs', models.TextField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['id'],
            },
        ),
    ]
| [
"felipemfmayer@gmail.com"
] | felipemfmayer@gmail.com |
3ce0277d9ac32d4776f25bf49f8feb9b9d398fcb | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/54/usersdata/102/23160/submittedfiles/av1_p2_civil.py | 25adbefb3c5ac9e4e683f29149470bf2394ec2ea | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | # -*- coding: utf-8 -*-
from __future__ import division
# NOTE(review): Python 2 input() — the typed text is evaluated, so ``n`` is
# expected to arrive as an integer.
n = input('Digite o valor de n:')
i = 1
cont = 0
# Count the odd integers i with 1 <= i < n.
while i<n:
    if i%2==1:
        cont = cont + 1
    i = i + 1
print (cont)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
dfc8858b3e9adf2feaecf24069bb1764bce3df59 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/spacecomponents/server/components/bountyEscrow/persister.py | 6306404cfd75f08055edba12151d71291620eb72 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 1,523 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\spacecomponents\server\components\bountyEscrow\persister.py
class Persister(object):
    """Write-behind persister for a bounty escrow space component.

    State is only flushed to the database when MarkDirty() has been called
    since the last flush; the dirty flag is cleared after a successful write.
    """

    def __init__(self, solarSystemID, itemID, dbspacecomponent):
        self.isDirty = False
        self.itemID = itemID
        self.solarSystemID = solarSystemID
        self.dbspacecomponent = dbspacecomponent

    def _GetBountiesParameters(self, bountyContributors):
        """Flatten a {charID: isk} mapping into two parallel CSV strings."""
        pairs = [(str(charID), str(isk)) for charID, isk in bountyContributors.iteritems()]
        charCsv = ','.join(p[0] for p in pairs)
        iskCsv = ','.join(p[1] for p in pairs)
        return (charCsv, iskCsv)

    def PersistEverything(self, bountyContributors, bountyBonus):
        """Flush contributors and bonus to the DB, but only when dirty."""
        if not self.isDirty:
            return
        charCsv, iskCsv = self._GetBountiesParameters(bountyContributors)
        self.dbspacecomponent.BountyEscrow_PersistState(self.solarSystemID, self.itemID, bountyBonus, charCsv, iskCsv)
        self.isDirty = False

    def GetStateForSystem(self):
        """Load (bonus, {characterID: iskValue}) for this component from the DB."""
        bonusRows, bountyRows = self.dbspacecomponent.BountyEscrow_GetState(self.solarSystemID, self.itemID)
        bonus = bonusRows[0].bountyEscrowBonus if bonusRows else 0.0
        contributors = {row.characterID: row.iskValue for row in bountyRows}
        return (bonus, contributors)

    def MarkDirty(self):
        """Flag that in-memory state has diverged and needs persisting."""
        self.isDirty = True
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
8ca9949f60472c84308104b755239a122b758f5e | 18b977dccd70e9e5a1b553b28ab0413fb3f54f4b | /SoftUni/Python Developmen/Python-Basics/4_Conditional_Statements/fishing_boat.py | 7f0dbb468fa2c040c471a83c952bd44aa16c25e3 | [] | no_license | stevalang/Coding-Lessons | 7203e3a18b20e33e8d596e3dfb58d26c50b74530 | 2d0060c2268ad966efdcae4e6e994ac15e57243a | refs/heads/master | 2023-06-05T08:28:33.290530 | 2021-06-16T19:37:29 | 2021-06-16T19:37:29 | 284,852,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | budget = int(input())
season = input()
fishermen_count = int(input())

# Base boat price per season.  Anything unrecognised falls back to the
# winter price, mirroring the original else-branch behaviour.
SEASON_PRICES = {'Spring': 3000, 'Summer': 4200, 'Autumn': 4200}
base_price = SEASON_PRICES.get(season, 2600)

# Group-size discount multiplier (the original repeated the whole season
# chain once per size bracket, plus a no-op "else: price = price").
if fishermen_count <= 6:
    discount = 0.90
elif fishermen_count <= 11:
    discount = 0.85
else:
    discount = 0.75

price = base_price * discount

# Even-sized groups get an extra 5% off, except in autumn.
if fishermen_count % 2 == 0 and season != 'Autumn':
    price *= 0.95

difference = abs(budget - price)
if budget >= price:
    print(f'Yes! You have {difference:.2f} leva left.')
else:
    print(f'Not enough money! You need {difference:.2f} leva.')
| [
"rbeecommerce@gmail.com"
] | rbeecommerce@gmail.com |
64a61209a7c3321452f4445dd94ad549ba4eb752 | e8f7993403776ff414b370f9125891eecfe109ac | /dcn/simplesites/__init__.py | 55022ddb772e9c8d51819bd2f4613801b58f5b5f | [] | no_license | smcmahon/dcn.simplesites | db6d7524ed50bf6ad6601707ee4a4fd83edd1cf3 | aa19585e1bb75cf61cccaa08f711346600b52f23 | refs/heads/master | 2016-09-05T11:21:44.737796 | 2013-06-17T23:13:38 | 2013-06-17T23:13:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,921 | py | from zope.i18nmessageid import MessageFactory
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
# Set up the i18n message factory for our package (translation domain
# "dcn.simplesites").
MessageFactory = MessageFactory('dcn.simplesites')

# SimpleTerm arguments below are (token, value, title):
# tokens are what's saved;
# values are the html option values
# titles are presented to user;
# if value is missing, it is taken from token
# if title is missing, it is taken from value

# Page-layout skin choices offered to site administrators.
skins_vocab = SimpleVocabulary([
    SimpleTerm('Sunburst Theme', 'flex', title=u'Flexible Layout'),
    SimpleTerm('Plone Classic Theme', 'fixed', title=u'Fixed Layout'),
    ])

# Creative Commons licensing options; tokens match license_display keys.
license_vocab = SimpleVocabulary([
    SimpleTerm('None', title=u'None'),
    SimpleTerm('CC BY', title=u'Creative Commons Attribution'),
    SimpleTerm('CC BY-ND', title=u'Creative Commons Attribution, No-Derivatives'),
    SimpleTerm('CC BY-SA', title=u'Creative Commons Attribution, Share-Alike'),
    SimpleTerm('CC BY-NC', title=u'Creative Commons Attribution, Non-Commercial'),
    SimpleTerm('CC BY-NC-ND', title=u'Creative Commons Attribution, Non-Commercial, No-Derivatives'),
    SimpleTerm('CC BY-NC-SA', title=u'Creative Commons Attribution, Non-Commercial, Share-Alike'),
    ])
# Maps license tokens (see license_vocab) to the Creative Commons badge and
# attribution HTML rendered on the site.
license_display = {
    'None': u'',
    'CC BY': """<a rel="license" href="http://creativecommons.org/licenses/by/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/3.0/us/deed.en_US">Creative Commons Attribution 3.0 United States License</a>.""",
    'CC BY-ND': """<a rel="license" href="http://creativecommons.org/licenses/by-nd/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-nd/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nd/3.0/us/deed.en_US">Creative Commons Attribution-NoDerivs 3.0 United States License</a>.""",
    'CC BY-SA': """<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-sa/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/us/deed.en_US">Creative Commons Attribution-ShareAlike 3.0 United States License</a>.""",
    'CC BY-NC': """<a rel="license" href="http://creativecommons.org/licenses/by-nc/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-nc/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/3.0/us/deed.en_US">Creative Commons Attribution-NonCommercial 3.0 United States License</a>.""",
    'CC BY-NC-ND': """<a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-nd/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/3.0/us/deed.en_US">Creative Commons Attribution-NonCommercial-NoDerivs 3.0 United States License</a>.""",
    # Fixed: this entry previously reused the BY-NC-ND deed URLs, badge and
    # label, mislabelling Share-Alike works as No-Derivs.
    'CC BY-NC-SA': """<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/us/deed.en_US">Creative Commons Attribution-NonCommercial-ShareAlike 3.0 United States License</a>.""",
    }

# Footer credit line for sites hosted with DCN's help.
site_credit = """This site provided with the assistance of the <a href="http://www2.dcn.org/dcn">Davis Community Network</a>."""
"steve@dcn.org"
] | steve@dcn.org |
d18d477979825923900b12a742b7dc51c68e53ed | d21071464bef4f3fd51e554f280418d06975a77e | /leetcode/146 LRU cache.py | 366cc00adde6ee948f057550ffc92ccb472c8ca7 | [] | no_license | DeshErBojhaa/sports_programming | ec106dcc24e96231d447cdcac494d76a94868b2d | 96e086d4ee6169c0f83fff3819f38f32b8f17c98 | refs/heads/master | 2021-06-13T19:43:40.782021 | 2021-03-27T14:21:49 | 2021-03-27T14:21:49 | 164,201,394 | 1 | 0 | null | 2019-08-27T22:21:26 | 2019-01-05T09:39:41 | C++ | UTF-8 | Python | false | false | 1,768 | py | from collections import deque
class Node:
    """Doubly linked list node used by LRUCache to track recency order.

    The node stores only the cache key; the associated value lives in
    LRUCache.d.  Nodes nearer the list head are more recently used.
    """

    def __init__(self, key):
        self.k = key        # cache key (needed to delete dict entries on eviction)
        self.next = None    # neighbour towards the tail (less recently used)
        self.prev = None    # neighbour towards the head (more recently used)
class LRUCache:
    """Least-recently-used cache with O(1) get/put.

    ``self.d`` maps key -> value and ``self.node_cache`` maps key -> list
    node.  Nodes sit in a doubly linked list between two sentinels: the node
    right after ``self.head`` is the most recently used entry, the node
    right before ``self.tail`` is the eviction candidate.

    The raw pointer juggling that was duplicated across get() and put() is
    factored into _unlink() / _push_front().
    """

    class _Node:
        """Internal doubly linked list node; stores only the cache key."""
        __slots__ = ('k', 'prev', 'next')

        def __init__(self, key):
            self.k = key
            self.prev = None
            self.next = None

    def __init__(self, capacity: int):
        self.N = capacity               # maximum number of entries
        # Sentinel nodes remove all edge cases when (un)linking.
        self.head = self._Node(-10000)
        self.tail = self._Node(10000)
        self.head.next = self.tail
        self.tail.prev = self.head
        self.d = {}                     # key -> value
        self.node_cache = {}            # key -> list node

    def _unlink(self, nd) -> None:
        """Detach ``nd`` from the recency list."""
        nd.prev.next = nd.next
        nd.next.prev = nd.prev

    def _push_front(self, nd) -> None:
        """Insert ``nd`` right after head, marking it most recently used."""
        nd.next = self.head.next
        nd.prev = self.head
        self.head.next.prev = nd
        self.head.next = nd

    def get(self, key: int) -> int:
        """Return the value for ``key`` (refreshing its recency), else -1."""
        if key not in self.d:
            return -1
        nd = self.node_cache[key]
        self._unlink(nd)
        self._push_front(nd)
        return self.d[key]

    def put(self, key: int, value: int) -> None:
        """Insert or overwrite ``key``; evict the LRU entry when full."""
        if key in self.d:
            # Overwrite in place and refresh recency.
            self.d[key] = value
            nd = self.node_cache[key]
            self._unlink(nd)
            self._push_front(nd)
            return
        if self.N <= 0:
            # A zero-capacity cache can never store anything (the original
            # unlinked the head sentinel in this case).
            return
        if len(self.d) == self.N:
            # Evict the least recently used entry (just before the tail).
            lru = self.tail.prev
            self._unlink(lru)
            del self.d[lru.k]
            del self.node_cache[lru.k]
        nd = self._Node(key)
        self._push_front(nd)
        self.d[key] = value
        self.node_cache[key] = nd
| [
"noreply@github.com"
] | DeshErBojhaa.noreply@github.com |
4ee99e8dc62e8235506617820b0213baa4574f24 | e81a351d6049a05b92925c9b781d07701345713c | /Language Proficiency/Python/betweenTwoSets.py | c7fbb312611315069616c8b210a93f70eaa99062 | [] | no_license | dmiruke/HackerRank-Python | 1b2fa331d78ce0b4b23628d081441ca008150bd7 | 86b42458cae43e83dbd339665b31926eca0feacd | refs/heads/master | 2022-01-13T22:16:27.338429 | 2019-07-16T08:00:13 | 2019-07-16T08:00:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | #!/bin/python3
from math import gcd
#
# Complete the getTotalX function below.
#
def getTotalX(a, b):
    """Count the integers that are multiples of every element of ``a`` and
    factors of every element of ``b`` (HackerRank "Between Two Sets").

    Any such integer is a multiple of lcm(a) and a divisor of gcd(b), so
    only multiples of lcm(a) up to gcd(b) need checking.
    """
    # lcm over a in exact integer arithmetic.  The original computed
    # int(lo * i / gcd(lo, i)) with float true division, which can lose
    # precision for large values; floor division stays exact.
    lcm_a = a[0]
    for value in a[1:]:
        lcm_a = lcm_a * value // gcd(lcm_a, value)
    # gcd over b.
    gcd_b = b[0]
    for value in b[1:]:
        gcd_b = gcd(gcd_b, value)
    # Keep the multiples of lcm_a that divide gcd_b exactly.
    return sum(1 for x in range(lcm_a, gcd_b + 1, lcm_a) if gcd_b % x == 0)
# Read "n m", then the two space-separated sets, and print the answer.
nm = input().split()
n = int(nm[0])  # declared size of a (not used beyond parsing)
m = int(nm[1])  # declared size of b (not used beyond parsing)
a = list(map(int, input().rstrip().split()))
b = list(map(int, input().rstrip().split()))
print(getTotalX(a, b))
| [
"atharvapusalkar18@gmail.com"
] | atharvapusalkar18@gmail.com |
dddb37bcbb0fedac09f6b7b4160c9dec9062cb51 | 471b464cd4ec351cb8eb0918ee3658ab22438e47 | /test/test_edict.py | 396a58d7d6708f845f60cffee64a89d131c64da1 | [] | no_license | patarapolw/edictreader | 39c03d551fcc9f38315450ec9fb223e9134455a8 | d1dda8c079b443f66d851fafa9f536b6b0d9453a | refs/heads/master | 2021-04-06T11:01:16.941063 | 2018-03-16T05:32:16 | 2018-03-16T05:32:16 | 125,215,915 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from time import time
from edictreader.dict import Edict2
def test_search_japanese():
    """Time Edict2 construction and one dictionary lookup, printing both durations."""
    t0 = time()
    dictionary = Edict2()
    t1 = time()

    t2 = time()
    print(list(dictionary.search({'japanese': '鼹鼠'})))
    t3 = time()

    print('__init__() takes {:.4f} seconds'.format(t1 - t0))
    print('search() takes {:.4f} seconds'.format(t3 - t2))
if __name__ == '__main__':
    # Allow running this timing check directly, outside of a test runner.
    test_search_japanese()
| [
"patarapolw@gmail.com"
] | patarapolw@gmail.com |
de35c3bd4ec2a2fe84243fd5123191d42f03790d | 7f939ccdc7068f1c441333296896274724100142 | /allocation/migrations/0008_auto_20170723_1409.py | 04f237178cf11730a622275047206a8e112c04ed | [] | no_license | happychallenge/vendorMgmt | c62584af3a7ab4538b36be2f46a5f23b83043495 | e703a04461151950a4b29e5ab2abb855205b21e6 | refs/heads/master | 2021-07-10T08:03:38.666214 | 2018-01-31T11:00:01 | 2018-01-31T11:00:01 | 96,496,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-23 14:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: enforces one SupplyVendor row per
    # (supply, vendor) pair.

    dependencies = [
        ('products', '0015_auto_20170723_1352'),
        ('allocation', '0007_auto_20170723_0903'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='supplyvendor',
            unique_together=set([('supply', 'vendor')]),
        ),
    ]
| [
"happychallenge@outlook.com"
] | happychallenge@outlook.com |
837c22b02aaae92a95379ac2311f2991817f4eb1 | dcee93ce4b9fcf0a7ffa6ea658c403ed1fc84043 | /Meteor/src/logAnalysis/migrations/0003_auto_20170811_1009.py | 7bc9b2231774045650c9704355eb09a8a1d96d73 | [] | no_license | henryliuom/drv-study | 3eed96eef58138003371011034562a15ebc16b79 | dcab011bce0f34bcf50f8ab5601eb859a5a07cb7 | refs/heads/master | 2021-06-06T23:49:20.869907 | 2020-07-30T09:06:48 | 2020-07-30T09:06:48 | 95,858,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-11 02:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: moves the logAnalysis tables onto
    # "meteor_"-prefixed database table names.

    dependencies = [
        ('logAnalysis', '0002_auto_20170802_1329'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='loganalysis_agent',
            table='meteor_loganalysis_agent',
        ),
        migrations.AlterModelTable(
            name='loganalysis_area',
            table='meteor_loganalysis_area',
        ),
        migrations.AlterModelTable(
            name='loganalysis_cdn',
            table='meteor_loganalysis_cdn',
        ),
        migrations.AlterModelTable(
            name='loganalysis_http',
            table='meteor_loganalysis_http',
        ),
        migrations.AlterModelTable(
            name='loganalysis_status',
            table='meteor_loganalysis_status',
        ),
        migrations.AlterModelTable(
            name='loganalysis_upstream',
            table='meteor_loganalysis_upstream',
        ),
    ]
| [
"henry@techdog.com"
] | henry@techdog.com |
f58f43a4575f64887bb87422cc4d237f32db62bc | 282ec49f8ce8aa176c24e4f13a8852c9b0752e4a | /forgery/simple-manager/gui/main_window.py | fd9291d8160c763d132090a7548b411a28a98454 | [] | no_license | montreal91/workshop | b118b9358094f91defdae1d11ff8a1553d67cee6 | 8c05e15417e99d7236744fe9f960f4d6b09e4e31 | refs/heads/master | 2023-05-22T00:26:09.170584 | 2023-01-28T12:41:08 | 2023-01-28T12:41:08 | 40,283,198 | 3 | 1 | null | 2023-05-01T20:19:11 | 2015-08-06T03:53:44 | C++ | UTF-8 | Python | false | false | 1,537 | py | from random import shuffle
# from PySide.QtCore import Slot
from PySide.QtGui import QWidget
from gui.widgets.main_window_ui import Ui_JMainWindow
from club import JClub
from league import JLeague
# Fixed pool of club names used to seed a new league; its length also
# determines how many player ids CreateLeague() deals out (4 per club).
club_names = [
    "Canberra Masters",
    "Sydney Kangaroos",
    "Dandenong Pianists",
    "Melbourne Slams",
    "Melbourne Rockets",
    "Darwin Genes",
    "Kingston Whales",
    "Brisbane Rangers",
    "Adelaide Thrashers",
    "Perth Penguins"
]
class JMainWindow(QWidget):
    """Main application window: hosts the generated UI and owns the league."""

    def __init__(self):
        super(JMainWindow, self).__init__()
        self.widget = Ui_JMainWindow()
        self.widget.setupUi(self)
        self._league = None

    def CreateLeague(self, param_dict):
        """Build a league from the dialog parameters and seed it with clubs."""
        self._league = JLeague(
            days=param_dict["days"],
            divisions=param_dict["divs"],
            indiv_matches=param_dict["in_div_games"],
            exdiv_matches=param_dict["out_div_games"]
        )

        # Deal four randomly ordered player ids to every club.
        players_per_club = 4
        player_pool = [n + 1 for n in range(len(club_names) * players_per_club)]
        shuffle(player_pool)

        for index, club_name in enumerate(club_names):
            club_id = index + 1
            # Only the very first club is controlled by the player.
            new_club = JClub(club_id=club_id, name=club_name, playable=(club_id == 1))
            for _ in range(players_per_club):
                new_club.AddPlayer(player_pool.pop())
            self._league.AddClub(new_club)

        self._league.CreateSchedule()
| [
"nefedov.alexander91@yandex.ru"
] | nefedov.alexander91@yandex.ru |
655f6bdc4c7f5df0fd7c9a5464f13dfc8f420f3c | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /checkov/terraform/checks/resource/aws/SSMDocumentsArePrivate.py | 60963dea36c96ee18408e724b9898d385cb74fa2 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 1,120 | py |
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck
class SSMDocumentsArePrivate(BaseResourceNegativeValueCheck):
    """Flags aws_ssm_document resources shared with all AWS accounts.

    Related controls:
    NIST.800-53.r5 AC-21, NIST.800-53.r5 AC-3, NIST.800-53.r5 AC-3(7), NIST.800-53.r5 AC-4, NIST.800-53.r5 AC-4(21),
    NIST.800-53.r5 AC-6, NIST.800-53.r5 SC-7, NIST.800-53.r5 SC-7(11), NIST.800-53.r5 SC-7(16),
    NIST.800-53.r5 SC-7(20), NIST.800-53.r5 SC-7(21), NIST.800-53.r5 SC-7(3), NIST.800-53.r5 SC-7(4),
    NIST.800-53.r5 SC-7(9)
    """

    def __init__(self):
        super().__init__(
            name="Ensure SSM documents are not Public",
            id="CKV_AWS_303",
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_resources=['aws_ssm_document'],
        )

    def get_inspected_key(self) -> str:
        # JSONPath-style key: the accounts a document is shared with.
        return "permissions/[0]/account_ids"

    def get_forbidden_values(self) -> str:
        # "All" means the SSM document is shared publicly.
        return "All"


check = SSMDocumentsArePrivate()
| [
"noreply@github.com"
] | bridgecrewio.noreply@github.com |
37d4789329c8c3bf49c5bf0697aeb0465cc52a8f | b0c8e0cafa4a8916faab3cce65756ae91426c43f | /study/Python/VCWeek6/BOJ_14677_강의현.py | a37da96e2703bab006f6f7073b11ede71c4291ce | [] | no_license | Rurril/IT-DA-3rd | b3e3ec3c2a5efbc75b76b84e9002c27a0ba4a1c4 | 9985e237cb1b90e9609656d534e0ed164723e281 | refs/heads/master | 2022-07-22T15:26:39.085369 | 2021-11-23T13:30:06 | 2021-11-23T13:30:06 | 288,980,334 | 3 | 29 | null | 2020-11-05T10:25:30 | 2020-08-20T10:49:17 | Java | UTF-8 | Python | false | false | 971 | py | # 병약한 윤호 - G5
import sys
from collections import deque
def bfs():
    """Level-order search over (left, right) pill-eating states.

    State (left, right) = how many pills have been eaten from the left and
    right ends of ``pill``.  Meals cycle B -> L -> D via ``order``; each BFS
    level corresponds to one meal.  The returned ``day`` is the number of
    levels fully expanded minus one — presumably the number of meals that
    could be taken; verify against BOJ 14677's statement.

    NOTE(review): relies on the module globals T, BLD, pill and dp that the
    __main__ block sets up.
    """
    que=deque()
    que.append((0,0))
    day=-1          # -1 so the initial level (nothing eaten yet) doesn't count
    order=0         # index into BLD: the meal that must be eaten next
    while que:
        size=len(que)
        # Expand exactly the states that existed at the start of this level.
        while True:
            if size==0:
                break
            left, right=que.popleft()
            if left+right<T:
                # Take the next pill from the left end if it matches the meal.
                if pill[left]==BLD[order] and dp[left+1][right]==0:
                    que.append((left+1,right))
                    dp[left+1][right]=1
                # Or take it from the right end.
                if pill[T-1-right]==BLD[order] and dp[left][right+1]==0:
                    que.append((left, right+1))
                    dp[left][right+1]=1
            size-=1
        order=(order+1)%3
        day+=1
    return day
if __name__=="__main__":
    N=int(sys.stdin.readline())               # number of days
    T=3*N                                     # total pills: 3 meals per day
    BLD=['B','L','D']                         # meal cycle: breakfast, lunch, dinner
    pill=list(sys.stdin.readline().rstrip())  # pill labels, left to right
    # dp[left][right] == 1 once state (left, right) has been enqueued.
    dp=[[0 for _ in range(T+1)] for _ in range(T+1)]
    dp[0][0]=1
    print(bfs())
"riverkeh@naver.com"
] | riverkeh@naver.com |
a5260f4c75d1782d5851bebe940fca53f07fce4d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/viswabharathi/prob1.py | e936ba3450edac54600bc6b01b6579200d489771 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | t = int(raw_input())
for i in xrange(t):
name, n = raw_input().split()
n = int(n)
l = len(name)
tmp = 0
res = 0
start = 0
cstart = 0
for j, ch in enumerate(name):
if ch not in ('a', 'e', 'i', 'o', 'u'):
tmp += 1
else:
tmp = 0
cstart = j+1
if tmp >= n:
if cstart - start > 0:
#print " test ", cstart, start, j , l
res += ( (cstart - start + 1 ) * (l - j ))
else:
res += (l - j)
#print name[start:], res, cstart
cstart += 1
start = cstart
print "Case #%s: %s"% (i+1, res)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
a54650b300fda35bc32be1b5af773c9e8c81720b | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Contests/201-300/week 202/1552. Magnetic Force Between Two Balls/Magnetic Force Between Two Balls.py | 9e1e6d05fc8941b65b0940ab4af14bb1f0f32a08 | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Wayne
@contact: wangye.hope@gmail.com
@software: PyCharm
@file: Magnetic Force Between Two Balls
@time: 2020/08/17 00:23
"""
class Solution:
def maxDistance(self, position: list, m: int) -> int:
n = len(position)
position.sort()
def count(d):
ans, curr = 1, position[0]
for i in range(1, n):
if position[i] - curr >= d:
ans += 1
curr = position[i]
return ans
l, r = 0, position[-1] - position[0]
while l < r:
mid = r - (r - l) // 2
if count(mid) >= m:
l = mid
else:
r = mid - 1
return l
| [
"905317742@qq.com"
] | 905317742@qq.com |
4ace3d37196da36960499f3b695393f79a07f88b | 1d69ab8bc1ae1cb7f576f11eafbd72f9b6032d76 | /one_off/watertest.py.bak | 354b7a363d8581560e1b9c143d8c5b9c46435234 | [] | no_license | FDI-IT/fd | d40271a8ba51908914bfefc7574d86e6dbdb0d90 | 862ec4bdf8da94a29856c11f35b5202af73017b7 | refs/heads/python2 | 2022-12-03T17:27:05.767487 | 2020-07-15T19:01:41 | 2020-07-15T19:01:41 | 221,480,272 | 0 | 0 | null | 2022-11-22T02:11:00 | 2019-11-13T14:39:10 | Python | UTF-8 | Python | false | false | 924 | bak | from haccp.models import WaterTest
from datetime import date
import random
from decimal import Decimal
start_date = date(2009,1,7)               # first generated test date
delta = date(2009,1,14) - start_date      # one-week step between test dates
# Year-end weeks that populate_results() skips over.
vacation_dates = (date(2009,12,30),date(2010,12,29),date(2011,12,28),date(2012,12,26))
def populate_results():
    """Wipe all WaterTest rows and regenerate weekly fake results.

    Walks weekly from start_date to today, saving four zone readings per
    week while cycling the zone counter through 1..18; weeks that land on a
    vacation_dates entry are printed and stepped over.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; the zone counter is assumed to advance once per saved reading —
    confirm against the original file.
    """
    WaterTest.objects.all().delete()
    test_date = start_date
    zone = 1
    while(test_date < date.today()):
        for x in range(1,5):
            # Random reading in {0.1, 0.2, ..., 0.6}.
            test_result = Decimal(random.randrange(1,7,1))/10
            wt = WaterTest(test_date=test_date,
                           zone=zone,
                           test_result=test_result)
            wt.save()
            zone += 1
            if zone > 18:
                zone = 1
        test_date += delta
        if test_date in vacation_dates:
            print test_date
            test_date += delta
| [
"doofus1102@gmail.com"
] | doofus1102@gmail.com |
276172e148598912da58f9737d273252526d5dc8 | 7f1e0158e70b69bfa353661bfb2eabda9ee5c56c | /dnacentersdk/models/validators/v2_2_1/jsd_fe06867e548bba1919024b40d992.py | 6db535faef9734e104abbded4ca2bbe34a05cafe | [
"MIT"
] | permissive | Jerbuck/dnacentersdk | 97fb11844410ec7ab49aec35a30979d6288a87fd | ef2adde6113e7a6acd28a287007eb470fa39d31f | refs/heads/master | 2023-07-31T13:43:01.108243 | 2021-09-14T17:41:19 | 2021-09-14T17:41:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,121 | py | # -*- coding: utf-8 -*-
"""Cisco DNA Center SyncDevices data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorFe06867E548BBa1919024B40D992(object):
    """SyncDevices request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorFe06867E548BBa1919024B40D992, self).__init__()
        # Compile the embedded JSON schema once at construction time.
        # The .replace() strips the 16-space source indentation from the
        # triple-quoted schema text so json.loads sees clean JSON.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                "cliTransport": {
                "type": "string"
                },
                "computeDevice": {
                "type": "boolean"
                },
                "enablePassword": {
                "type": "string"
                },
                "extendedDiscoveryInfo": {
                "type": "string"
                },
                "httpPassword": {
                "type": "string"
                },
                "httpPort": {
                "type": "string"
                },
                "httpSecure": {
                "type": "boolean"
                },
                "httpUserName": {
                "type": "string"
                },
                "ipAddress": {
                "items": {
                "type": "string"
                },
                "type": "array"
                },
                "merakiOrgId": {
                "items": {
                "type": "string"
                },
                "type": "array"
                },
                "netconfPort": {
                "type": "string"
                },
                "password": {
                "type": "string"
                },
                "serialNumber": {
                "type": "string"
                },
                "snmpAuthPassphrase": {
                "type": "string"
                },
                "snmpAuthProtocol": {
                "type": "string"
                },
                "snmpMode": {
                "type": "string"
                },
                "snmpPrivPassphrase": {
                "type": "string"
                },
                "snmpPrivProtocol": {
                "type": "string"
                },
                "snmpROCommunity": {
                "type": "string"
                },
                "snmpRWCommunity": {
                "type": "string"
                },
                "snmpRetry": {
                "type": "integer"
                },
                "snmpTimeout": {
                "type": "integer"
                },
                "snmpUserName": {
                "type": "string"
                },
                "snmpVersion": {
                "type": "string"
                },
                "type": {
                "enum": [
                "COMPUTE_DEVICE",
                "MERAKI_DASHBOARD",
                "NETWORK_DEVICE",
                "NODATACHANGE"
                ],
                "type": "string"
                },
                "updateMgmtIPaddressList": {
                "items": {
                "properties": {
                "existMgmtIpAddress": {
                "type": "string"
                },
                "newMgmtIpAddress": {
                "type": "string"
                }
                },
                "type": "object"
                },
                "type": "array"
                },
                "userName": {
                "type": "string"
                }
                },
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        """Validate *request* against the schema; raise MalformedRequest on failure."""
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            # Surface the schema violation as the SDK's request exception.
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
afdb4e602c32338a2e0552721dc3c14860833ca9 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/network/v20200401/get_local_network_gateway.py | 9c6a84c850459931be4dd4496034752ac1f109fa | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 7,651 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetLocalNetworkGatewayResult',
'AwaitableGetLocalNetworkGatewayResult',
'get_local_network_gateway',
]
@pulumi.output_type
class GetLocalNetworkGatewayResult:
    """
    A common class for general resource information.
    """
    def __init__(__self__, bgp_settings=None, etag=None, fqdn=None, gateway_ip_address=None, id=None, local_network_address_space=None, location=None, name=None, provisioning_state=None, resource_guid=None, tags=None, type=None):
        # Each argument is shape-checked when truthy and then stored on the
        # output object via pulumi.set; the @pulumi.getter properties below
        # read the values back with pulumi.get.
        if bgp_settings and not isinstance(bgp_settings, dict):
            raise TypeError("Expected argument 'bgp_settings' to be a dict")
        pulumi.set(__self__, "bgp_settings", bgp_settings)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if fqdn and not isinstance(fqdn, str):
            raise TypeError("Expected argument 'fqdn' to be a str")
        pulumi.set(__self__, "fqdn", fqdn)
        if gateway_ip_address and not isinstance(gateway_ip_address, str):
            raise TypeError("Expected argument 'gateway_ip_address' to be a str")
        pulumi.set(__self__, "gateway_ip_address", gateway_ip_address)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if local_network_address_space and not isinstance(local_network_address_space, dict):
            raise TypeError("Expected argument 'local_network_address_space' to be a dict")
        pulumi.set(__self__, "local_network_address_space", local_network_address_space)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="bgpSettings")
    def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
        """
        Local network gateway's BGP speaker settings.
        """
        return pulumi.get(self, "bgp_settings")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def fqdn(self) -> Optional[str]:
        """
        FQDN of local network gateway.
        """
        return pulumi.get(self, "fqdn")

    @property
    @pulumi.getter(name="gatewayIpAddress")
    def gateway_ip_address(self) -> Optional[str]:
        """
        IP address of local network gateway.
        """
        return pulumi.get(self, "gateway_ip_address")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="localNetworkAddressSpace")
    def local_network_address_space(self) -> Optional['outputs.AddressSpaceResponse']:
        """
        Local network site address space.
        """
        return pulumi.get(self, "local_network_address_space")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the local network gateway resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The resource GUID property of the local network gateway resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetLocalNetworkGatewayResult(GetLocalNetworkGatewayResult):
    """Awaitable variant that resolves immediately to a plain result object."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield makes this method a generator, which is what
        # `await` requires; the return value becomes the await result.
        if False:
            yield self
        field_names = (
            'bgp_settings', 'etag', 'fqdn', 'gateway_ip_address', 'id',
            'local_network_address_space', 'location', 'name',
            'provisioning_state', 'resource_guid', 'tags', 'type')
        return GetLocalNetworkGatewayResult(
            **{field: getattr(self, field) for field in field_names})
def get_local_network_gateway(local_network_gateway_name: Optional[str] = None,
                              resource_group_name: Optional[str] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLocalNetworkGatewayResult:
    """
    A common class for general resource information.

    :param str local_network_gateway_name: The name of the local network gateway.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = {
        'localNetworkGatewayName': local_network_gateway_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke(
        'azure-native:network/v20200401:getLocalNetworkGateway',
        __args__, opts=opts, typ=GetLocalNetworkGatewayResult).value
    # Re-wrap the invoke result in the awaitable subclass, copying every
    # output field across.
    field_names = (
        'bgp_settings', 'etag', 'fqdn', 'gateway_ip_address', 'id',
        'local_network_address_space', 'location', 'name',
        'provisioning_state', 'resource_guid', 'tags', 'type')
    return AwaitableGetLocalNetworkGatewayResult(
        **{field: getattr(__ret__, field) for field in field_names})
| [
"noreply@github.com"
] | morrell.noreply@github.com |
c1baffca1113db80056ca413d53c5e5775d023d8 | 402cb8ac32c5ca7a53f5875688d1ebba1e96474b | /set41.py | 022632c87d78cc3aa65a3ee9f15eb2e24c5c22b5 | [] | no_license | Srija-U/codekataplayer | c073a13d8621f641a8aba8f23ebee4e1b673d58f | 392f24f35f178b034cfb76d2acc31bbc4b3a5814 | refs/heads/master | 2020-05-02T10:59:45.052802 | 2019-07-22T00:27:46 | 2019-07-22T00:27:46 | 177,914,184 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | class s:
def __init__(self):
self.items=[]
def push(self,data):
if(data==')'):
self.pop()
else:
self.items.append(data)
def pop(self):
self.items.pop()
def is_empty(self):
return self.items==[]
# Read one line, push every character (')' triggers a pop inside push),
# then report "yes" when the structure ends up empty, "no" otherwise.
# NOTE(review): a ')' on an empty stack raises IndexError — unbalanced
# input starting with ')' crashes rather than printing "no".
o = s()
l = str(input())
for i in range(0, len(l), 1):
    o.push(l[i])
v = 0
v = o.is_empty()
# is_empty returns a bool; comparing against 1 relies on True == 1.
if (v == 1):
    print("yes")
else:
    print("no")
| [
"noreply@github.com"
] | Srija-U.noreply@github.com |
6bce468c5760cc045ca616e1d5138f454bd28843 | 9e204a5b1c5ff4ea3b115ff0559b5af803ab4d15 | /019 Remove Nth Node From End of List.py | 41529a7bd2c40f1bb47863031dffc9d358a8a34b | [
"MIT"
] | permissive | Aminaba123/LeetCode | 178ed1be0733cc7390f30e676eb47cc7f900c5b2 | cbbd4a67ab342ada2421e13f82d660b1d47d4d20 | refs/heads/master | 2020-04-20T10:40:00.424279 | 2019-01-31T08:13:58 | 2019-01-31T08:13:58 | 168,795,374 | 1 | 0 | MIT | 2019-02-02T04:50:31 | 2019-02-02T04:50:30 | null | UTF-8 | Python | false | false | 1,269 | py | """
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
"""
__author__ = 'Danyang'
# Definition for singly-linked list.
class ListNode:
    """A singly linked list node holding a value and a next pointer."""
    def __init__(self, x):
        self.val, self.next = x, None
class Solution:
    def removeNthFromEnd(self, head, n):
        """
        Remove the nth node from the end of the list in a single pass
        (the original made two passes: one to count the length, one to
        find the node; the problem statement explicitly suggests one pass).

        Two-pointer technique: advance `fast` n nodes ahead, then move
        `fast` and `slow` together until `fast` reaches the last node;
        `slow` then sits just before the node to remove.

        :param head: head node of the list (n is guaranteed valid)
        :param n: the nth node from the end to remove, 1-based
        :return: ListNode, head of the modified list
        """
        fast = head
        for _ in range(n):
            fast = fast.next
        if fast is None:
            # The node to remove is the head itself.
            return head.next
        slow = head
        while fast.next:
            fast = fast.next
            slow = slow.next
        # slow is the predecessor of the nth-from-end node; unlink it.
        slow.next = slow.next.next
        return head
| [
"zhangdanyangg@gmail.com"
] | zhangdanyangg@gmail.com |
5eca7cbd6d0508a7363e93d232e67393cf4c08ac | 2f0cb310e2ec8fb176ee240aa964a7eef5ed23b4 | /giico/giico/report/fuel_card_summary/fuel_card_summary.py | 229f2dfacad40fdcf6e938782dfe93c93ae34bb8 | [
"MIT"
] | permissive | thispl/giico | b96cf6b707f361275f8723d15f8ea1f95f908c9c | 14c5631639ab56a586a7962be9871d722c20e205 | refs/heads/master | 2021-06-18T03:56:02.928303 | 2021-04-27T06:42:59 | 2021-04-27T06:42:59 | 200,183,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | # Copyright (c) 2013, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
def execute(filters=None):
    """Frappe report entry point.

    Placeholder implementation: ignores *filters* and returns an empty
    (columns, data) pair.
    """
    columns = []
    data = []
    return columns, data
| [
"hereabdulla@gmail.com"
] | hereabdulla@gmail.com |
3eb75a3150a4dcc08540c10af5f332ba2603423b | 776c8a5821eb8cd1357439454c9c20c9da239afb | /November,2020~July,2021/2021-01-01/9273_이승민_정제천을팔자_시간초과.py | 141f915d871d9eea921da90528b080d83b9a24e8 | [] | no_license | JinYeJin/algorithm-study | 85d84a726e0f7bb78a2da37504bc04a42b3906ea | 538c911e6adcdad3bfed3d9f76ccb30804dfb768 | refs/heads/master | 2023-07-04T16:09:12.101837 | 2021-08-14T02:23:44 | 2021-08-14T02:23:44 | 272,363,049 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # 정제헌을 팔자!
# 구현 아이디어 : n ** 2의 n까지의 약수의 개수를 구하기
import sys
input = sys.stdin.readline
def solution():
while 1:
arr = list(map(str, input()))
tmp = "".join(map(str, arr[2:]))
n = int(tmp)
cnt = 0
for i in range(1, n + 1):
if n ** 2 % i == 0:
cnt += 1
print(cnt)
solution() | [
"yeen0606@gmail.com"
] | yeen0606@gmail.com |
3ed1cf66d9c905bf48476a9373aa40591ee6fb9d | 6e6f97f416c06aada38c3a9db23eed7517bfaa6d | /accounts/migrations/0009_remove_profile_image_url.py | c34102ddb2e9e7f6d4d141d5c54b1eef76f1406d | [
"MIT"
] | permissive | ZendaInnocent/sogea | 1735ad047539c09a5c81e196a7a1963022452098 | 54cf257856cae451ad87e2396b8e44a34c0c6daf | refs/heads/main | 2023-08-23T07:18:45.741826 | 2021-10-28T13:19:06 | 2021-10-28T13:19:06 | 365,683,816 | 0 | 0 | MIT | 2021-05-09T06:29:57 | 2021-05-09T06:29:57 | null | UTF-8 | Python | false | false | 332 | py | # Generated by Django 3.1.7 on 2021-03-17 14:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0008_profile_image_url'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='image_url',
),
]
| [
"medsonnaftal@gmail.com"
] | medsonnaftal@gmail.com |
d02f7c02234dae46ada1d4880986741d24acb10c | 9cdccd361d2cc778e049ec16008bc457ebaba8ad | /wef/wef/views/home.py | 40860b43c7da518eae00ae33f9afe37adb572616 | [
"MIT"
] | permissive | deadlylaid/book_connect | 2c41c713c63927df27436038c26eb37d70e8b099 | a024363ed1ab06fbb21a9b5da6a04eda9d7dfb35 | refs/heads/master | 2022-11-24T23:37:34.679963 | 2020-12-23T07:10:30 | 2020-12-23T07:10:30 | 63,302,135 | 6 | 0 | MIT | 2022-11-22T01:21:30 | 2016-07-14T04:25:18 | Python | UTF-8 | Python | false | false | 679 | py | from django.views.generic import View
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from items.models import ItemPost
class Home(View):
    """Landing page: shows the first page (5 items) of visible item posts."""

    def get(self, request):
        # Newest first, excluding soft-deleted posts; page size fixed at 5
        # and always the first page is shown.
        visible_posts = ItemPost.objects.filter(is_deleted=False).order_by('-id')
        paginator = Paginator(visible_posts, 5)
        return render(
            request,
            'home.html',
            {"posts": paginator.page(1)},
        )
| [
"deadlylaid@gmail.com"
] | deadlylaid@gmail.com |
a09ff0316844e058aee4a997839bbdf28553ea89 | f6a6a8b7c0af49bd86930adde8ffe9ba37950046 | /finitediff/grid/__init__.py | 8a4df96de566e58f26627e8143e6e5b44750045a | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bjodah/finitediff | 5cf65cfff94c85d5aa747c5b0616b93482d707ae | bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8 | refs/heads/master | 2021-08-28T18:03:54.491501 | 2021-08-07T06:32:01 | 2021-08-07T06:39:34 | 14,988,640 | 34 | 5 | BSD-2-Clause | 2020-03-07T23:15:07 | 2013-12-06T17:10:24 | Python | UTF-8 | Python | false | false | 292 | py | from .rebalance import (
rebalanced_grid,
pre_pruning_mask,
combine_grids,
grid_pruning_mask,
)
from .refine import refine_grid
from .make import adapted_grid
from .util import locate_discontinuity, pool_discontinuity_approx, grid_error
from .plotting import plot_convergence
| [
"bjodah@gmail.com"
] | bjodah@gmail.com |
afb125cc33aca6fb089b34aaa288b914f5d698e8 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/components/__init__.py | 66159453ab8e75d21f8d1020cb3956a276b8b50c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 3,929 | py | # -*- coding: utf-8 -*- #
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The super-group for the update manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from googlecloudsdk.calliope import base
from googlecloudsdk.core import config
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Components(base.Group):
  """List, install, update, or remove Google Cloud SDK components.

  The {command} command group lets you control which tools are installed
  in the Cloud SDK. It can be used to install, update and remove components of
  the Cloud SDK, ensuring a lean, up-to-date installation.

  {command} regularly checks whether updates are available for the
  tools you already have installed, and gives you the opportunity to upgrade to
  the latest version.

  Certain components have dependencies. {command} will install any dependencies,
  and during removal, any dependant components will be uninstalled
  automatically.

  ## EXAMPLES

  To see all available components:

    $ {command} list

  To install a component you don't have:

    $ {command} install COMPONENT

  To remove a component you no longer need:

    $ {command} remove COMPONENT

  To update all components you have to their latest version:

    $ {command} update

  To update all installed components to version 1.2.3:

    $ {command} update --version 1.2.3
  """

  # Help-listing category for this command group.
  category = base.GCLOUD_SDK_TOOLS_CATEGORY

  @staticmethod
  def Args(parser):
    """Sets args for gcloud components."""
    # All four flags are hidden maintenance/debugging overrides.
    # An override for the location to install components into.
    parser.add_argument('--sdk-root-override', required=False, hidden=True,
                        help='THIS ARGUMENT NEEDS HELP TEXT.')
    # A different URL to look at instead of the default.
    parser.add_argument('--snapshot-url-override', required=False, hidden=True,
                        help='THIS ARGUMENT NEEDS HELP TEXT.')
    # This is not a commonly used option. You can use this flag to create a
    # Cloud SDK install for an OS other than the one you are running on.
    # Running the updater multiple times for different operating systems could
    # result in an inconsistent install.
    parser.add_argument('--operating-system-override', required=False,
                        hidden=True,
                        help='THIS ARGUMENT NEEDS HELP TEXT.')
    # This is not a commonly used option. You can use this flag to create a
    # Cloud SDK install for a processor architecture other than that of your
    # current machine. Running the updater multiple times for different
    # architectures could result in an inconsistent install.
    parser.add_argument('--architecture-override', required=False, hidden=True,
                        help='THIS ARGUMENT NEEDS HELP TEXT.')

  # pylint:disable=g-missing-docstring
  def Filter(self, unused_tool_context, args):
    # Components commands never bill against a user project.
    base.DisableUserProjectQuota()
    if config.INSTALLATION_CONFIG.IsAlternateReleaseChannel():
      log.warning('You are using alternate release channel: [%s]',
                  config.INSTALLATION_CONFIG.release_channel)
      # Always show the URL if using a non standard release channel.
      log.warning('Snapshot URL for this release channel is: [%s]',
                  config.INSTALLATION_CONFIG.snapshot_url)
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
2cf6fc96c940437aff1a27460dddfc56013ab7ec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02889/s447377526.py | 0de588993a718a3aa4684f5ab46849141c2ffbf5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | from scipy.sparse.csgraph import floyd_warshall
N,M,L = list(map(int, input().split()))
edges = [[0] * N for _ in range(N)]
for _ in range(M):
A,B,C = list(map(int, input().split()))
edges[A-1][B-1] = C
edges[B-1][A-1] = C
Q = int(input())
queries = []
for _ in range(Q):
queries.append(list(map(int,input().split())))
# use flord warshall to find min path between all towns
edges = floyd_warshall(edges)
# if the two towns can be travelled to on one tank, add to our fuel graph with distance 1
for i in range(N):
for j in range(N):
if edges[i][j] <= L:
edges[i][j] = 1
else:
edges[i][j] = 0
# use flord warshall to find min number of fuel tanks to travel between two towns
edges = floyd_warshall(edges)
for query in queries:
s = query[0] - 1
t = query[1] - 1
num_tanks = edges[s][t] - 1
if num_tanks != float('inf'):
print(int(num_tanks))
else:
print("-1")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ed84a635a4f264bedf8b43ef13bfdacf1f117a89 | 18783303fec404a4386c8ed0a8b8737c1424eadd | /Py exercises/IffyKeysValues_inalist.py | 2f4f2f342c53dca71a059ea0222cdfc45a995d94 | [
"MIT"
] | permissive | arvindkarir/python-pandas-code | a5b23b724c387d5ff745e017f968681847c506ad | fb3b68f07f0438cd0ef6d7ad669ce78650d884a8 | refs/heads/master | 2020-08-13T02:05:16.469398 | 2019-10-23T02:18:10 | 2019-10-23T02:18:10 | 214,887,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | # it generates a dictionary with keys and values, still need to figure out
import random #a standard library module
def someList(numBuckets):
buckets = []
dictKey = 0
dictVal = 0
for i in range(numBuckets):
buckets.append([])
#print(buckets)
for i in range(20):
dictKey = random.randint(0, 10*2)
dictVal = i
#print(dictKey, dictVal)
hashBucket = buckets[dictKey%numBuckets]
for i in range(len(hashBucket)):
if hashBucket[i][0] == dictKey:
hashBucket[i] =(dictKey, dictVal)
return
hashBucket.append((dictKey, dictVal))
print(hashBucket)
D = someList(7) #this number should always be greater than range (the data set)
| [
"arvindkarir@gmail.com"
] | arvindkarir@gmail.com |
25c778bb93b1774fe789a08191b367d0cdeceb2b | abacbf9798f089cd43fd50c2d577de50cca806d8 | /venv/Lib/site-packages/example/webalone/migrations/versions/060a77af98e5_initial.py | 051775fdf12e92177407e2b75bdcd612686b8d77 | [] | no_license | Sarveshr49/ProInternSML | f2bfd82905dd185d82830d4758d69ee2b23f71fb | 2ac09e31ebe54dbecd46935818b089a4b8428354 | refs/heads/master | 2023-08-11T17:36:16.387236 | 2021-10-16T18:23:04 | 2021-10-16T18:23:04 | 373,503,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,466 | py | """initial
Revision ID: 060a77af98e5
Revises:
Create Date: 2016-04-20 08:54:54.125614
"""
# revision identifiers, used by Alembic.
revision = '060a77af98e5'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
import odm
import odm.types
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_default():
### commands auto generated by Alembic - please adjust! ###
op.create_table('group',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('name')
)
op.create_table('permission',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=120), nullable=True),
sa.Column('description', sa.String(length=120), nullable=True),
sa.Column('policy', odm.types.json.JSONType(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('description'),
sa.UniqueConstraint('description'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=50), nullable=True),
sa.Column('first_name', sa.String(length=30), nullable=True),
sa.Column('last_name', sa.String(length=30), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password', sa.String(length=120), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('superuser', sa.Boolean(), nullable=True),
sa.Column('joined', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username'),
sa.UniqueConstraint('username')
)
op.create_table('groups_permissions',
sa.Column('group_id', sa.Integer(), nullable=True),
sa.Column('permission_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
sa.ForeignKeyConstraint(['permission_id'], ['permission.id'], )
)
op.create_table('mailinglist',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('topic', sa.String(length=60), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('email')
)
op.create_table('registration',
sa.Column('id', sa.String(length=40), nullable=False),
sa.Column('expiry', sa.DateTime(), nullable=False),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('token',
sa.Column('id', odm.types.uuid.UUIDType(length=16), nullable=False),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('expiry', sa.DateTime(), nullable=True),
sa.Column('ip_address', odm.types.ip_address.IPAddressType(length=50), nullable=True),
sa.Column('user_agent', sa.String(length=80), nullable=True),
sa.Column('last_access', sa.DateTime(), nullable=True),
sa.Column('session', sa.Boolean(), nullable=True),
sa.Column('description', sa.String(length=256), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users_groups',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('group_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
### end Alembic commands ###
def downgrade_default():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('users_groups')
op.drop_table('token')
op.drop_table('registration')
op.drop_table('mailinglist')
op.drop_table('groups_permissions')
op.drop_table('user')
op.drop_table('permission')
op.drop_table('group')
### end Alembic commands ###
| [
"sarveshragade@gmail.com"
] | sarveshragade@gmail.com |
f9d339753695d27f8f5ee038d683f7d5953ca007 | 8c02ce5c53c99d2036685d81e60a6506ce87347c | /tests.py | a5c57797d55aaba8fe954f1832ea405e233a548e | [
"MIT"
] | permissive | tonyseek/flask-misaka | fcb180e4fc44cbd600f5eaecea86119f81bc5ff9 | 9941a5ed770325c884fced429efc6bbc494f7261 | refs/heads/master | 2020-12-03T05:21:16.213025 | 2013-08-29T04:19:48 | 2013-08-29T04:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,305 | py | from flask import Flask, render_template_string, Markup
from unittest import TestCase
import mock
import misaka
from misaka import (EXT_AUTOLINK, EXT_FENCED_CODE,
EXT_LAX_HTML_BLOCKS, EXT_NO_INTRA_EMPHASIS, EXT_SPACE_HEADERS,
EXT_STRIKETHROUGH, EXT_SUPERSCRIPT, EXT_TABLES, HTML_ESCAPE,
HTML_EXPAND_TABS, HTML_HARD_WRAP, HTML_SAFELINK, HTML_SKIP_HTML,
HTML_SKIP_IMAGES, HTML_SKIP_LINKS, HTML_SKIP_STYLE, HTML_SMARTYPANTS,
HTML_TOC, HTML_TOC_TREE, HTML_USE_XHTML, TABLE_ALIGNMASK, TABLE_ALIGN_C,
TABLE_ALIGN_L, TABLE_ALIGN_R, TABLE_HEADER)
from flask.ext.misaka import Misaka, markdown
# Sample document exercising several non-default markdown features
# (strikethrough, inline code, superscript, bare URL, intra-word emphasis,
# a smart apostrophe).
TEST_MD = "*This* ~~contains~~ ``some`` mark^(down) extensions: www.markdown.com foo_bar_baz it's"

# App under test with the Misaka extension registered using default options.
app = Flask(__name__)
app.debug = True
Misaka(app)
### templating tests ###
@app.route('/a')
def view_render_inline():
    """Render a string through the ``|markdown`` filter inline."""
    text = u"This is ~~restructuredtext~~ *markdown*"
    return render_template_string('{{s|markdown}}', s=text)
def test_render_inline():
    """GET /a: with default options ~~...~~ is left untouched."""
    response = app.test_client().open('/a')
    assert response.data == u'<p>This is ~~restructuredtext~~ <em>markdown</em></p>\n'
@app.route('/b')
def view_render_var_block():
    """Render a passed-in variable inside a ``{% filter markdown %}`` block."""
    text = u"This is a *markdown* block"
    template = u'''{% filter markdown %}{{s}}{% endfilter %}'''
    return render_template_string(template, s=text)
def test_render_var_block():
    """GET /b: a template variable is rendered through the filter block."""
    response = app.test_client().open('/b')
    assert response.data == u'<p>This is a <em>markdown</em> block</p>\n'
@app.route('/c')
def view_render_in_block():
    """Render literal template text placed directly inside the filter block."""
    return render_template_string(
        u'''{% filter markdown %}This is a *markdown* block{% endfilter %}''')
def test_render_in_block():
    """GET /c: literal text inside the filter block is rendered as markdown."""
    response = app.test_client().open('/c')
    assert response.data == u'<p>This is a <em>markdown</em> block</p>\n'
### markdown extensions in templates

# Second app whose Misaka filter enables the strikethrough extension,
# so ~~...~~ renders as <del>...</del>.
extapp = Flask(__name__)
extapp.debug = True
Misaka(extapp, strikethrough=True)
@extapp.route('/d')
def view_render_inline_ext():
    """Same template as /a, but on the app with strikethrough enabled."""
    text = u"This is ~~restructuredtext~~ *markdown*"
    return render_template_string('{{s|markdown}}', s=text)
def test_render_inline_ext():
    """GET /d: with the extension enabled, ~~...~~ becomes <del>...</del>."""
    response = extapp.test_client().open('/d')
    assert response.data == u'<p>This is <del>restructuredtext</del> <em>markdown</em></p>\n'
# Note that the Markdown extension tests aren't actually testing that the
# Markdown is rendered correctly; that should be covered by the test suite of
# the misaka module. These tests should test that Flask-Misaka is calling
# the misaka module correctly, and returning the result unmodified
# (aside from being wrapped in a Markup class instance.)
@mock.patch("flask.ext.misaka.misaka.html", side_effect=misaka.html)
class MarkdownExtensionTests(TestCase):
def test_defaults(self, html):
    """No keyword options: misaka.html gets zero extension and render flags."""
    ext, flags = 0, 0
    result = markdown(TEST_MD)
    html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
    self.assertIsInstance(result, Markup)
    self.assertEqual(result, misaka.html(TEST_MD,
        extensions=ext, render_flags=flags))
def test_one_ext(self, html):
    """A single extension option maps to its EXT_* bit."""
    ext, flags = EXT_AUTOLINK, 0
    result = markdown(TEST_MD, autolink=True)
    html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
    self.assertIsInstance(result, Markup)
    self.assertEqual(result, misaka.html(TEST_MD,
        extensions=ext, render_flags=flags))
def test_two_ext(self, html):
    """Two extension options are OR-ed into the extensions bitmask."""
    ext, flags = EXT_FENCED_CODE | EXT_LAX_HTML_BLOCKS, 0
    result = markdown(TEST_MD, fenced_code=True, lax_html=True)
    html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
    self.assertIsInstance(result, Markup)
    self.assertEqual(result, misaka.html(TEST_MD,
        extensions=ext, render_flags=flags))
def test_one_render(self, html):
    """A single render option maps to its HTML_* render flag."""
    ext, flags = 0, HTML_ESCAPE
    result = markdown(TEST_MD, escape=True)
    html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
    self.assertIsInstance(result, Markup)
    self.assertEqual(result, misaka.html(TEST_MD,
        extensions=ext, render_flags=flags))
def test_two_render(self, html):
    """Two render options are OR-ed into the render-flags bitmask."""
    ext, flags = 0, HTML_HARD_WRAP | HTML_SAFELINK
    result = markdown(TEST_MD, wrap=True, safelink=True)
    html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
    self.assertIsInstance(result, Markup)
    self.assertEqual(result, misaka.html(TEST_MD,
        extensions=ext, render_flags=flags))
def test_one_ext_one_render(self, html):
ext, flags = EXT_NO_INTRA_EMPHASIS, HTML_SKIP_HTML
result = markdown(TEST_MD, no_intra_emphasis=True, no_html=True)
html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
self.assertIsInstance(result, Markup)
self.assertEqual(result, misaka.html(TEST_MD,
extensions=ext, render_flags=flags))
def test_two_ext_two_render(self, html):
ext = EXT_STRIKETHROUGH | EXT_SUPERSCRIPT
flags = HTML_SKIP_LINKS | HTML_SKIP_STYLE
result = markdown(TEST_MD, strikethrough=True, superscript=True,
skip_links=True, no_style=True)
html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
self.assertIsInstance(result, Markup)
self.assertEqual(result, misaka.html(TEST_MD,
extensions=ext, render_flags=flags))
def test_inverse(self, html):
ext, flags = EXT_NO_INTRA_EMPHASIS, 0
result = markdown(TEST_MD, intra_emphasis=False)
html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
self.assertIsInstance(result, Markup)
self.assertEqual(result, misaka.html(TEST_MD,
extensions=ext, render_flags=flags))
def test_undefined_option(self, html):
ext, flags = 0, 0
result = markdown(TEST_MD, fireworks=True)
html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
self.assertIsInstance(result, Markup)
self.assertEqual(result, misaka.html(TEST_MD,
extensions=ext, render_flags=flags))
def test_defined_and_undefined_options(self, html):
ext, flags = 0, HTML_SMARTYPANTS
result = markdown(TEST_MD, smartypants=True, stupidpants=False)
html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
self.assertIsInstance(result, Markup)
self.assertEqual(result, misaka.html(TEST_MD,
extensions=ext, render_flags=flags))
def test_set_defaults(self, html):
ext, flags = EXT_TABLES, HTML_SMARTYPANTS
md = Misaka(smartypants=True, tables=True)
result = md.render(TEST_MD)
html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
self.assertIsInstance(result, Markup)
self.assertEqual(result, misaka.html(TEST_MD,
extensions=ext, render_flags=flags))
def test_override_defaults(self, html):
ext, flags = 0, 0
md = Misaka(autolink=True)
result = md.render(TEST_MD, autolink=False)
html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
self.assertIsInstance(result, Markup)
self.assertEqual(result, misaka.html(TEST_MD,
extensions=ext, render_flags=flags))
| [
"david@davidbaumgold.com"
] | david@davidbaumgold.com |
a7b128e2ceda284f59e9a1fab21f774ce6c065d8 | 668a956d20eabae835a90b2d688f2232707e53d5 | /day18/untitled1/website/migrations/0001_initial.py | 732c12c19a989b2e4d383f06d92ee3e2ab72bac9 | [] | no_license | cq146637/The_front_background | e476c85dfa50cb181f350a5c36e241256637b500 | fefb3db705fe311022568619eb9e006c83b2eaf2 | refs/heads/master | 2021-08-31T23:04:27.298570 | 2017-12-23T09:22:51 | 2017-12-23T09:22:51 | 114,733,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-04 07:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``UserInfo`` table."""

    initial = True

    # First migration of this app, so there is nothing to depend on.
    dependencies = []

    operations = [
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=20)),
                ('password', models.CharField(max_length=50)),
            ],
        ),
    ]
| [
"1016025625@qq.com"
] | 1016025625@qq.com |
464e20ba504d56b1908b9d7160f72647b6baa3a5 | dd9a9649c6c82e1decaf4d2ea56c198b18cdd395 | /api/views.py | 9e6090589a87c02eb45be05750e9b6ed880cefdb | [] | no_license | srrobin/rest-framework-project | e56d2523e8d23109b308bfe8b6ea4bd0511b9da9 | d3050216b0f2cd9c81ad4dc57aa468cc6e007b0a | refs/heads/master | 2020-11-24T15:26:26.462369 | 2019-12-15T16:54:47 | 2019-12-15T16:54:47 | 228,215,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | from rest_framework import generics,permissions
from quote.models import QuoteList
from quote.models import QuoteCategory
from .serializers import QuoteSerializer
from .serializers import QuoteCategorySerializer
class QuoteAPIView(generics.ListAPIView):
    """Read-only list of every quote; requires an authenticated user."""

    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = QuoteSerializer
    queryset = QuoteList.objects.all()
class QuoteCategoryAPIView(generics.ListAPIView):
    """Read-only list of all quote categories."""

    serializer_class = QuoteCategorySerializer
    queryset = QuoteCategory.objects.all()
class QuoteAPIDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single quote by primary key."""

    serializer_class = QuoteSerializer
    queryset = QuoteList.objects.all()
class QuoteAPINewView(generics.ListCreateAPIView):
    """List only the most recently added quote; also accepts POSTs."""

    serializer_class = QuoteSerializer
    # NOTE(review): the slice limits the listing to the newest row, but a
    # sliced queryset cannot be filtered further by DRF — confirm intended.
    queryset = QuoteList.objects.all().order_by('-id')[:1]
"you@domain.com"
] | you@domain.com |
f923ced813c51ae62ed4fb7fb339dade9a332ae9 | 35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d | /Python_Study/第三模块学习/Shool_CRM/bin/start.py | 1bce8f2e71d6509dc9737bf36539adace09d8bde | [] | no_license | KongChan1988/51CTO-Treasure | 08b4ca412ad8a09d67c1ea79c7149f8573309ca4 | edb2e4bd11d39ac24cd240f3e815a88361867621 | refs/heads/master | 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 | Python | UTF-8 | Python | false | false | 250 | py | #-*- Coding:utf-8 -*-
# Author: D.Gray
import os
import sys

# The project root is two directories above this launcher; put it on the
# import path before pulling in the project's own packages.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
sys.path.append(BASE_DIR)

from core import core

if __name__ == '__main__':
    controller = core.Conter()
    controller.run()
"wangwei_198811@163.com"
] | wangwei_198811@163.com |
434a48e868ae18d561bb5d11ae4e5b288a8152da | c67029032e5f35eaae448d601d8cb870fd2b2953 | /WeatherApp/weather/migrations/0001_initial.py | 7fc72879a9963b8494cd28f9e4794ab6f4640218 | [
"MIT"
] | permissive | VToropov1337/django_weather | c6be2a882ed3a096e3df394acc143fdfa8f798d3 | 936637578f52679d6b79becc77372055dad6008f | refs/heads/master | 2020-12-04T20:00:13.632506 | 2020-01-05T12:47:06 | 2020-01-05T12:47:06 | 231,887,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # Generated by Django 3.0 on 2020-01-05 10:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``City`` table."""

    initial = True

    # Nothing to depend on — this is the app's first migration.
    dependencies = []

    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
    ]
| [
"vladimirtoropov87@gmail.com"
] | vladimirtoropov87@gmail.com |
dce5f9b2ef6fb2aa4b68de8e9b42a8e58f0c7336 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03786/s612653284.py | 3f76c3a440685a6568c77eac0ac77920bf2a28d3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | n = int(input())
a = sorted(list(map(int,input().split())))
cur = a[0]
count = 1
for i in range(1,n):
if a[i] <= cur*2:
count += 1
else:
count = 1
cur += a[i]
print(count) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e4c365971bff3e2af3483a510bf04d821556aff0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/309/106184/submittedfiles/jogoDaVelha_BIB.py | 46e2792cb92d6791a026e2704a7844089d936447 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,444 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from random import randint
# autenticação do simbolo para a jogada humano
def solicitaSimboloDoHumano():
    """Prompt the player for the symbol to use and return 'X' or 'O'."""
    simbolo = input("\nQual o simbolo que você deseja utilizar no jogo? ")
    # Keep asking until one of the four accepted spellings is typed.
    while simbolo not in ("X", "x", "O", "o"):
        print("\nOps! Simbolo inválido")
        simbolo = input("\nInforme um simbolo válido que deseja utilizar para a partida: X ou O : ")
    # Normalise lower-case answers to the canonical upper-case symbol.
    return "X" if simbolo in ("X", "x") else "O"
#sorteio
def sorteioPrimeiraJogada(simbM, simbH, tabuleiro, nome):
    """Draw lots for the first move using the parity of the current second.

    Returns 1 when the computer won the draw (its opening move is applied
    to ``tabuleiro`` and the board is shown), or 2 when the human starts.
    ``simbH`` and ``nome`` are unused but kept for interface compatibility.
    Fix: the original initialised a dead variable ``pro`` that was never
    read (the returned value was ``prop``).
    """
    segundo = datetime.now().second
    if segundo % 2 == 0:
        print("\nVencedor do sorteio para inicio do jogo: Computador")
        # Computer starts: play immediately and display the result.
        tabuleiro = jogadaComputador(tabuleiro, simbM)
        mostraTabuleiro(tabuleiro)
        return 1
    print("\nVencedor do sorteio para inicio do jogo: Jogador")
    return 2
#Função para printar o tabuleiro:
def mostraTabuleiro(tabuleiro):
    """Print the 3x3 board, one row per line, cells joined by ' | '."""
    print(' ')
    for linha in tabuleiro:
        print(linha[0] + ' | ' + linha[1] + ' | ' + linha[2])
#Função da jogada do humano
def jogadaHumana(nome, simbH, tabuleiro):
    """Ask the player for a move ("row col", e.g. "0 2") and apply it.

    Re-prompts while the coordinates fall outside the 3x3 board, then
    delegates to ``validarJogada`` (which re-asks when the cell is taken).
    Returns the mutated board.

    Fixes vs. the original: the bounds test only rejected moves with BOTH
    coordinates out of range, and the re-prompt read ``int(input(...))``
    and then indexed the int, crashing with TypeError.
    """
    casa = input("\n Qual a sua jogada, %s ? " % nome)
    i = int(casa[0])
    j = int(casa[2])
    while not (0 <= i <= 2 and 0 <= j <= 2):
        print('\nOps! Jogada invalida... ')
        # Keep reading strings so the coordinates can be re-parsed.
        casa = input("\n Qual a sua jogada, %s ?" % nome)
        i = int(casa[0])
        j = int(casa[2])
    validarJogada(nome, simbH, tabuleiro, i, j)
    return tabuleiro
#Função para validar uma jogada
def validarJogada(nome, simbH, tabuleiro, i, j):
    """Place ``simbH`` at (i, j) if the cell is free; otherwise ask again."""
    celula = tabuleiro[i][j]
    if celula in ("X", "O"):
        # Cell already taken: warn and hand control back to the prompt.
        print("\nOPS!!! Essa jogada não está disponível. Tente novamente!")
        jogadaHumana(nome, simbH, tabuleiro)
    else:
        tabuleiro[i][j] = simbH
#Função da Jogada do computador
def jogadaComputador(tabuleiro, simbM):
    """Write the machine's symbol on a random free cell; return the board."""
    while True:
        # Draw coordinates until an empty cell comes up.
        linha = randint(0, 2)
        coluna = randint(0, 2)
        if tabuleiro[linha][coluna] == " ":
            break
    tabuleiro[linha][coluna] = simbM
    return tabuleiro
#Função que verifica o vencedor
def VerificaVencedor(tab, simbH, nome, simbM):
    """Inspect the board and report the game state.

    Returns 2 when the human (``simbH``) completed a line, 4 when the
    machine (``simbM``) did, 6 when the board is full with no winner
    ("deu velha"), and 1 while the game is still open.  ``nome`` is
    unused but kept for interface compatibility.

    Bug fix: the original's elif chain also matched three EQUAL BLANK
    cells (e.g. an empty first row), which short-circuited the chain and
    hid genuine wins elsewhere on the board.
    """
    linhas = [tuple(tab[i]) for i in range(3)]
    colunas = [tuple(tab[i][j] for i in range(3)) for j in range(3)]
    diagonais = [(tab[0][0], tab[1][1], tab[2][2]),
                 (tab[0][2], tab[1][1], tab[2][0])]
    for trio in linhas + colunas + diagonais:
        # A line only counts when its three cells are equal AND non-blank.
        if trio[0] != " " and trio[0] == trio[1] == trio[2]:
            if trio[0] == simbH:
                return 2
            if trio[0] == simbM:
                return 4
    if all(celula != " " for linha in tab for celula in linha):
        print('Deu velha')
        return 6
    return 1
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d9596a089de2fc79e1fc2156fb5fe244e800b3a3 | d06f9135fd0f42e59df4c91324cd22124a3486d7 | /Study_Groups/mapper.py | 8ac83c84dbee973eb3f0421b64d8367921a97844 | [] | no_license | jlyang1990/MapReduce | 7546599afaa9b8366e02a0724a6b77cc46149b30 | d01a4d7c4e12e0c77ff820fb2a20a9210c9ce3b0 | refs/heads/master | 2020-06-13T16:56:10.008429 | 2016-12-05T08:16:33 | 2016-12-05T08:16:36 | 75,523,296 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | #!/usr/bin/python
"""
In this exercise your task is to write a mapreduce program that for each forum thread (that is a question node with all it's answers and comments)
would give us a list of students that have posted there - either asked the question, answered a question or added a comment.
If a student posted to that thread several times, they should be added to that list several times as well, to indicate intensity of communication.
"""
"""
column names of forum_node.tsv are:
id, title, tagnames, author_id, body, node_type, parent_id, abs_parent_id, added_at, score, state_string,
last_edited_id, last_activity_by_id, last_activity_at, active_revision_id, extra, extra_ref_id, extra_count, marked
The ones that are the most relevant to the task are:
"id": id of the node
"title": title of the node. in case "node_type" is "answer" or "comment", this field will be empty
"tagnames": space separated list of tags
"author_id": id of the author
"body": content of the post
"node_type": type of the node, either "question", "answer" or "comment"
"parent_id": node under which the post is located, will be empty for "questions"
"abs_parent_id": top node where the post is located
"added_at": date added
"""
import sys
import csv
reader = csv.reader(sys.stdin, delimiter = '\t')
for line in reader:
if len(line) == 19 and line[0] != "id":
if line[5] == "question":
print "{0}\t{1}".format(line[0], line[3])
else:
print "{0}\t{1}".format(line[6], line[3])
| [
"jlyang@ucdavis.edu"
] | jlyang@ucdavis.edu |
1ddfcadb90f807f864cbfd5f09862558cbbcd387 | 8839bd1f2e35726b6c8066985690fa2fa86b09a6 | /5.pyAI-K210/5.项目应用/2.视频播放/video_play.py | 8f0972098bff92c05d8d2969bc4f8bdbdf482f2a | [
"MIT"
] | permissive | elektrik-elektronik-muhendisligi/MicroPython-Examples-1 | a9532b06aba470f7f26f841929f4fb145549f70b | f7b08e95ff73e3417af21918c9c6bcf2f83281c6 | refs/heads/master | 2021-05-25T22:58:36.207098 | 2020-04-01T09:50:53 | 2020-04-01T09:50:53 | 253,956,073 | 1 | 0 | null | 2020-04-08T01:39:46 | 2020-04-08T01:39:45 | null | UTF-8 | Python | false | false | 807 | py | '''
实验名称:视频播放器
版本:v1.0
日期:2019.12
翻译和注释:01Studio
说明:AVI视频播放。
'''
import video, time
from Maix import GPIO
from board import board_info
from fpioa_manager import fm
import lcd

lcd.init()

# Audio power-amplifier enable pin.
AUDIO_PA_EN_PIN = 32

# Map the enable pin onto a GPIO so the amplifier can be switched on.
if AUDIO_PA_EN_PIN:
    fm.register(AUDIO_PA_EN_PIN, fm.fpioa.GPIO1, force=True)

# Route the I2S audio output signals (data / bit clock / word select).
fm.register(34, fm.fpioa.I2S0_OUT_D1, force=True)
fm.register(35, fm.fpioa.I2S0_SCLK, force=True)
fm.register(33, fm.fpioa.I2S0_WS, force=True)

# Open the AVI clip from the SD card and show its metadata.
v = video.open("/sd/badapple.avi")
print(v)

# Playback volume (device-specific scale).
v.volume(5)

# Pump frames until the clip ends; play() returns 0 at end of stream.
while True:
    if v.play() == 0:
        print("play end")
        break

# Destroy the player object to release its buffers.
v.__del__()
| [
"237827161@qq.com"
] | 237827161@qq.com |
cddcb6fcda2ccfcf7c8853049983101c62c2c2c7 | 739e91039c05943352a3fc07e768641f74097482 | /Python_self/Binary_Tree.py | 9b0dcbbdfde156b128cf02239d2a618ab114eadf | [] | no_license | doublejy715/Problem-Solve | 651182079ded1a9da3478dd30a4c4507894de85e | 57d5a672a48103769c8cc022cb7132d988624600 | refs/heads/master | 2023-07-13T09:51:23.145427 | 2021-08-31T04:35:04 | 2021-08-31T04:35:04 | 234,250,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | '''
트리는 변수를 head를 가진다 root node이다.
기본적으로 이진트리는 링크드 리스트로 형성 할 수 있다.
기본적으로 노드 형성 클래스와 트리 클래스로 나누어서 생각한다.
1. class node
data, left, right
2. class tree
insert
search(종류 : DLR)
delete
'''
class Node():
    """A single BST node holding a value and two child links."""

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None


class tree():
    """Binary search tree rooted at ``head``; smaller values go left.

    Bug fixes vs. the original:
    - ``search`` read a nonexistent ``node.data`` attribute (nodes store
      ``value``) and compared the Node OBJECT against the target value;
    - ``search`` walked right for smaller targets and left for larger
      ones (inverted), and dereferenced a missing child (None);
    - ``insert`` looped forever on duplicate values (neither branch
      advanced); duplicates are now ignored.
    """

    def __init__(self, head):
        self.head = head

    def insert(self, data):
        """Insert ``data`` into the tree; duplicates are ignored."""
        current = self.head
        while True:
            if data < current.value:
                if current.left is None:
                    current.left = Node(data)
                    return
                current = current.left
            elif data > current.value:
                if current.right is None:
                    current.right = Node(data)
                    return
                current = current.right
            else:
                # Value already present: nothing to do.
                return

    def search(self, data):
        """Return True if ``data`` is stored in the tree, else False."""
        current = self.head
        while current is not None:
            if data == current.value:
                return True
            # Descend toward the side that could contain the target.
            current = current.right if data > current.value else current.left
        return False
# Smoke demo: build a small tree rooted at 2, add neighbours, probe for 1.
head = Node(2)
BST = tree(head)
for valor in (1, 3):
    BST.insert(valor)
BST.search(1)
"doublejy715@gmail.com"
] | doublejy715@gmail.com |
025ee506dce791e2417711b08ae51f16ccd15efc | 757aace69943122a21b1fac07ea43199e2ca1228 | /lib/python3.5/site-packages/launch/__init__.py | 39e23c9c10ac1f5300733afa7dbe86a03992527d | [] | no_license | iamsile/ros2-for-os | f9faa522b2f17f04e07de88a6053599fa9a987bc | a83f66a6a34ec9ec969b54194e3bdd32d5b9d092 | refs/heads/master | 2021-01-20T16:31:05.458482 | 2017-06-29T02:00:48 | 2017-06-29T02:00:48 | 95,727,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from launch.output_handler import CompositeOutputHandler
from launch.output_handler import ConsoleOutput
from launch.exit_handler import default_exit_handler
class LaunchDescriptor(object):
    """Collects the coroutine and process tasks that a launcher should run."""

    def __init__(self):
        self.task_descriptors = []

    def _ensure_unique_name(self, name):
        # Reject a descriptor whose name collides with an existing one.
        if name is not None and name in [p.name for p in self.task_descriptors]:
            raise RuntimeError("Task name '%s' already used" % name)

    def add_coroutine(self, coroutine, name=None, exit_handler=None):
        """Register a coroutine task and return its descriptor."""
        self._ensure_unique_name(name)
        if exit_handler is None:
            exit_handler = default_exit_handler
        descriptor = CoroutineDescriptor(coroutine, name, exit_handler)
        self.task_descriptors.append(descriptor)
        return descriptor

    def add_process(self, cmd, name=None, env=None, output_handlers=None, exit_handler=None):
        """Register an external-process task and return its descriptor."""
        self._ensure_unique_name(name)
        if output_handlers is None:
            # Default to echoing the process output on the console.
            output_handlers = [ConsoleOutput()]
        combined_output = CompositeOutputHandler(output_handlers)
        if exit_handler is None:
            exit_handler = default_exit_handler
        descriptor = ProcessDescriptor(
            cmd, name, combined_output, exit_handler, env=env)
        self.task_descriptors.append(descriptor)
        return descriptor
class TaskDescriptor(object):
    """Base class for launch tasks; tracks the task's runtime state."""

    def __init__(self):
        # Populated by the launcher while the task is running.
        self.task_state = None
class CoroutineDescriptor(TaskDescriptor):
    """Describes a coroutine task: the awaitable, its name, exit handler."""

    def __init__(self, coroutine, name, exit_handler):
        super(CoroutineDescriptor, self).__init__()
        self.coroutine = coroutine
        self.name = name
        self.exit_handler = exit_handler
class ProcessDescriptor(TaskDescriptor):
    """Describes an external-process task and its runtime transport handles."""

    def __init__(self, cmd, name, output_handler, exit_handler, env=None):
        super(ProcessDescriptor, self).__init__()
        self.cmd = cmd
        self.name = name
        self.output_handler = output_handler
        self.exit_handler = exit_handler
        self.env = env
        # Filled in by the event loop once the subprocess has been spawned.
        self.transport = None
        self.protocol = None

    def send_signal(self, signal):
        """Forward ``signal`` to the process, if it has been started."""
        if self.transport:
            self.transport.send_signal(signal)

    def terminate(self):
        """Terminate the process, if it has been started."""
        if self.transport:
            self.transport.terminate()
| [
"longxian.tw@alibaba-inc.com"
] | longxian.tw@alibaba-inc.com |
19b1eac27cf2de891f0469d21918d872855415a4 | 5c0c0176db0ccf2c24b6b5ed459a8dc144518b13 | /examples/nas/naive-tf/train.py | f2b2062a8954ca01b8b6e9ef11b2dfe99ca3e815 | [
"MIT"
] | permissive | petuum/nni | ac4f4a1c4d6df71684eeffa127b7c4858fd29e97 | 8134be6269902939232482d63649c06f9864be6d | refs/heads/master | 2023-02-18T11:21:41.078889 | 2021-01-20T03:21:50 | 2021-01-20T03:21:50 | 302,736,456 | 4 | 3 | MIT | 2020-11-20T20:21:15 | 2020-10-09T19:34:11 | Python | UTF-8 | Python | false | false | 2,663 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (AveragePooling2D, BatchNormalization, Conv2D, Dense, MaxPool2D)
from tensorflow.keras.losses import Reduction, SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import SGD
from nni.nas.tensorflow.mutables import LayerChoice, InputChoice
from nni.algorithms.nas.tensorflow.enas import EnasTrainer
class Net(Model):
    """Small CIFAR-10 CNN with two searchable conv layers and an optional skip."""

    def __init__(self):
        super().__init__()
        # Searchable first conv: 3x3 vs 5x5 kernel.
        self.conv1 = LayerChoice([
            Conv2D(6, 3, padding='same', activation='relu'),
            Conv2D(6, 5, padding='same', activation='relu'),
        ])
        self.pool = MaxPool2D(2)
        # Searchable second conv: 3x3 vs 5x5 kernel.
        self.conv2 = LayerChoice([
            Conv2D(16, 3, padding='same', activation='relu'),
            Conv2D(16, 5, padding='same', activation='relu'),
        ])
        self.conv3 = Conv2D(16, 1)
        # Searchable residual connection around conv3.
        self.skipconnect = InputChoice(n_candidates=1)
        self.bn = BatchNormalization()
        self.gap = AveragePooling2D(2)
        self.fc1 = Dense(120, activation='relu')
        self.fc2 = Dense(84, activation='relu')
        self.fc3 = Dense(10)

    def call(self, x):
        batch = x.shape[0]
        features = self.conv1(x)
        pooled = self.pool(features)
        branch = self.conv2(pooled)
        out = self.conv3(branch)
        # InputChoice returns None when the skip path is not selected.
        residual = self.skipconnect([branch])
        if residual is not None:
            out += residual
        out = self.pool(self.bn(out))
        out = self.gap(out)
        out = tf.reshape(out, [batch, -1])
        out = self.fc1(out)
        out = self.fc2(out)
        return self.fc3(out)
def accuracy(truth, logits):
    """Fraction of samples whose argmax prediction matches the label."""
    labels = tf.reshape(truth, (-1,))
    predictions = tf.cast(tf.math.argmax(logits, axis=1), labels.dtype)
    hits = tf.cast(predictions == labels, tf.int32)
    return tf.math.reduce_sum(hits).numpy() / hits.shape[0]
def accuracy_metrics(truth, logits):
    """Wrap ``accuracy()`` in the dict format expected by the trainer."""
    return {'accuracy': accuracy(truth, logits)}
if __name__ == '__main__':
    # Load CIFAR-10 and rescale pixel values into [0, 1].
    (x_train, y_train), (x_valid, y_valid) = tf.keras.datasets.cifar10.load_data()
    x_train, x_valid = x_train / 255.0, x_valid / 255.0

    model = Net()
    trainer = EnasTrainer(
        model,
        loss=SparseCategoricalCrossentropy(from_logits=True, reduction=Reduction.NONE),
        metrics=accuracy_metrics,
        reward_function=accuracy,
        optimizer=SGD(learning_rate=0.001, momentum=0.9),
        batch_size=64,
        num_epochs=2,
        dataset_train=(x_train, y_train),
        dataset_valid=(x_valid, y_valid)
    )
    trainer.train()
"noreply@github.com"
] | petuum.noreply@github.com |
aeec0d323c820ebcbab293cfab36ab3d59368f88 | 3537265c1b60f0c0eb06d165a0b5779438fc698c | /py/vision/test1/visionlib.py | d1c1136cad4cce4ea52424fc24b104b74f284547 | [
"MIT"
] | permissive | iqihao/mabo.io | f864ba3e158d8c6ee113dd8c0ae8708cae86e9d1 | 7f646db9d5ee3cd0b137866bf8eaf295890f134c | refs/heads/master | 2021-05-30T12:59:56.723936 | 2016-02-10T23:45:36 | 2016-02-10T23:45:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py |
import itertools
import cv2
import numpy as np
def _nearest_matches(query_desc, train_desc, keypoints, distance):
    """Match each query descriptor to its nearest train descriptor.

    Returns the train ``keypoints`` whose scaled match distance is below
    ``distance``, ordered best-first.  Factored out of findKeyPoints,
    which previously duplicated this whole pass twice (and relied on the
    Python-2-only ``itertools.izip`` to do so).
    """
    flann_params = dict(algorithm=1, trees=4)
    flann = cv2.flann_Index(train_desc, flann_params)
    idx, dist = flann.knnSearch(query_desc, 1, params={})
    del flann
    dist = (dist[:, 0] / 2500.0).reshape(-1).tolist()
    idx = idx.reshape(-1).tolist()
    # Sort match indices by ascending distance (best matches first).
    order = sorted(range(len(dist)), key=lambda i: dist[i])
    return [keypoints[idx[i]] for i in order if dist[i] < distance]


def findKeyPoints(img, template, distance=200):
    """Find SIFT keypoints of ``img`` and ``template`` that cross-match.

    Returns ``(skp_final, tkp_final)``: the image keypoints nearest to the
    template's descriptors and vice versa, each filtered by ``distance``
    and sorted best-first.  (Legacy OpenCV 2.x feature API.)
    """
    detector = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")

    skp = detector.detect(img)
    skp, sd = descriptor.compute(img, skp)
    tkp = detector.detect(template)
    tkp, td = descriptor.compute(template, tkp)

    skp_final = _nearest_matches(td, sd, skp, distance)
    tkp_final = _nearest_matches(sd, td, tkp, distance)
    return skp_final, tkp_final
def drawKeyPoints(img, template, skp, tkp, num=-1):
    """Render template and image side by side with match lines between
    corresponding keypoints; ``num`` limits how many lines (-1 = all)."""
    h1, w1 = img.shape[:2]
    h2, w2 = template.shape[:2]
    # Vertical offset that centres the (shorter) template on the canvas.
    hdif = (h1 - h2) / 2
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)
    canvas[hdif:hdif + h2, :w2] = template
    canvas[:h1, w2:w1 + w2] = img
    maxlen = min(len(skp), len(tkp))
    count = num if 0 <= num <= maxlen else maxlen
    for i in range(count):
        pt_a = (int(tkp[i].pt[0]), int(tkp[i].pt[1] + hdif))
        pt_b = (int(skp[i].pt[0] + w2), int(skp[i].pt[1]))
        cv2.line(canvas, pt_a, pt_b, (255, 0, 0))
    return canvas
"aidear@163.com"
] | aidear@163.com |
f1583880f8ed6f14fca3054e147fec9973b07d2a | 096dc9b83f9f8764cbd92f628f215d34b4bab4d5 | /src/11/simple_communication_between_interpreters/echoclient.py | 96fc37158b72a9ab6493fd50f1d1c6cb059b4ef9 | [] | no_license | pyarnold/python-cookbook | 18ceecec5a728d88ea9065bb1b2323e59df686a1 | ef8a32741d4571b75d995637a8b2edc3b85a2859 | refs/heads/master | 2021-01-21T09:29:34.998385 | 2014-03-19T14:57:17 | 2014-03-19T14:57:17 | 17,908,202 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from multiprocessing.connection import Client
c = Client(('localhost', 25000), authkey=b'peekaboo')
c.send('hello')
print('Got:', c.recv())
c.send(42)
print('Got:', c.recv())
c.send([1, 2, 3, 4, 5])
print('Got:', c.recv())
| [
"dave@dabeaz.com"
] | dave@dabeaz.com |
fc92399aa42aec6f5c79201431a31e094a729032 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03377/s172991411.py | d2919c59254b5b5c249175befd8776cdf3a52de3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | def main():
a,b,x = map(int, input().split())
if a<=x and x<=a+b:
print('YES')
else:
print('NO')
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2bf27f22ff1a3a923f49193f02937201c8afebdf | 3f2d1c68d07dd6677bc19c559b1960ca5fef6346 | /tensorbord/train.py | 6a734e1fb8529e6c98a51c56d939fe1312206302 | [] | no_license | 213584adghj/ml | 6ffcf732377dabda129990e3a89468e18dd2700c | f73080e13c4a1c6babe0229bdb939eb3a7f988b6 | refs/heads/master | 2021-03-13T23:22:41.981534 | 2020-03-12T01:59:21 | 2020-03-12T01:59:21 | 246,720,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,684 | py | # coding: utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
class train(object):
    """Single-layer softmax MNIST classifier (TF1 graph mode) with
    TensorBoard summaries; builds the graph and trains on construction."""

    def __init__(self):
        with tf.name_scope('input'):
            # Flattened 28x28 images and one-hot labels.
            self.x = tf.placeholder(tf.float32, [None, 784], name='x-input')
            self.y = tf.placeholder(tf.float32, [None, 10], name='y-input')
        self.prediction, self.loss, self.train_step, self.accuracy = self.get_model()
        self.train_model()

    def variable_summaries(self, var):
        """Attach mean/stddev/min/max/histogram summaries to ``var``."""
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
                tf.summary.scalar('stddev', stddev)
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
            tf.summary.histogram('histogram', var)

    def get_model(self):
        """Build the graph; returns (prediction, loss, train_step, accuracy)."""
        with tf.name_scope('layer'):
            # One dense layer, no hidden units: logits = x @ W + b.
            with tf.name_scope('wights'):
                W = tf.Variable(tf.zeros([784, 10]), name='W')
                self.variable_summaries(W)
            with tf.name_scope('biases'):
                b = tf.Variable(tf.zeros([10]), name='b')
                self.variable_summaries(b)
            with tf.name_scope('wx_plus_b'):
                wx_plus_b = tf.matmul(self.x, W) + b
            with tf.name_scope('softmax'):
                prediction = tf.nn.softmax(wx_plus_b)
        with tf.name_scope('loss'):
            # Cross-entropy loss (a squared-error variant was abandoned).
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=prediction))
            tf.summary.scalar('loss', loss)
        with tf.name_scope('train'):
            # Plain gradient descent with learning rate 0.2.
            train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
        with tf.name_scope('accuracy'):
            with tf.name_scope('correct_prediction'):
                correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(prediction, 1))
            with tf.name_scope('accuracy'):
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                tf.summary.scalar('accuracy', accuracy)
        return prediction, loss, train_step, accuracy

    def train_model(self):
        """Run 51 epochs of mini-batch training, logging summaries to ./logs."""
        mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
        batch_size = 100
        # Number of mini-batches per epoch.
        n_batch = mnist.train.num_examples // batch_size
        merged = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            writer = tf.summary.FileWriter('logs', sess.graph)
            for epoch in range(51):
                for batch in range(n_batch):
                    batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                    summary, _ = sess.run([merged, self.train_step], feed_dict={self.x: batch_xs, self.y: batch_ys})
                # Log the last batch's summaries and the test accuracy per epoch.
                writer.add_summary(summary, epoch)
                acc = sess.run(self.accuracy, feed_dict={self.x: mnist.test.images, self.y: mnist.test.labels})
                print("Iter" + str(epoch) + ",Testing Accuracy" + str(acc))
a = train()
| [
"you@example.com"
] | you@example.com |
51a886388078a4d3e2a0dbdcf134e56b1993c5af | 26cd3fc39f99b74c2741dcbac0d2fed0098d2deb | /thorpy/elements/slidersetter.py | 22b4d5a12c423ed038b9097b61e2fc5154df2817 | [
"MIT"
] | permissive | YannThorimbert/Thorpy-1.4 | b35c4293ba389e13fb7a2691543f9de7030d3f20 | 8dfc5f2fd047ae39c2c8aac1e23326be6152663b | refs/heads/master | 2021-04-09T17:12:34.356309 | 2015-05-11T20:53:18 | 2015-05-11T20:53:18 | 35,449,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,006 | py | from thorpy.elements.slider import SliderX
from thorpy.elements.element import Element
from thorpy.miscgui import functions, style, painterstyle
class SliderXSetter(SliderX):
"""Like a slider, but has a get_value method"""
def __init__(self,
length,
limvals=None,
text="",
elements=None,
normal_params=None,
namestyle=None,
valuestyle=None,
typ=float,
initial_value=None):
namestyle = style.STYLE_SLIDER_NAME if namestyle is None else namestyle
valuestyle=style.STYLE_SLIDER_VALUE if valuestyle is None else valuestyle
SliderX.__init__(self, length, limvals, "", elements, normal_params,
initial_value)
self._value_type = typ
self._round_decimals = 2
self._name_element = self._get_name_element(text, namestyle) # herite de setter
self._value_element = self._get_value_element(valuestyle)
self.add_elements([self._name_element, self._value_element])
def finish(self):
SliderX.finish(self)
self._refresh_pos()
self._drag_element.set_setter()
value = str(self.get_value())
self._value_element.set_text(value)
self.set_prison()
def show_value(self, show_value):
self._value_element.visible = show_value
def _get_name_element(self, name, namestyle):
painter = functions.obtain_valid_painter(
painterstyle.CHECKER_NAME_PAINTER,
size=style.SIZE)
el = Element(name)
el.set_painter(painter)
if namestyle:
el.set_style(namestyle)
el.finish()
return el
def _get_value_element(self, valuestyle):
painter = functions.obtain_valid_painter(
painterstyle.CHECKER_VALUE_PAINTER,
size=style.CHECK_SIZE)
el = Element(str(self.get_value()))
el.set_painter(painter)
if valuestyle:
el.set_style(valuestyle)
el.finish()
return el
def _refresh_pos(self):
l = self.get_fus_topleft()[0]
(x, y) = self.get_fus_center()
l -= self._name_element.get_fus_size()[0] + style.MARGINS[0]
self._name_element.set_center((None, y))
self._name_element.set_topleft((l, None))
w = self.get_fus_rect().right + style.MARGINS[0]
self._value_element.set_center((None, y))
self._value_element.set_topleft((w, None))
def refresh_value(self):
self._value_element.unblit()
self._value_element.update()
value = str(self.get_value())
self._value_element.set_text(value)
self._value_element.blit()
self._value_element.update()
def get_value(self):
value = SliderX.get_value(self)
return self._value_type(value)
def set_font_color(self, color, state=None, center_title=True):
"""set font color for a given state"""
SliderX.set_font_color(self, color, state, center_title)
self._name_element.set_font_color(color, state, center_title)
def set_font_size(self, size, state=None, center_title=True):
"""set font size for a given state"""
SliderX.set_font_size(self, size, state, center_title)
self._name_element.set_font_size(size, state, center_title)
def set_font_effects(self, biu, state=None, center=True, preserve=False):
"""biu = tuple : (bold, italic, underline)"""
SliderX.set_font_effects(self, bio, state, center, preserve)
self._name_element.set_font_effects(biu, state, center, preserve)
def pix_to_val(self, pix, x0):
value = SliderX.pix_to_val(self, pix, x0)
if self._value_type is float:
return round(value, self._round_decimals)
elif self._value_type is int:
return int(round(value))
def get_help_rect(self):
return self._name_element.get_help_rect()
| [
"yann.thorimbert@gmail.com"
] | yann.thorimbert@gmail.com |
170ab9e04c87704324e5667f1c73ce9e974a7587 | 4015291afebfd346da3fee4b1d5a775882b5b461 | /services/director-v2/tests/unit/test_models_dynamic_services.py | 03c9084f24b2632efd11873930d77a10d59eb40f | [
"MIT"
] | permissive | pcrespov/osparc-simcore | 3a8a6b5252038542f515c7e90d983ac6f1fb4de7 | eb5e00bc2cf4acfe81f5dc422a5e50a4646c9596 | refs/heads/master | 2023-08-06T04:33:38.594066 | 2023-07-12T09:47:00 | 2023-07-12T09:47:00 | 130,357,545 | 0 | 1 | MIT | 2023-04-18T08:04:27 | 2018-04-20T12:10:41 | Python | UTF-8 | Python | false | false | 6,964 | py | # pylint: disable=redefined-outer-name
import string
from collections import namedtuple
import pytest
from simcore_service_director_v2.models.schemas.dynamic_services import (
RunningDynamicServiceDetails,
SchedulerData,
ServiceBootType,
ServiceState,
)
from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import (
DockerContainerInspect,
)
from simcore_service_director_v2.modules.dynamic_sidecar.docker_states import (
CONTAINER_STATUSES_UNEXPECTED,
extract_containers_minimum_statuses,
)
# the following is the predefined expected ordering, change below test only if
# this order is not adequate anymore
_EXPECTED_ORDER = [
ServiceState.FAILED,
ServiceState.PENDING,
ServiceState.PULLING,
ServiceState.STARTING,
ServiceState.RUNNING,
ServiceState.STOPPING,
ServiceState.COMPLETE,
]
CNT_STS_RESTARTING = "restarting"
CNT_STS_DEAD = "dead"
CNT_STS_PAUSED = "paused"
CNT_STS_CREATED = "created"
CNT_STS_RUNNING = "running"
CNT_STS_REMOVING = "removing"
CNT_STS_EXITED = "exited"
ALL_CONTAINER_STATUSES: set[str] = {
CNT_STS_RESTARTING,
CNT_STS_DEAD,
CNT_STS_PAUSED,
CNT_STS_CREATED,
CNT_STS_RUNNING,
CNT_STS_REMOVING,
CNT_STS_EXITED,
}
RANDOM_STRING_DATASET = string.ascii_letters + string.digits
ExpectedStatus = namedtuple("ExpectedStatus", "containers_statuses, expected_state")
@pytest.fixture
def service_message() -> str:
return "starting..."
@pytest.fixture
def service_state() -> ServiceState:
return ServiceState.RUNNING
@pytest.fixture
def mock_containers_statuses() -> dict[str, dict[str, str]]:
return {
"container_id_1": {"Status": "created"},
"container_id_2": {"Status": "dead", "Error": "something"},
"container_id_3": {"Status": "running"},
}
# UTILS
def _make_status_dict(status: str) -> DockerContainerInspect:
assert status in ALL_CONTAINER_STATUSES
status_dict = {"Status": status}
if status in CONTAINER_STATUSES_UNEXPECTED:
status_dict["Error"] = "failed state here"
return DockerContainerInspect.from_container(
{"State": status_dict, "Name": "", "Id": ""}
)
def get_containers_inspect(*args: str) -> list[DockerContainerInspect]:
return [_make_status_dict(x) for x in args]
def _all_states() -> set[ServiceState]:
return set(ServiceState)
SAMPLE_EXPECTED_STATUSES: list[ExpectedStatus] = [
ExpectedStatus(
containers_statuses=get_containers_inspect(
CNT_STS_RESTARTING, CNT_STS_EXITED, CNT_STS_RUNNING
),
expected_state=ServiceState.FAILED,
),
ExpectedStatus(
containers_statuses=get_containers_inspect(CNT_STS_CREATED, CNT_STS_RUNNING),
expected_state=ServiceState.STARTING,
),
ExpectedStatus(
containers_statuses=get_containers_inspect(CNT_STS_CREATED),
expected_state=ServiceState.STARTING,
),
ExpectedStatus(
containers_statuses=get_containers_inspect(CNT_STS_RUNNING),
expected_state=ServiceState.RUNNING,
),
ExpectedStatus(
containers_statuses=get_containers_inspect(CNT_STS_REMOVING, CNT_STS_EXITED),
expected_state=ServiceState.FAILED,
),
]
def test_running_service_details_make_status(
scheduler_data: SchedulerData, service_message: str, service_state: ServiceState
):
running_service_details = RunningDynamicServiceDetails.from_scheduler_data(
node_uuid=scheduler_data.node_uuid,
scheduler_data=scheduler_data,
service_state=service_state,
service_message=service_message,
)
print(running_service_details)
assert running_service_details
running_service_details_dict = running_service_details.dict(
exclude_unset=True, by_alias=True
)
expected_running_service_details = {
"boot_type": ServiceBootType.V2,
"project_id": scheduler_data.project_id,
"service_state": service_state,
"service_message": service_message,
"service_uuid": scheduler_data.node_uuid,
"service_key": scheduler_data.key,
"service_version": scheduler_data.version,
"service_host": scheduler_data.service_name,
"user_id": scheduler_data.user_id,
"service_port": scheduler_data.service_port,
}
assert running_service_details_dict == expected_running_service_details
def test_all_states_are_mapped():
service_state_defined: set[ServiceState] = _all_states()
comparison_mapped: set[ServiceState] = set(ServiceState.comparison_order().keys())
assert (
service_state_defined == comparison_mapped
), "entries from _COMPARISON_ORDER do not match all states in ServiceState"
def test_equality():
for service_state in _all_states():
assert service_state == ServiceState(service_state.value)
def test_expected_order():
for k, service_state in enumerate(_EXPECTED_ORDER):
prior_states = _EXPECTED_ORDER[:k]
for prior_service_state in prior_states:
assert prior_service_state < service_state
assert prior_service_state != service_state
assert service_state > prior_service_state
def test_min_service_state_is_lowerst_in_expected_order():
for i in range(len(_EXPECTED_ORDER)):
items_after_index = _EXPECTED_ORDER[i:]
assert min(items_after_index) == items_after_index[0]
@pytest.mark.parametrize(
"containers_statuses, expected_state",
[(x.containers_statuses, x.expected_state) for x in SAMPLE_EXPECTED_STATUSES],
ids=[x.expected_state.name for x in SAMPLE_EXPECTED_STATUSES],
)
def test_extract_containers_minimim_statuses(
containers_statuses: list[DockerContainerInspect], expected_state: ServiceState
):
service_state, _ = extract_containers_minimum_statuses(containers_statuses)
assert service_state == expected_state
def test_not_implemented_comparison() -> None:
with pytest.raises(TypeError):
# pylint: disable=pointless-statement
ServiceState.FAILED > {} # type: ignore
def test_regression_legacy_service_compatibility() -> None:
api_response = {
"published_port": None,
"entry_point": "",
"service_uuid": "e5aa2f7a-eac4-4522-bd4f-270b5d8d9fff",
"service_key": "simcore/services/dynamic/mocked",
"service_version": "1.6.10",
"service_host": "mocked_e5aa2f7a-eac4-4522-bd4f-270b5d8d9fff",
"service_port": 8888,
"service_basepath": "/x/e5aa2f7a-eac4-4522-bd4f-270b5d8d9fff",
"service_state": "running",
"service_message": "",
"user_id": "1",
"project_id": "b1ec5c8e-f5bb-11eb-b1d5-02420a000006",
}
service_details = RunningDynamicServiceDetails.parse_obj(api_response)
assert service_details
service_url = f"http://{service_details.host}:{service_details.internal_port}{service_details.basepath}"
assert service_url == service_details.legacy_service_url
| [
"noreply@github.com"
] | pcrespov.noreply@github.com |
a8669f9772872cd6be435f83a6f58da83cd2299d | 9433ce01c6e2906c694b6f0956a4640e1872d4d2 | /src/main/python/wdbd/codepool/sqlalchemy/user_dept.py | 7e8675698dd3166725d47b7f79533e7a1752a3b7 | [] | no_license | shwdbd/python_codepool | fcd7950fc1339994186461ae18c34cee238938ee | 92a4fb61d060f9a545499b6b7f99a4dc211d5009 | refs/heads/master | 2023-02-20T19:49:23.677824 | 2022-06-15T08:53:51 | 2022-06-15T08:53:51 | 209,431,254 | 0 | 1 | null | 2023-02-15T21:58:53 | 2019-09-19T00:56:03 | Python | UTF-8 | Python | false | false | 5,924 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : user_dept.py
@Time : 2020/02/09 14:03:54
@Author : Jeffrey Wang
@Version : 1.0
@Contact : shwangjj@163.com
@Desc : 用 员工、部门 为例子,实现ORM基本操作
'''
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import wdbd.codepool.sqlalchemy.conn as conn
from sqlalchemy.orm import aliased
from sqlalchemy import text
from sqlalchemy import func
Session = sessionmaker(bind=conn.get_conn_engine())
# 创建对象的基类:
Base = declarative_base()
# 定义User对象:
class Employee(Base):
# 表的名字:
__tablename__ = 'employee'
# 表的结构:
# id = Column('id', String(20), primary_key=True)
name = Column('name', String(20), primary_key=True)
age = Column('age', Integer())
def __repr__(self):
return "<Employee(name='%s', age='%i')>" % (
self.name, self.age)
class EmployeeManager:
def __init__(self):
# session = Session()
# session.execute('delete from employee')
# session.commit()
pass
def create_db_structor(self):
# 建立表结构:
engine = conn.get_conn_engine()
Base.metadata.create_all(engine)
print('数据库表结构 新建完毕!')
def add(self, new_emplpoyee):
# 新添加记录
# new_emplpoyee 是 Employee or list
# pk重复的情况,会抛出异常
try:
session = Session()
if type(new_emplpoyee) is Employee:
session.add(new_emplpoyee)
elif type(new_emplpoyee) is list:
session.add_all(new_emplpoyee)
else:
print('新增员工,参数类型错误!')
raise Exception('新增员工,参数类型错误!')
session.commit()
except Exception as err:
print('Exp:' + str(err))
session.rollback()
finally:
pass
def query(self):
session = Session()
# # 所有:
# for instance in session.query(Employee).order_by(Employee.age):
# print(instance)
# # 所有,按字段
# for name, age in session.query(Employee.name, Employee.age):
# print("{0} , {1}".format(name, age))
# # 查询所有,对象和字段混用, KeyedTuple
# for row in session.query(Employee, Employee.age).all():
# print("{0} , {1}".format(row.Employee, row.age))
# # <Employee(name='U1', age='1')> , 1
# 列可以取别名
for row in session.query(Employee, Employee.age.label('suishu')).all():
print("{0} , {1}".format(row.Employee, row.suishu))
# <Employee(name='U1', age='1')> , 1
def query_all_by_aliased(self):
# 整个表对象都用别名
session = Session()
user_alias = aliased(Employee, name='user_alias')
for row in session.query(user_alias, user_alias.name).all():
print(row.user_alias)
def query_limit(self):
"""查询,使用LIMIT和OFFSET
类似于SQL中: TOP 10, LIMIT 10
"""
session = Session()
# 仅查前3个(含第三个),age逆序排序
for row in session.query(Employee).order_by(-Employee.age)[:3]:
print(row)
def query_by_filter(self):
# 条件查询
session = Session()
# 单一条件:
print('单一条件 = 年龄小于等于5:')
print('使用filter_by:')
for row in session.query(Employee).filter_by(age=5):
print(row)
print('使用filter:')
for row in session.query(Employee).filter(Employee.age <= 5)[:3]:
print(row)
def query_by_filter_text(self):
"""使用SQL语句进行过滤查询
"""
session = Session()
# 直接的SQL语句
# for row in session.query(Employee).filter(text(' AGE<3 and name like "U%" ')).all():
# print(row)
# 含参数的SQL:使用:字段的形式
# sql = 'AGE<:age and name like ":name_pre%"'
sql = 'AGE<:age and name=:name_pre'
for row in session.query(Employee).filter(text(sql)).params(age=5, name_pre='U1').all():
print(row)
def query_count(self):
"""查询,使用COUNT
"""
session = Session()
count = session.query(Employee).filter(Employee.name.like('U%')).count()
print(count)
def query_group_count(self):
"""查询,GROUP和COUNT配合
"""
session = Session()
result = session.query(func.count(Employee.age), Employee.age).group_by(Employee.age).all()
print(result)
# [(1, 1), (1, 2), (1, 3), (1, 4), (2, 5), (1, 6), (1, 7), (1, 8), (1, 9)]
# SELECT count(employee.age) AS count_1, employee.age AS employee_age FROM employee GROUP BY employee.age
def query_count_star(self):
# SELECT count(*) FROM table
session = Session()
result = session.query(func.count('*')).select_from(Employee).scalar()
print(result)
if __name__ == "__main__":
# dirty数据无Demo
mgr = EmployeeManager()
mgr.create_db_structor()
# print(Employee.__table__)
# e1 = Employee(name='JACK', age=33)
# e2 = Employee(name='Mike', age=55)
# lst_employee = [e1, e2]
# # mgr.add(lst_employee)
# mgr.add(e1)
# lst_em = []
# for i in range(1, 10):
# lst_em.append(Employee(name='U'+str(i), age=i))
# mgr.add(lst_em)
# query:
# mgr.query()
# mgr.query_all_by_aliased()
# mgr.query_limit()
# mgr.query_by_filter()
# mgr.query_by_filter_text()
# mgr.query_count()
# mgr.query_group_count()
# mgr.query_count_star()
| [
"shwangjj@163.com"
] | shwangjj@163.com |
4b828290eeedaeeb5c56247fb70f45bd34cdc8cb | d8ac6dc1fafbca669ac14d3cd9549eba2d503bc4 | /plot.py | 801ebc284e0ceccacbde992cf0ad3f8fabbf5192 | [] | no_license | mattvenn/visa-tek | 34951f05a37fdcc1ad8c0a2dc153b90c5fedd958 | eac2ca584a33e0ab6f739462d30f1a41faa542f9 | refs/heads/master | 2020-06-10T21:12:57.832578 | 2020-02-27T14:20:54 | 2020-02-27T14:20:54 | 193,749,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | #!/usr/bin/python3
import matplotlib.pyplot as plt
import csv
def load(filename):
freq = []
amp = []
count = 0
with open(filename) as fh:
reader = csv.reader(fh)
for row in reader:
freq.append(float(row[0]))
amp.append(float(row[1]))
return freq, amp
def plot(freq, amp):
fig, ax = plt.subplots()
# freq plot
ax.set(xlabel='Freq (kHz)', ylabel='Amp (V)', title='frequency response')
ax.set(xlim=(0, 2000000), ylim=(0, 6))
ax.grid(True)
ax.plot(freq, amp)
plt.show()
if __name__ == '__main__':
freq, amp = load("results.csv")
plot(freq, amp)
| [
"matt@mattvenn.net"
] | matt@mattvenn.net |
c31b9da1d8505959099df9656b61afd7122f1140 | 33f81c4f22df664de162339f685f1e25a38a50fc | /apps/analytics/mixins.py | 5db5bc6ed82bacab74d3c44afbae9e94418f3431 | [] | no_license | SimonielMusyoki/PythonEcommerce | 1c96f43ff191e3b5aff488c7d53501dd64c2eaf1 | 94583f4162c899373987acba56f2e34d6e91be3b | refs/heads/master | 2023-04-27T05:01:11.142119 | 2023-04-20T08:32:01 | 2023-04-20T08:32:01 | 199,605,797 | 1 | 0 | null | 2023-04-18T14:47:23 | 2019-07-30T08:08:53 | Python | UTF-8 | Python | false | false | 449 | py | from .signals import object_viewed_signal
class ObjectViewedMixin(object):
def get_context_data(self, *args, **kwargs):
context = super(ObjectViewedMixin, self).get_context_data(*args, **kwargs)
request = self.request
instance = context.get("object")
if instance:
object_viewed_signal.send(
instance.__class__, instance=instance, request=request
)
return context
| [
"musyoki.mtk3@gmail.com"
] | musyoki.mtk3@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.