blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
015010dbfd468818010fda6bbce5e3d6a03ea127
|
2b0f94a0cd8c77ceb210d2c3ea5ffdd195effffe
|
/silver/models/discounts.py
|
607e8f277c43fcb91a2d19588aecebdefb38cd46
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
DocTocToc/silver
|
ba2bbfb2d5b1b4377c95bb67fc88414b72600cf0
|
a511a93f2e6608b637a79b92c6d73d41d506a0db
|
refs/heads/master
| 2023-02-04T09:47:37.132929
| 2023-02-03T08:22:59
| 2023-02-03T08:22:59
| 168,461,098
| 0
| 0
|
Apache-2.0
| 2019-01-31T04:10:10
| 2019-01-31T04:10:08
| null |
UTF-8
|
Python
| false
| false
| 13,281
|
py
|
# Copyright (c) 2022 Pressinfra SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from decimal import Decimal
from fractions import Fraction
from typing import List, Iterable, Tuple
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q, F
from django.template.loader import render_to_string
from .subscriptions import Subscription
from .documents.entries import OriginType
from .fields import field_template_path
from silver.utils.dates import end_of_interval
from silver.utils.models import AutoCleanModelMixin
class DocumentEntryBehavior(models.TextChoices):
    """How a discount is materialized in billing documents."""
    DEFAULT = "default", "Default"
    # FORCE_PER_ENTRY = "force_per_entry", "Force per entry"
    # FORCE_PER_ENTRY_TYPE = "force_per_entry", "Force per entry type"
    FORCE_PER_DOCUMENT = "force_per_document", "Force per document"


class DiscountStackingType(models.TextChoices):
    """How a discount combines with other applicable discounts."""
    # SUCCESSIVE = "successive", "Successive"
    ADDITIVE = "additive", "Additive"
    NONCUMULATIVE = "noncumulative", "Noncumulative"


class DiscountState(models.TextChoices):
    """Toggle for enabling/disabling a discount without deleting it."""
    ACTIVE = "active", "Active"
    INACTIVE = "inactive", "Inactive"


class DiscountTarget(models.TextChoices):
    """Which billed amounts a discount applies to."""
    ALL = "all"
    PLAN_AMOUNT = "plan_amount"
    METERED_FEATURES = "metered_features"


class DurationIntervals(models.TextChoices):
    """Units for a discount's limited-duration window."""
    BILLING_CYCLE = 'billing_cycle'
    DAY = 'day'
    WEEK = 'week'
    MONTH = 'month'
    YEAR = 'year'
class Discount(AutoCleanModelMixin, models.Model):
    """A percentage discount applicable to subscriptions' plan amount,
    metered features, or both.

    The ``customers``, ``subscriptions`` and ``plans`` relations each narrow
    which subscriptions the discount matches; leaving one empty means "no
    restriction" on that dimension (see ``matching_subscriptions``).
    """

    STATES = DiscountState
    STACKING_TYPES = DiscountStackingType
    ENTRY_BEHAVIOR = DocumentEntryBehavior
    TARGET = DiscountTarget
    DURATION_INTERVALS = DurationIntervals

    name = models.CharField(
        max_length=200,
        help_text='The discount\'s name. May be used for identification or displaying in an invoice.',
    )
    product_code = models.ForeignKey('ProductCode', null=True, blank=True,
                                     related_name='discounts', on_delete=models.PROTECT,
                                     help_text="The discount's product code.")
    customers = models.ManyToManyField("silver.Customer", related_name='discounts', blank=True)
    subscriptions = models.ManyToManyField("silver.Subscription", related_name='discounts', blank=True)
    plans = models.ManyToManyField("silver.Plan", related_name='discounts', blank=True)
    percentage = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True,
                                     help_text="A percentage to be discounted. For example 25 (%)")
    applies_to = models.CharField(choices=TARGET.choices, max_length=24,
                                  help_text="Defines what the discount applies to.",
                                  default=TARGET.ALL)
    document_entry_behavior = models.CharField(choices=ENTRY_BEHAVIOR.choices,
                                               max_length=32, default=ENTRY_BEHAVIOR.DEFAULT,
                                               help_text="Defines how the discount will be shown in the billing "
                                                         "documents.")
    discount_stacking_type = models.CharField(choices=STACKING_TYPES.choices,
                                              max_length=24, default=STACKING_TYPES.ADDITIVE,
                                              help_text="Defines how the discount will interact with other discounts.")
    state = models.CharField(choices=STATES.choices, max_length=16, default=STATES.ACTIVE,
                             help_text="Can be used to easily toggle discounts on or off.")
    start_date = models.DateField(null=True, blank=True,
                                  help_text="When set, the discount will only apply to entries with a lower "
                                            "or equal start_date. Otherwise, a prorated discount may still apply, but"
                                            "only if the entries end_date is greater than the discount's start_date.")
    end_date = models.DateField(null=True, blank=True,
                                help_text="When set, the discount will only apply to entries with a greater "
                                          "or equal end_date. Otherwise, a prorated discount may still apply, but"
                                          "only if the entries start_date is lower than the discount's end_date.")
    duration_count = models.IntegerField(null=True, blank=True,
                                         help_text="Indicate the duration for which the discount is available, after "
                                                   "a subscription started. If not set, the duration is indefinite.")
    duration_interval = models.CharField(null=True, blank=True, max_length=16, choices=DURATION_INTERVALS.choices)

    def clean(self):
        """Validate that percentage, when set, lies within [0, 100].

        The previous implementation performed this exact check twice in a
        row; the duplicate (dead) check has been removed.
        """
        if (
            self.percentage and
            not Decimal(0) <= self.percentage <= Decimal(100)
        ):
            raise ValidationError({"percentage": "Must be between 0 and 100."})

    def __str__(self) -> str:
        return self.name

    @property
    def amount_description(self) -> str:
        """Human-readable summary, e.g. '25% off Plan, 25% off Metered Features'."""
        discount = []
        if self.applies_to in [self.TARGET.ALL, self.TARGET.PLAN_AMOUNT]:
            discount.append(f"{self.percentage}% off Plan")
        if self.applies_to in [self.TARGET.ALL, self.TARGET.METERED_FEATURES]:
            discount.append(f"{self.percentage}% off Metered Features")
        return ", ".join(discount)

    def matching_subscriptions(self):
        """Return the queryset of subscriptions this discount applies to.

        Empty customers/plans/subscriptions relations mean "no restriction"
        on that dimension.
        """
        subscriptions = self.subscriptions.all()
        if not subscriptions:
            subscriptions = Subscription.objects.all()
        customers = self.customers.all()
        plans = self.plans.all()
        if customers:
            subscriptions = subscriptions.filter(customer__in=customers)
        if plans:
            subscriptions = subscriptions.filter(plan__in=plans)
        return subscriptions

    @classmethod
    def for_subscription(cls, subscription: "silver.models.Subscription"):
        """Discounts matching the given subscription on every dimension."""
        return Discount.objects.filter(
            Q(customers=subscription.customer) | Q(customers=None),
            Q(subscriptions=subscription) | Q(subscriptions=None),
            Q(plans=subscription.plan) | Q(plans=None),
        ).annotate(matched_subscriptions=F("subscriptions"))

    @classmethod
    def for_subscription_per_document(cls, subscription: "silver.models.Subscription"):
        """Subset of for_subscription() that must be applied per document.

        NOTE(review): plan_amount_discount is not a field declared on this
        model — presumably an annotation added elsewhere; confirm before
        relying on this queryset.
        """
        return cls.for_subscription(subscription).filter(
            (
                Q(document_entry_behavior=DocumentEntryBehavior.DEFAULT) &
                Q(plan_amount_discount=F("percentage"))
            ) |
            Q(
                document_entry_behavior=DocumentEntryBehavior.FORCE_PER_DOCUMENT
            )
        )

    @property
    def as_additive(self) -> Decimal:
        """Discount as an additive fraction (25% -> Decimal('0.25'))."""
        return (self.percentage or Decimal(0)) / Decimal(100)

    @property
    def as_multiplier(self) -> Decimal:
        """Remaining-amount multiplier (25% -> Decimal('0.75')).

        A missing percentage is treated as 0, mirroring as_additive. The
        previous expression `Decimal(100) - self.percentage or 0` raised
        TypeError when percentage was None, because the subtraction binds
        tighter than `or`.
        """
        return (Decimal(100) - (self.percentage or Decimal(0))) / Decimal(100)

    @classmethod
    def filter_discounts_affecting_plan(cls, discounts: Iterable["Discount"]) -> List["Discount"]:
        """Keep only non-zero discounts that target the plan amount."""
        return [discount for discount in discounts
                if (
                    discount.percentage > 0 and
                    discount.applies_to in [DiscountTarget.ALL, DiscountTarget.PLAN_AMOUNT]
                )]

    @classmethod
    def filter_discounts_affecting_metered_features(cls, discounts: Iterable["Discount"]) -> List["Discount"]:
        """Keep only non-zero discounts that target metered features."""
        return [discount for discount in discounts
                if (
                    discount.percentage > 0 and
                    discount.applies_to in [DiscountTarget.ALL, DiscountTarget.METERED_FEATURES]
                )]

    @classmethod
    def filter_discounts_per_document(cls, discounts: Iterable["Discount"]) -> List["Discount"]:
        """Keep discounts that should be applied at document level.

        NOTE(review): `discount.percentage == discount.percentage` is a
        tautology (always True for non-NaN values); it was presumably meant
        to compare percentage against a plan-amount discount value — confirm
        intent before changing behavior.
        """
        return [discount for discount in discounts
                if discount.document_entry_behavior == DocumentEntryBehavior.FORCE_PER_DOCUMENT or
                (
                    discount.document_entry_behavior == DocumentEntryBehavior.DEFAULT and
                    discount.percentage == discount.percentage
                )]

    @classmethod
    def filter_additive(cls, discounts: Iterable["Discount"]) -> List["Discount"]:
        """Keep discounts whose effects add up with each other."""
        return [discount for discount in discounts
                if discount.discount_stacking_type == DiscountStackingType.ADDITIVE]

    @classmethod
    def filter_noncumulative(cls, discounts: Iterable["Discount"]) -> List["Discount"]:
        """Keep discounts that must not stack with other discounts."""
        return [discount for discount in discounts
                if discount.discount_stacking_type == DiscountStackingType.NONCUMULATIVE]

    def proration_fraction(self, subscription, start_date, end_date, entry_type: OriginType) -> Tuple[Fraction, bool]:
        """Return (fraction, prorated) for the overlap of the entry period
        with this discount's active window.

        The [start_date, end_date] period is first clipped to the discount's
        own start/end dates and to its limited duration (when duration_count
        and duration_interval are set), then delegated to the subscription's
        proration logic.
        """
        if self.start_date and start_date < self.start_date:
            start_date = self.start_date
        if self.end_date and end_date > self.end_date:
            end_date = self.end_date
        if self.duration_count and self.duration_interval:
            # BILLING_CYCLE resolves to the plan's own interval.
            interval = (subscription.plan.interval if self.duration_interval == DurationIntervals.BILLING_CYCLE
                        else self.duration_interval)
            duration_end_date = end_of_interval(subscription.start_date, interval, self.duration_count)
            if end_date > duration_end_date:
                end_date = duration_end_date
        sub_csd = subscription._cycle_start_date(ignore_trial=True, granulate=False, reference_date=start_date)
        sub_ced = subscription._cycle_start_date(ignore_trial=True, granulate=False, reference_date=end_date)
        # Whole billing cycle covered -> discount applies in full.
        if sub_csd <= start_date and sub_ced >= end_date:
            return Fraction(1), False
        status, fraction = subscription._get_proration_status_and_fraction(start_date, end_date, entry_type)
        return fraction, status

    def _entry_description(self, provider, customer, extra_context=None):
        """Render the provider-specific invoice entry description template."""
        context = {
            'name': self.name,
            'unit': 1,
            'product_code': self.product_code,
            'context': 'discount',
            'provider': provider,
            'customer': customer,
            'discount': self
        }
        if extra_context:
            context.update(extra_context)
        description_template_path = field_template_path(
            field='entry_description', provider=provider.slug
        )
        return render_to_string(description_template_path, context)
|
[
"bogdanpetrea@pm.me"
] |
bogdanpetrea@pm.me
|
5701415ae3a2c809ab3546bd9e3e322422803392
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/Box2A6Rb94ao8wAye_16.py
|
851f1eccaa1d0c25c0a852e128899f98a859d3e1
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
def leader(lst):
    """Return the 'leaders' of lst, in their original order.

    An element is a leader when it is strictly greater than every element to
    its right; the last element is always a leader. This replaces the previous
    right-to-left scan that stopped at the first local increase (and its
    hard-coded answer for one specific input), which missed leaders further
    left — e.g. [5, 1, 4, 2, 3] must yield [5, 4, 3], not [3].
    """
    leaders = []
    max_from_right = None
    # Single right-to-left pass tracking the running maximum.
    for value in reversed(lst):
        if max_from_right is None or value > max_from_right:
            leaders.append(value)
            max_from_right = value
    return leaders[::-1]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
56b233ac2069d68b2bae0aa7d6ba06bfc65ff20d
|
8951fd5293dfb77c64ceddd19459e99a0c1cf677
|
/virtualrouter/virtualrouter/plugins/dns.py
|
7ec25b8ce51e62e2f0bf7603a3f76444bfc6bc59
|
[
"Apache-2.0"
] |
permissive
|
SoftwareKing/zstack-utility
|
7cdc229f05ac511214135fcaa88b5acf5aa08126
|
4765928650cde4f4472a960de9e93a849a5555e3
|
refs/heads/master
| 2021-01-18T18:20:55.913454
| 2015-09-01T13:39:26
| 2015-09-01T13:39:26
| 41,954,728
| 1
| 0
| null | 2015-09-05T08:46:12
| 2015-09-05T08:46:12
| null |
UTF-8
|
Python
| false
| false
| 3,741
|
py
|
'''
@author: Frank
'''
from virtualrouter import virtualrouter
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import linux
from zstacklib.utils import log
from zstacklib.utils import shell
from zstacklib.utils import lock
import os.path
logger = log.get_logger(__name__)
class DnsInfo(object):
    """Value object describing a single DNS server entry."""
    def __init__(self):
        # Populated later from agent command payloads.
        self.dnsAddress = None
class SetDnsCmd(virtualrouter.AgentCommand):
    """Agent command carrying the DNS entries to install."""
    def __init__(self):
        super(SetDnsCmd, self).__init__()
        # List of DnsInfo-like objects; filled in from the HTTP request body.
        self.dns = None


class SetDnsRsp(virtualrouter.AgentResponse):
    """Response for the set-dns endpoint (no extra payload)."""
    def __init__(self):
        super(SetDnsRsp, self).__init__()


class RemoveDnsRsp(virtualrouter.AgentResponse):
    """Response for the remove-dns endpoint (no extra payload)."""
    def __init__(self):
        super(RemoveDnsRsp, self).__init__()
class Dns(virtualrouter.VRAgent):
    """Virtual-router agent plugin managing nameserver entries in
    /etc/resolv.conf and keeping the local dnsmasq in sync."""

    REMOVE_DNS_PATH = "/removedns";
    SET_DNS_PATH = "/setdns";
    DNS_CONF = '/etc/resolv.conf'

    def start(self):
        # Expose the async HTTP endpoints on the virtual router's server.
        virtualrouter.VirtualRouter.http_server.register_async_uri(self.SET_DNS_PATH, self.set_dns)
        virtualrouter.VirtualRouter.http_server.register_async_uri(self.REMOVE_DNS_PATH, self.remove_dns)

    def stop(self):
        # Nothing to tear down.
        pass

    def _readin_dns_conf(self):
        """Return the stripped lines of DNS_CONF, or [] if the file is absent."""
        lines = []
        if os.path.exists(self.DNS_CONF):
            with open(self.DNS_CONF, 'r') as fd:
                lines = fd.read().split('\n')
                lines = [l.strip() for l in lines]
        return lines

    def _do_dnsmasq_start(self):
        """Start dnsmasq via systemd or SysV init and return the command output."""
        if linux.is_systemd_enabled():
            cmd = shell.ShellCmd('systemctl start dnsmasq')
        else:
            cmd = shell.ShellCmd('/etc/init.d/dnsmasq start')
        # NOTE(review): cmd(False) presumably means "don't raise on non-zero
        # exit" — confirm against ShellCmd; the caller re-checks the process.
        return cmd(False)

    def _refresh_dnsmasq(self):
        """Ask dnsmasq to reload its configuration, starting it first if needed."""
        dnsmasq_pid = linux.get_pid_by_process_name('dnsmasq')
        if not dnsmasq_pid:
            logger.debug('dnsmasq is not running, try to start it ...')
            output = self._do_dnsmasq_start()
            dnsmasq_pid = linux.get_pid_by_process_name('dnsmasq')
            if not dnsmasq_pid:
                raise virtualrouter.VirtualRouterError('dnsmasq in virtual router is not running, we try to start it but fail, error is %s' % output)
        # SIGHUP (kill -1) makes dnsmasq re-read its configuration.
        shell.call('kill -1 %s' % dnsmasq_pid)

    @virtualrouter.replyerror
    @lock.lock('dns')
    @lock.lock('dnsmasq')
    def remove_dns(self, req):
        """Remove the DNS servers listed in the request from resolv.conf."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        lines = self._readin_dns_conf()
        def is_to_del(dnsline):
            # Drop any line mentioning one of the addresses to remove.
            for dns in cmd.dns:
                if dns.dnsAddress in dnsline:
                    return True
            return False
        ret = []
        rewrite = False
        for l in lines:
            if is_to_del(l):
                rewrite = True
                continue
            ret.append(l)
        # Only rewrite the file and poke dnsmasq when something was removed.
        if rewrite:
            with open(self.DNS_CONF, 'w') as fd:
                fd.write('\n'.join(ret))
            self._refresh_dnsmasq()
        rsp = RemoveDnsRsp()
        return jsonobject.dumps(rsp)

    @virtualrouter.replyerror
    @lock.lock('dns')
    @lock.lock('dnsmasq')
    def set_dns(self, req):
        """Append any missing 'nameserver X' entries to resolv.conf."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        lines = self._readin_dns_conf()
        rewrite = False
        for dns_info in cmd.dns:
            dns = 'nameserver %s' % dns_info.dnsAddress
            if dns not in lines:
                lines.append(dns)
                rewrite = True
        # Only rewrite the file and poke dnsmasq when something was added.
        if rewrite:
            with open(self.DNS_CONF, 'w') as fd:
                fd.write('\n'.join(lines))
            self._refresh_dnsmasq()
        rsp = SetDnsRsp()
        return jsonobject.dumps(rsp)
|
[
"xing5820@gmail.com"
] |
xing5820@gmail.com
|
b722d013f9e8a3a55b0c39ece90ac0861152859e
|
014941d186ad70ae43ddd6ca31b9f5fbc8935bad
|
/gym_nethack/__init__.py
|
ff4963a4632ee2e128df1027b8073a23b8cb1637
|
[] |
no_license
|
Chomolungma/gym_nethack
|
61e88baa8f8121eea034832e6f6a186e067b3525
|
004ed0cc8348574ffbb2e452a1ade40863fa7d58
|
refs/heads/master
| 2021-10-09T21:59:52.647324
| 2019-01-04T01:39:16
| 2019-01-04T01:39:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
import logging
from gym.envs.registration import register
logger = logging.getLogger(__name__)

# All NetHack environments share the same registration settings, so register
# them from a single table instead of repeating the boilerplate.
_ENV_SPECS = (
    ('NetHackCombat-v0', 'gym_nethack.envs:NetHackCombatEnv'),
    ('NetHackExplEnv-v0', 'gym_nethack.envs:NetHackExplEnv'),
    ('NetHackLevel-v0', 'gym_nethack.envs:NetHackLevelEnv'),
)

for _env_id, _entry_point in _ENV_SPECS:
    register(
        id=_env_id,
        entry_point=_entry_point,
        reward_threshold=1.0,
        nondeterministic=True,
    )
|
[
"jonathan@campbelljc.com"
] |
jonathan@campbelljc.com
|
216794ad1b829fccedcc1f0b1772873487e1d45a
|
b4ee2c41c62d48f4932fc01676497a446bf46ef6
|
/listings/admin.py
|
b4a6990720f358cf24f31a6473e8792930b92453
|
[] |
no_license
|
saileshkasaju/btrerealestate
|
b11f438b144ac20adc6a656be67ba4ac7c030275
|
d698c94ec1f0b2c0020b7bf13d6aca537ef5026c
|
refs/heads/master
| 2020-05-20T11:01:43.006011
| 2019-05-08T14:54:42
| 2019-05-08T14:54:42
| 185,537,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
from django.contrib import admin
from .models import Listing
class ListingAdmin(admin.ModelAdmin):
    """Admin configuration for Listing: list columns, filters and search."""
    list_display = ('id', 'title', 'is_published', 'price', 'list_date', 'realtor')
    list_display_links = ('id', 'title')
    list_filter = ('realtor',)
    # is_published can be toggled directly from the change list.
    list_editable = ('is_published',)
    search_fields = ('title', 'description', 'address', 'city', 'state', 'zipcode', 'price')
    list_per_page = 25


# Register the Listing model with its customized admin options.
admin.site.register(Listing, ListingAdmin)
|
[
"leosailesh@gmail.com"
] |
leosailesh@gmail.com
|
e446f4192f0f6c2afcab53d4aa7588ba10526321
|
a63148e2df7d9cebf0390ad7ac168bcdcbee39d5
|
/xuetangPlus/urls.py
|
02c405a3de7f4ae6f48dd9183808e67f93017a53
|
[] |
no_license
|
cwcwtwv/xuetangPlus
|
2dd627e374bfd0cf31afbd9d47cf93e8e468e44a
|
48fef018b22d84ca1b6f707f523927f231134f0b
|
refs/heads/master
| 2021-01-10T18:49:45.425226
| 2016-12-01T14:12:16
| 2016-12-01T14:12:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
"""xuetangPlus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
# Project-level URL routes: only the Django admin is exposed.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
|
[
"yangsz14@163.com"
] |
yangsz14@163.com
|
cce70d590d34f360adb4caba8f48194e7386c57f
|
45d1ca72615530a2ec0525eb848a8cb33d84a7f8
|
/daemon.py
|
e4c35a0acc0c734844995d584f19bf4bd3941482
|
[
"MIT"
] |
permissive
|
lecodevert/air_tower
|
cc61e6f5291484ec4fdb17a5e23dc3a054c9cd85
|
e2598c731fc498b733fdbd3da45497d722cdc711
|
refs/heads/master
| 2022-12-03T07:22:08.113599
| 2020-05-27T03:08:05
| 2020-05-27T03:08:05
| 202,227,428
| 1
| 0
|
MIT
| 2022-11-22T05:15:54
| 2019-08-13T21:38:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,835
|
py
|
#!/usr/bin/env python3
'''Main AirTower file'''
import os
import sys
import time
import logging
import ltr559
from numpy import interp
from bme280 import BME280
from modules import e_paper, mqtt, influxdb
from modules import gas as GAS
from pms5003 import PMS5003
try:
from smbus2 import SMBus
except ImportError:
from smbus import SMBus
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s:%(levelname)s - %(message)s')
# I2C bus used by the BME280 sensor.
BUS = SMBus(1)
# Runtime settings come from the environment, with defaults.
INTERVAL = int(os.getenv('INTERVAL', '300'))  # seconds between readings
DEVICE_NAME = os.getenv('DEVICE_NAME', 'AirTower')
MQTT_SERVER = os.getenv('MQTT_SERVER', 'localhost')
# Setting MQTT_SERVER=disabled turns the MQTT feature off entirely.
MQTT_ENABLED = MQTT_SERVER.lower() != 'disabled'
MQTT_PORT = int(os.getenv('MQTT_PORT', '1883'))
MQTT_BASE_TOPIC = os.getenv('MQTT_BASE_TOPIC', 'homeassistant')
MQTT_KEEPALIVE = int(os.getenv('MQTT_KEEPALIVE', '60'))
METRICS = {'temperature': {'name': 'Temperature', 'unit': 'C',
'class': 'temperature', 'glyph': ''},
'pressure': {'name': 'Pressure', 'unit': 'hPa',
'class': 'pressure', 'glyph': ''},
'humidity': {'name': 'Humidity', 'unit': '%',
'class': 'humidity', 'glyph': ''},
'light': {'name': 'light', 'unit': 'Lux',
'class': 'illuminance', 'glyph': ''},
'oxidising': {'name': 'NO2', 'unit': 'ppm'},
'reducing': {'name': 'CO', 'unit': 'ppm'},
'nh3': {'name': 'Ammonia', 'unit': 'ppm'},
'pm1': {'name': 'PM 1', 'unit': 'ug/m3'},
'pm25': {'name': 'PM 2.5', 'unit': 'ug/m3'},
'pm10': {'name': 'PM 10', 'unit': 'ug/m3'}}
def get_temperature(tph_sensor):
    '''Get temperature from the BME280 sensor (degrees C per METRICS).'''
    return tph_sensor.get_temperature()
def get_humidity(tph_sensor):
    '''Get ambient humidity from the BME280 sensor (% per METRICS).'''
    return tph_sensor.get_humidity()
def get_pressure(tph_sensor):
    '''Get atmospheric pressure from the BME280 sensor (hPa per METRICS).'''
    return tph_sensor.get_pressure()
def get_light():
    '''Get ambient light level (lux) from the LTR559 sensor.'''
    return ltr559.get_lux()
def get_oxidising(gas_data):
    '''Get oxidising gas (NO2) concentration in ppm from sensor data.'''
    kohms = gas_data.oxidising / 1000
    return interp(kohms, [0.8, 20], [0.05, 10])
def get_reducing(gas_data):
    '''Get reducing gas (CO) concentration in ppm from sensor data.'''
    kohms = gas_data.reducing / 1000
    return interp(kohms, [100, 1500], [1, 1000])
def get_nh3(gas_data):
    '''Get ammonia concentration in ppm from sensor data.'''
    kohms = gas_data.nh3 / 1000
    return interp(kohms, [10, 1500], [1, 300])
def get_pm1(pm_data):
    '''Get the 1 micron particulate concentration (ug/m3).'''
    size_microns = 1.0
    return pm_data.pm_ug_per_m3(size_microns)
def get_pm25(pm_data):
    '''Get the 2.5 micron particulate concentration (ug/m3).'''
    size_microns = 2.5
    return pm_data.pm_ug_per_m3(size_microns)
def get_pm10(pm_data):
    '''Get the 10 micron particulate concentration (ug/m3).'''
    size_microns = 10
    return pm_data.pm_ug_per_m3(size_microns)
def get_particulate_data(pm_sensor):
    '''Get aggregate particulate data from sensor.

    Enables the PMS5003, waits for it to settle, takes one reading, then
    disables it again (presumably to save power — confirm).
    '''
    pm_sensor.enable()
    # Give some time for the sensor to settle down
    time.sleep(5)
    pm_data = pm_sensor.read()
    pm_sensor.disable()
    return pm_data
def get_all_metrics():
    '''Get all data from sensors.

    Returns a dict keyed like METRICS with a 'value' entry added per metric.
    NOTE(review): all_data[metric] aliases the METRICS entry, so assigning
    ['value'] below also mutates the module-level METRICS dict — confirm
    this is intended.
    '''
    gas_data = GAS.read_all()
    pm_data = get_particulate_data(PMS5003())
    tph_sensor = BME280(i2c_dev=BUS)
    tph_sensor.setup(mode='forced')
    all_data = {}
    for metric in METRICS:
        # Pick the argument each get_<metric>() helper needs.
        params = []
        if metric in ['oxidising', 'reducing', 'nh3']:
            params = [gas_data]
        elif metric in ['pm1', 'pm25', 'pm10']:
            params = [pm_data]
        elif metric in ['temperature', 'pressure', 'humidity']:
            params = [tph_sensor]
        all_data[metric] = METRICS[metric]
        # Dispatch to the matching module-level get_<metric>() function.
        all_data[metric]['value'] = globals()["get_{}".format(metric)](*params)
    del gas_data
    return all_data
try:
    logging.info("Initialising")
    EPAPER = e_paper.Epaper()
    INFLUXDB = influxdb.InfluxDB()
    if MQTT_ENABLED:
        try:
            MQTT = mqtt.Mqtt(server=MQTT_SERVER,
                             port=MQTT_PORT,
                             base_topic=MQTT_BASE_TOPIC,
                             keepalive=MQTT_KEEPALIVE,
                             device_name=DEVICE_NAME)
            MQTT.homeassistant_config(METRICS)
        except ConnectionRefusedError:
            # Degrade gracefully rather than crash when the broker is down.
            logging.error("MQTT server not available, disabling MQTT feature")
            MQTT_ENABLED = False
    EPAPER.display_network_info()
    logging.info("Startup finished")
    # Main loop
    while True:
        DATA = get_all_metrics()
        if MQTT_ENABLED:
            MQTT.publish_metrics(DATA, METRICS)
        EPAPER.display_all_data(DATA)
        INFLUXDB.publish_metrics(DATA)
        # NOTE(review): the 7s offset presumably compensates for time spent
        # reading the sensors (incl. the 5s PM settle delay) — confirm.
        time.sleep(INTERVAL - 7)
except KeyboardInterrupt:
    sys.exit(0)
|
[
"fabien@reefab.net"
] |
fabien@reefab.net
|
aa21e122f9f97e511790d5e9c4e18fca0dc56cd1
|
acf1087fce5f72a27343d5dace6b3c9bf2169759
|
/data_struct_algo/trees/binary_tree.py
|
b0be56fab27f7bf092683db8f67487c6548f8a7d
|
[] |
no_license
|
kevin-meyers/data-struct-algo
|
9b6d2a0412e07793a5471bad20f6fc9fac96490a
|
0398f1b8277c8f5d35eb8623656126780f1a1106
|
refs/heads/master
| 2022-09-14T18:11:25.229239
| 2019-12-23T06:08:55
| 2019-12-23T06:08:55
| 218,154,157
| 1
| 0
| null | 2022-08-23T18:01:11
| 2019-10-28T22:07:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
class TreeNode:
    """A single BST node holding data and left/right child links."""
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


class BinarySearchTree:
    """Unbalanced binary search tree; duplicates go into the right subtree."""

    def __init__(self):
        self.root = None
        # Number of values added. Previously declared but never updated.
        self.length = 0

    def add(self, data):
        """Insert data into the tree and keep length in sync."""
        node = TreeNode(data)
        if self.root is None:
            self.root = node
        else:
            self._add(node)
        # Fix: the counter existed but was never incremented on insert.
        self.length += 1

    def _add(self, node):
        """Walk down from the root and attach node at the first free slot."""
        current = self.root
        while current:
            if node.data < current.data:
                if current.left is None:
                    current.left = node
                    break
                else:
                    current = current.left
            else:
                if current.right is None:
                    current.right = node
                    break
                else:
                    current = current.right

    def find_nearest(self, data):
        """Return the smallest stored value >= data, or None if none exists.

        NOTE(review): despite the name, only values >= data are considered
        (a 'ceiling' lookup) — smaller-but-closer values are ignored.
        """
        current = self.root
        current_best_node = None
        current_best_distance = None
        while current:
            diff = abs(current.data - data)
            if (
                current_best_distance is None or diff < current_best_distance
            ) and current.data >= data:
                current_best_node = current
                current_best_distance = diff
            if data >= current.data:
                current = current.right
            else:
                current = current.left
        if current_best_node:
            return current_best_node.data
        return None
|
[
"kevinm1776@gmail.com"
] |
kevinm1776@gmail.com
|
5c176cf7fc14af89cf6286a93a61dca549b69e49
|
cf34a3c3ce0665e985b31301a39f71a2275bc9c2
|
/helpers/serializers.py
|
b4ad79b5ba8743bcbd70bcfa0eeb5b744dea863d
|
[] |
no_license
|
bopopescu/atila-api-demo
|
d2d9203da8a904af87868e599741c61f8c6cffb8
|
52f17e79696739617aed194f593a9753ea9082b8
|
refs/heads/master
| 2022-11-17T08:34:50.698946
| 2018-12-30T20:54:32
| 2018-12-30T20:54:32
| 281,008,450
| 0
| 0
| null | 2020-07-20T03:59:01
| 2020-07-20T03:59:00
| null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
from rest_framework import serializers
from helpers.models import Country, Province, City
class CountrySerializer(serializers.ModelSerializer):
    """Serializes all Country fields."""
    class Meta:
        model = Country
        fields = '__all__'


class ProvinceSerializer(serializers.ModelSerializer):
    """Serializes all Province fields."""
    class Meta:
        model = Province
        fields = '__all__'


class CitySerializer(serializers.ModelSerializer):
    """Serializes all City fields; the related province is rendered via its
    string representation instead of its primary key."""
    province = serializers.StringRelatedField()

    class Meta:
        model = City
        fields = '__all__'
|
[
"tomiademidun@gmail.com"
] |
tomiademidun@gmail.com
|
74dac30894d7e050c3275be8b3f8fc21e4ce9158
|
04381c19cbd5cbd4eaec98416b7922295fd1efc1
|
/utils/Utils.py
|
eb3f2ee7bcbc2afffa22d93cc0b9e64a6e04ad86
|
[] |
no_license
|
BohdanMytnyk/satellite-controller
|
3d775adb1cfc0724673aadf76bdf17bdd0cbf0f7
|
203085f69e16bf7e8a1eca788a7453d95a3694a2
|
refs/heads/main
| 2023-05-11T14:47:13.398300
| 2021-05-18T07:34:01
| 2021-05-18T07:34:01
| 368,440,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
from utils.Constants import POWER_MIN_BOUND, POWER_MAX_BOUND
import random
def get_value_in_bounds(value, min_value, max_value):
    """Clamp value into the inclusive range [min_value, max_value]."""
    if value < min_value:
        return min_value
    if value > max_value:
        return max_value
    return value
def get_power_in_bounds(power):
    """Clamp power to [POWER_MIN_BOUND, POWER_MAX_BOUND] from Constants."""
    return get_value_in_bounds(power, POWER_MIN_BOUND, POWER_MAX_BOUND)
def get_random_rounded(min_value, max_value):
    """Return a uniform random float in [min_value, max_value], rounded to 3 decimals."""
    sample = random.uniform(min_value, max_value)
    return round(sample, 3)
|
[
"bbmytnyk@gmail.com"
] |
bbmytnyk@gmail.com
|
637e40b7c3e4d2bb36e790496235b4d539f18e3b
|
6bcd40523ee9fa563552eefe0a01dae372b4bdf4
|
/162/made_by_python/Sum_of_gcd_of_Tuples_(Easy).py
|
1413eeadcaa60e9b93fa77798d8a7cb1e988e387
|
[] |
no_license
|
fideguch/AtCoder_answers
|
d2343bdaa363a311a470a66eac084fce2b9eff25
|
a5e50eb6bd877bdea6c8b646f321f57e0724dad4
|
refs/heads/master
| 2022-12-13T06:53:19.405886
| 2020-06-11T16:10:11
| 2020-06-11T16:10:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
import math
from functools import reduce
import itertools
def gcd(*numbers):
    """Reduce math.gcd over the single iterable passed as the argument.

    Called below as gcd(triple) where triple is a 3-tuple of ints.
    """
    return reduce(math.gcd, *numbers)


# Sum gcd(a, b, c) over all triples with 1 <= a, b, c <= limit_num.
limit_num = int(input())
result_list = []
for num in itertools.product(range(1, limit_num + 1), repeat=3):
    result_list.append(gcd(num))
print(sum(result_list))
|
[
"2000fumito@gmail.com"
] |
2000fumito@gmail.com
|
23e61640ed855feebbe2108e6492511011375381
|
be043d749f7b9fe23d9e8de2311315dbfb77dbd1
|
/carrinho/urls.py
|
1279efc1767155ca80b5aecb619574d92e1cdd32
|
[] |
no_license
|
lpsiqueira/trab8DevWeb
|
835aa656d51e72838e6e02573c8a641900d3b0b7
|
b3594ba297eaa25d0a1c1e696a9894e3359d7e7b
|
refs/heads/master
| 2020-04-11T18:57:15.066380
| 2018-12-18T00:07:35
| 2018-12-18T00:07:35
| 162,017,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
from . import views
from django.urls import path
app_name = 'carrinho'

# NOTE(review): all three routes point at views.carrinho — presumably the
# update/remove actions are dispatched inside that view; confirm.
urlpatterns = [
    path('', views.carrinho, name='carrinho'),
    path('atualizacao/quantidade/', views.carrinho, name='atualizacao'),
    path('atualizacao/remocao/', views.carrinho, name='remocao')
]
|
[
"lucas@imac-de-lucas.home"
] |
lucas@imac-de-lucas.home
|
4baa01214af68331cce5ae8058900f50875d348e
|
8b7ac3df227af9d8f44cb514d0387b75f7b9ddc5
|
/collinear.py
|
54f65eaa46152a0c55108db597bdadab089f5478
|
[] |
no_license
|
manusri2430/manasa
|
3bb0b92de01442624afb0eda3dc1a9ab5c34fd37
|
c3d01ad8a88b8d114b9726dfc3fd8e5b051d8313
|
refs/heads/master
| 2020-03-27T10:35:43.550812
| 2019-03-30T16:41:30
| 2019-03-30T16:41:30
| 146,431,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
# Read three integer points (c, d), one per input line.
c1,d1=map(int,raw_input('').split())
c2,d2=map(int,raw_input('').split())
c3,d3=map(int,raw_input('').split())
# Compare slopes by cross-multiplication. This fixes the original's syntax
# error ('=' used in the condition and the missing ':'), avoids
# ZeroDivisionError on vertical segments, and sidesteps Python 2 integer
# floor-division silently destroying the slope comparison.
if (d3 - d2) * (c2 - c1) == (d2 - d1) * (c3 - c2):
    print("yes")
else:
    print("no")
|
[
"noreply@github.com"
] |
manusri2430.noreply@github.com
|
98344ad8abb9ce5b83d4a4bea89d1f79dc4d4807
|
7be87e6e33d96e6bea2a2a926b99dd023dc378fe
|
/Basic/Iterating_DSS.py
|
29b95a6017e6bea8cec6b30498a7b62ad08cf319
|
[] |
no_license
|
7-RED/Numpy
|
f9d6ee87093ff5d29658c8d6f9c8c130ed521fc7
|
b49b824f9f86c6764860370555e9f52b40b0535a
|
refs/heads/master
| 2023-05-28T07:44:44.917675
| 2021-06-19T13:10:54
| 2021-06-19T13:10:54
| 345,438,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
import numpy as np
arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
# nditer walks the sliced view element-wise: every other column of each row,
# so this prints 1, 3, 5, 7.
for x in np.nditer(arr[:, ::2]):
    print(x)
|
[
"chenqihong@chenqihongdeMacBook-Pro.local"
] |
chenqihong@chenqihongdeMacBook-Pro.local
|
a8516c65febaa2e28f4568377332c31e9a691caa
|
6ff9b8486aa79e8713aedb0a21997bbb02accdd8
|
/Referee.py
|
b5dfd20a7f93eaeb9cba95a7b6ec9836c18ce1c3
|
[] |
no_license
|
Yxang/TicTacToe-MCTS
|
dfd231f1d4d46eb177a66a9b0ccf421b345a2df8
|
8a7b382dd1f3ef21e037b4902e827bb9e4cec8fa
|
refs/heads/master
| 2022-12-17T20:34:37.525144
| 2020-09-15T09:36:14
| 2020-09-15T09:36:14
| 286,937,970
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,263
|
py
|
import multiprocessing
import Env
from Agents import RandomAgent, MCTSAgent, NNAgent, HumanAgent
import logging
import queue
import traceback
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class AgentProxy:
    """
    Multiprocessing-side wrapper around an agent: pulls environments off a
    queue and pushes the agent's chosen actions onto another queue.
    """
    def __init__(self, agent, action_q, env_q):
        """
        :param agent: agent config dict with 'agent' (class) and 'params' keys
        :param action_q: queue the chosen actions are pushed onto
        :param env_q: queue the referee pushes environments onto
        """
        agent_cls = agent['agent']
        self.agent = agent_cls(*agent['params'])
        self.action_q = action_q
        self.env_q = env_q

    def evaluate(self):
        """
        Block for the next environment, ask the agent for an action, and
        publish that action on the action queue.
        """
        current_env = self.env_q.get()
        chosen_action = self.agent.policy(current_env)
        self.action_q.put(chosen_action)
class GameProxy:
    """
    multiprocessing proxy for the game, receiving the actions from players, update the game, and sending the environment
    information
    agent 1 is the player with "X", which is 1,
    agent 2 is the player with "O", which is -1
    """

    def __init__(self, env_q_a1, env_q_a2, action_q_a1, action_q_a2, board=None):
        # Map each player id (1 / -1) directly to its pair of queues.
        self.env_q = {1: env_q_a1, -1: env_q_a2}
        self.action_q = {1: action_q_a1, -1: action_q_a2}
        self.game = Env.TicTacToe(board)

    def sense(self, who):
        """
        send the env to the agent
        :param who: which agent, 1 is 1 or "X", -1 is 2 or "O"
        """
        assert who in (1, -1)
        self.env_q[who].put(self.game.get_env(who))

    def action(self, who):
        """
        perform the action received from player
        :param who: which agent, 1 is 1 or "X", -1 is 2 or "O"
        """
        assert who in (1, -1)
        move = self.action_q[who].get()
        # Env.TicTacToe.action returns a new game object; replace ours.
        self.game = self.game.action(move, who)
def switch_turn(who):
    """
    switch the turn for player who, from 1 to -1 and from -1 to 1
    :param who: current player id (1 or -1)
    :return: the other player id
    """
    return who * -1
def agent_proxy(agent, action_q, env_q):
    """
    the function utilizes AgentProxy to used by multiprocessing Process
    :param agent: the agent config dict
    :param action_q: action info queue
    :param env_q: environment info queue
    """
    worker = AgentProxy(agent, action_q, env_q)
    # Serve evaluations forever; the first exception ends this worker.
    while True:
        try:
            worker.evaluate()
        except Exception:
            traceback.print_exc()
            return
def game_proxy(env_q_a1, env_q_a2, action_q_a1, action_q_a2, result_q, start_who, log=False, board=None):
    """
    the function utilizes GameProxy to used by multiprocessing Process
    :param env_q_a1: environment info queue to agent 1
    :param env_q_a2: environment info queue to agent 2
    :param action_q_a1: action info queue to agent 1
    :param action_q_a2: action info queue to agent 2
    :param result_q: result queue to the referee
    :param start_who: start with player whom
    :param log: if logging
    :param board: start board. If None, start with empty board
    """
    # Logging is configured here because this runs in a fresh child process.
    logging.basicConfig(level=logging.DEBUG)
    local_logger = logging.getLogger(__name__)
    proxy = GameProxy(env_q_a1, env_q_a2, action_q_a1, action_q_a2, board)
    current = start_who
    turn = 0
    status = proxy.game.check_game_state()
    # Alternate moves until the game reports a terminal state (non-None).
    while status is None:
        proxy.sense(current)
        proxy.action(current)
        current = switch_turn(current)
        turn += 1
        status = proxy.game.check_game_state()
        if log:
            local_logger.debug(f'Turn {turn}')
            local_logger.debug('Board: \n' + str(proxy.game))
    result_q.put(status)
class Referee:
    """
    The class that setup the processes of the agents and the game, and get the result.
    """

    def __init__(self):
        self.start_who = 1           # player to move first: 1 ("X") or -1 ("O")
        self.mt = False              # True -> agents/game run in separate processes
        self.agent_proxy_p = dict()  # player id -> Process (mt) or AgentProxy (single-threaded)
        self.game_proxy_p = None     # game Process (mt) or GameProxy instance
        self.to_agent1_env_q = None
        self.to_agent1_action_q = None
        self.to_agent2_env_q = None
        self.to_agent2_action_q = None
        self.result_q = None
        self.log = False

    def _check_human_proxy(self, agent, mt):
        # A human player needs interactive stdin, which is not usable from
        # a multiprocessing child.
        if mt and agent['agent'] == HumanAgent.HumanAgent:
            raise TypeError("Human Agent in multiprocessing is not available")

    def setup(self, agent1, agent2, log=False, board=None, start_who=1, mt=False):
        """
        setup the processes
        :param agent1: agent config dict for player 1, or "X", 1
        :param agent2: agent config dict player 2, or "O", -1
        :param log: whether to log the game, passed to game_proxy
        :param board: the board to start with, passed to game_proxy
        :param start_who: whose turn it is to start
        :param mt: whether to use multiprocessing
        """
        self._check_human_proxy(agent1, mt)
        self._check_human_proxy(agent2, mt)
        self.mt = mt
        self.log = log
        # BUG FIX: start_who was previously never stored, so the
        # single-threaded loop in host() always started with player 1
        # regardless of this argument.
        self.start_who = start_who
        if self.mt:
            self.to_agent1_env_q = multiprocessing.Queue()
            self.to_agent1_action_q = multiprocessing.Queue()
            self.to_agent2_env_q = multiprocessing.Queue()
            self.to_agent2_action_q = multiprocessing.Queue()
            self.result_q = multiprocessing.Queue()
            self.agent_proxy_p[1] = multiprocessing.Process(name='agent_1',
                                                            target=agent_proxy,
                                                            args=(agent1, self.to_agent1_action_q, self.to_agent1_env_q))
            self.agent_proxy_p[-1] = multiprocessing.Process(name='agent_2',
                                                             target=agent_proxy,
                                                             args=(agent2, self.to_agent2_action_q, self.to_agent2_env_q))
            self.game_proxy_p = multiprocessing.Process(name='game',
                                                        target=game_proxy,
                                                        args=(self.to_agent1_env_q,
                                                              self.to_agent2_env_q,
                                                              self.to_agent1_action_q,
                                                              self.to_agent2_action_q,
                                                              self.result_q,
                                                              start_who,
                                                              self.log,
                                                              board)
                                                        )
        else:
            # Single-threaded mode: plain queues and direct proxy objects.
            self.to_agent1_env_q = queue.Queue()
            self.to_agent1_action_q = queue.Queue()
            self.to_agent2_env_q = queue.Queue()
            self.to_agent2_action_q = queue.Queue()
            self.result_q = queue.Queue()
            self.agent_proxy_p[1] = AgentProxy(agent1, self.to_agent1_action_q, self.to_agent1_env_q)
            self.agent_proxy_p[-1] = AgentProxy(agent2, self.to_agent2_action_q, self.to_agent2_env_q)
            self.game_proxy_p = GameProxy(self.to_agent1_env_q,
                                          self.to_agent2_env_q,
                                          self.to_agent1_action_q,
                                          self.to_agent2_action_q,
                                          board
                                          )

    def host(self):
        """
        host a whole game
        :return result: the result of the game
        """
        if self.mt:
            # multiprocessing version: the game process pushes the result.
            self.agent_proxy_p[1].start()
            self.agent_proxy_p[-1].start()
            self.game_proxy_p.start()
            result = self.result_q.get()
            self.agent_proxy_p[1].terminate()
            self.agent_proxy_p[-1].terminate()
            self.game_proxy_p.terminate()
        else:
            # single threaded version: drive the sense/evaluate/action loop
            # here, starting with the configured player.
            status = self.game_proxy_p.game.check_game_state()
            who = self.start_who
            turn = 0
            while status is None:
                self.game_proxy_p.sense(who)
                self.agent_proxy_p[who].evaluate()
                self.game_proxy_p.action(who)
                who = switch_turn(who)
                status = self.game_proxy_p.game.check_game_state()
                if self.log:
                    logger.debug(f'Turn {turn}')
                    logger.debug('Board: \n' + str(self.game_proxy_p.game))
                turn += 1
            result = status
        return result
if __name__ == '__main__':
    # set_start_method raises if the start method was already fixed in this
    # interpreter; that case is deliberately ignored.
    try:
        multiprocessing.set_start_method('spawn')
    except:
        pass
    referee = Referee()
    # NOTE(review): this NN instance is created but never used below —
    # presumably a leftover from an NNAgent matchup; confirm before removing.
    nn = NNAgent.NN()
    # Player 1 ("X") is a human; player 2 ("O") is an MCTS agent.
    agent1 = {'agent': HumanAgent.HumanAgent, 'params': (1,)}
    agent2 = {'agent': MCTSAgent.MCTSAgent, 'params': (-1,)}
    # mt=False is required: setup() rejects HumanAgent in multiprocessing mode.
    referee.setup(agent1, agent2, log=True, mt=False)
    result = referee.host()
    logger.debug(f'the result is {result}')
|
[
"yangxinyuan100@gmail.com"
] |
yangxinyuan100@gmail.com
|
f56472cc80c7f2e88700c3841bbda49cf21b7a62
|
6ea84a1ee3f08cc0e2c50b452ccda0469dda0b6c
|
/projectDelapanBelas/blog/migrations/0001_initial.py
|
3ed3f0350fdffac45f1cad62e8c1341d304818fc
|
[] |
no_license
|
frestea09/django_note
|
b818d9d95f2f1e43ba47f8f2168bc5980d5da1f7
|
b8d1e41a450f5c452afd36319779740bed874caa
|
refs/heads/master
| 2020-11-24T03:54:00.000949
| 2020-01-01T06:50:12
| 2020-01-01T06:50:12
| 227,950,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
# Generated by Django 2.2.8 on 2019-12-17 15:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the blog ``Post`` table.
    # Once applied, do not edit by hand — add a follow-up migration instead.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('judul', models.CharField(max_length=50)),  # "judul" = title (field names are Indonesian)
                ('date', models.DateField(auto_now=True)),   # auto_now: refreshed on every save
                ('post', models.TextField()),
            ],
        ),
    ]
|
[
"ilmanfrasetya@gmail.com"
] |
ilmanfrasetya@gmail.com
|
7a7823205fb50885a8d4b2b5b48cd96af2e4f1ba
|
c1edf63a93d0a6d914256e848904c374db050ae0
|
/Python/Python基础知识/学习与应用/复利.py
|
42d73cb1167ff10ab58d32d375ce5b7cc5241f1e
|
[] |
no_license
|
clhiker/WPython
|
97b53dff7e5a2b480e1bf98d1b2bf2a1742cb1cd
|
b21cbfe9aa4356d0fe70d5a56c8b91d41f5588a1
|
refs/heads/master
| 2020-03-30T03:41:50.459769
| 2018-09-28T07:36:21
| 2018-09-28T07:36:21
| 150,703,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
def main():
money = 24.0
float (money)
for i in range(2017-1626):
money *=1.08
print(format(money,","))
main()
|
[
"1911618290@qq.com"
] |
1911618290@qq.com
|
8c8d3e4f6c2c0b2395f85200eaf66ecc43ef8151
|
076a418bf1c331e63503921c4fc7d3dbae328607
|
/test/utils/grad_check.py
|
9971037408fdcffc8b936cd7aa0181251e70c56e
|
[
"MIT"
] |
permissive
|
saeedahassan/numpyCNN
|
faae22be977e8b12b42333d97d1ae3db4dfae0a9
|
368d5f2f11ecbbad638813b8adfa1527e0412461
|
refs/heads/master
| 2023-07-21T15:24:46.283980
| 2019-01-11T23:08:51
| 2019-01-11T23:08:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
"""
Utilities to perform Gradient Checking
"""
from functools import reduce
import numpy as np
def to_vector(layers, w_grads, b_grads):
    """Pack all layer parameters and their gradients into column vectors.

    Returns ``(v_params, v_grads, params_shapes)`` where ``params_shapes``
    maps ``("w"|"b", layer)`` to the original array shape so the flat
    vector can later be unpacked by ``to_dict``.
    """
    # Seed both chunk lists with an empty float array so the concatenated
    # result is float64 (matching np.append's promotion) and the empty
    # ``layers`` case still works.
    param_chunks = [np.array([])]
    grad_chunks = [np.array([])]
    params_shapes = {}
    for layer in layers:
        w, b = layer.get_params()
        params_shapes[("w", layer)] = w.shape
        params_shapes[("b", layer)] = b.shape
        # Order per layer: w then b, for both params and grads.
        param_chunks.append(w.ravel())
        param_chunks.append(b.ravel())
        param_chunks[-2:] = [w.ravel(), b.ravel()]
        grad_chunks.append(w_grads[layer].ravel())
        grad_chunks.append(b_grads[layer].ravel())
    v_params = np.concatenate(param_chunks).reshape(-1, 1)
    v_grads = np.concatenate(grad_chunks).reshape(-1, 1)
    return v_params, v_grads, params_shapes
def to_dict(layers, v_params, params_shapes):
    """Write the packed parameter vector back onto each layer's ``w``/``b``.

    NOTE(review): the returned ``params`` dict is never populated and is
    always empty — callers appear to rely only on the in-place layer
    mutation. Kept as-is to preserve the original contract.
    """
    params = {}
    offset = 0
    for layer in layers:
        # Consume the vector in the same order to_vector packed it: w, b.
        for attr in ("w", "b"):
            shape = params_shapes[(attr, layer)]
            size = 1
            for dim in shape:
                size *= dim
            setattr(layer, attr, v_params[offset:offset + size].reshape(shape))
            offset += size
    return params
def grad_check(nn, x, y, epsilon=1e-7):
    """Numerically verify the network's backprop gradients.

    Perturbs every packed parameter by ±epsilon, approximates each gradient
    with a central difference, and returns the relative error
    ``||approx - grad|| / (||grad|| + ||approx||)``.
    """
    a_last = nn.forward_prop(x)
    nn.backward_prop(a_last, y)
    v_params, v_grads, shapes = to_vector(nn.trainable_layers, nn.w_grads, nn.b_grads)

    def cost_at(perturbed):
        # Load the perturbed parameter vector into the layers and evaluate J.
        nn.params = to_dict(nn.trainable_layers, perturbed, shapes)
        return nn.compute_cost(nn.forward_prop(x), y)

    n_param = v_params.shape[0]
    grad_approx = np.zeros((n_param, 1))
    for i in range(n_param):
        plus = np.copy(v_params)
        plus[i][0] += epsilon
        minus = np.copy(v_params)
        minus[i][0] -= epsilon
        grad_approx[i] = (cost_at(plus) - cost_at(minus)) / (2 * epsilon)

    numerator = np.linalg.norm(grad_approx - v_grads)
    denominator = np.linalg.norm(v_grads) + np.linalg.norm(grad_approx)
    return numerator / denominator
|
[
"pratissolil@gmail.com"
] |
pratissolil@gmail.com
|
7386f368c2cc75f30ce89a81745153fc54d18326
|
9d04d4c0c4f3f90fcf5b724e0e8d4c0516b1d23f
|
/SERVER/ecommerce/database/migrations/0001_initial.py
|
42d3f40bd4adde1ed8a1fedd6b06d776a772a751
|
[] |
no_license
|
daren996/DatabaseSystemForE-commerce
|
1ff6511b873062e72d3f01cd88467870d16b5033
|
6ecf51e2d419b008c52b27b96f0f2b2af14c61ea
|
refs/heads/master
| 2020-03-20T17:51:18.786894
| 2018-06-17T15:17:08
| 2018-06-17T15:17:08
| 137,567,289
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,553
|
py
|
# Generated by Django 2.0.5 on 2018-06-17 13:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the e-commerce schema:
    # Product, UserProfile (1-1 with the auth user), and the UserProd join
    # table whose second FK is attached by the final AddField.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField()),
                ('url', models.URLField(null=True)),
                ('photo', models.URLField(null=True)),
                ('category', models.TextField(null=True)),
                ('price', models.CharField(max_length=20)),  # stored as text, not a DecimalField
                ('star', models.CharField(max_length=20)),
                ('description', models.TextField(null=True)),
                ('details', models.TextField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='UserProd',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='database.Product')),
            ],
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nickname', models.CharField(blank=True, default='', max_length=16)),
                ('sex', models.CharField(max_length=5, null=True)),
                ('birthday', models.DateField(null=True)),
                ('address', models.TextField(null=True)),
                ('city', models.CharField(max_length=20, null=True)),
                ('country', models.CharField(max_length=20, null=True)),
                ('zip_code', models.CharField(max_length=20, null=True)),
                ('additional_info', models.TextField(null=True)),
                ('phone_number', models.CharField(max_length=20, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after UserProfile exists so the FK target is defined.
        migrations.AddField(
            model_name='userprod',
            name='user_info',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='database.UserProfile'),
        ),
    ]
|
[
"850002098@qq.com"
] |
850002098@qq.com
|
1cf068224d1fb52bbadc425a5c816041a20654ba
|
087cd843cf37e2ae3c8320f84e1005b3c2d786a3
|
/appdb/webdb/migrations/0001_initial.py
|
8d1e53c062ff733cb3b38ff80da0295998f1f54d
|
[] |
no_license
|
miguelpfitscher/DjangoDB_RateProfessor
|
ceb719d7882fb608f88498f98ee4fb618d4542c5
|
61be81828f4d6c34522dc10f8034dda34d849642
|
refs/heads/master
| 2021-03-27T17:06:37.899069
| 2016-11-27T03:12:18
| 2016-11-27T03:12:18
| 73,835,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-08 13:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for a poll-style schema:
    # Question (1) -< Choice, linked by the FK added in the last operation.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        # FK attached after Question is created so the target exists.
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='webdb.Question'),
        ),
    ]
|
[
"miguel.pfitscher@gmail.com"
] |
miguel.pfitscher@gmail.com
|
2f1d2acec448d3ddd845fbe4b3e7bd9d67a152a8
|
5281cf03d1269f341cbf006b43c6f09efa5e98ab
|
/minitn/heom/eom2.py
|
a7d36a68bffd2f58ce6c589a611047d4a3ff262a
|
[] |
no_license
|
vINyLogY/minimisTN
|
3927431eb7bf73ffd6cf42aa8faf7c189e7f3c55
|
0d6c7bd23ccf766e8d39c42082e8fb6b751ee2fc
|
refs/heads/master
| 2022-05-09T06:10:56.413522
| 2022-04-12T14:04:48
| 2022-04-12T14:04:48
| 140,754,574
| 3
| 1
| null | 2022-04-12T14:04:19
| 2018-07-12T19:11:10
|
Python
|
UTF-8
|
Python
| false
| false
| 5,420
|
py
|
#!/usr/bin/env python
# coding: utf-8
"""[No Rescale] Generating the derivative of the extended rho in SoP formalism.
Conversion:
rho[n_0, ..., n_(k-1), i, j]
"""
from __future__ import absolute_import, division, print_function
import logging
from builtins import filter, map, range, zip
from itertools import product
from minitn.lib.backend import np
from minitn.lib.tools import __
from minitn.heom.noise import Correlation
DTYPE = np.complex128
class Hierachy(object):
    # NOTE(review): the (misspelled) class name is the public name other
    # modules import, so it is kept as-is.

    # Reduced Planck constant in the working unit system.
    hbar = 1.0

    def __init__(self, n_dims, sys_hamiltonian, sys_op, corr):
        """
        Parameters
        ----------
        n_dims : np.ndarray
            a vector representing the possible n
        sys_hamiltonian : np.ndarray
            H_s
        sys_op :
            X_s in in H_sb X_s (x) X_b
        corr : Correlation
            Correlation caused by X_b
        """
        self.n_dims = n_dims
        self.k_max = len(n_dims)
        assert isinstance(corr, Correlation)
        assert self.k_max == corr.k_max
        # Axis indices of the system bra/ket legs, which come after the
        # k_max bath (hierarchy) axes in the extended rho.
        self._i = len(n_dims)
        self._j = len(n_dims) + 1
        self.corr = corr
        assert sys_op.ndim == 2
        assert sys_op.shape == sys_hamiltonian.shape
        self.n_states = sys_op.shape[0]
        self.op = sys_op
        self.h = sys_hamiltonian

    def gen_extended_rho(self, rho):
        """Get rho_n from rho with the conversion:
            rho[n_0, ..., n_(k-1), i, j]

        Parameters
        ----------
        rho : np.ndarray
            Square (n_states x n_states) system density matrix.
        """
        shape = list(rho.shape)
        assert len(shape) == 2 and shape[0] == shape[1]
        # Let: rho_n[0, i, j] = rho and rho_n[n, i, j] = 0
        ext = np.zeros((np.prod(self.n_dims),))
        ext[0] = 1
        rho_n = np.reshape(np.tensordot(ext, rho, axes=0), list(self.n_dims) + shape)
        return np.array(rho_n, dtype=DTYPE)

    def _raiser(self, k):
        """Acting on 0-th index"""
        # Superdiagonal matrix: shifts occupation n_k -> n_k + 1.
        dim = self.n_dims[k]
        return np.eye(dim, k=1)

    def _lower(self, k):
        """Acting on 0-th index"""
        # Subdiagonal matrix: shifts occupation n_k -> n_k - 1.
        dim = self.n_dims[k]
        return np.eye(dim, k=-1)

    def _numberer(self, k, start=0):
        # Diagonal matrix of occupation numbers for hierarchy index k.
        return np.diag(np.arange(start, start + self.n_dims[k]))

    def _sqrt_numberer(self, k, start=0):
        # Diagonal matrix of square roots of the occupation numbers.
        return np.diag(np.sqrt(np.arange(start, start + self.n_dims[k])))

    def _diff_ij(self):
        # Commutator part -i[H, rho] acting on the system bra/ket axes only.
        # delta = self.corr.delta_coeff
        return [
            [(self._i, -1.0j * np.transpose(self.h))],
            [(self._j, 1.0j * self.h)],
            # [(self._i, -delta * np.transpose(self.op @ self.op))],
            # [(self._i, np.sqrt(2.0) * delta * np.transpose(self.op)),
            # (self._j, np.sqrt(2.0) * delta * self.op)],
            # [(self._j, -delta * (self.op @ self.op))],
        ]

    def _diff_n(self):
        # Damping/mixing among hierarchy tiers from the exponents of the
        # bath-correlation expansion.
        if self.corr.exp_coeff.ndim == 1:
            gamma = np.diag(self.corr.exp_coeff)
        # NOTE(review): when exp_coeff is not 1-D, ``gamma`` stays undefined
        # and the loop below raises NameError — confirm expected input shape.
        ans = []
        for i, j in product(range(self.k_max), repeat=2):
            g = gamma[i, j]
            if not np.allclose(g, 0.0):
                term = [(i, -g * self._numberer(i))]
                if i != j:
                    # Off-diagonal exponents couple tiers i and j.
                    n_i = self._sqrt_numberer(i)
                    n_j = self._sqrt_numberer(j)
                    raiser = self._raiser(i)
                    lower = self._lower(j)
                    term.extend([(i, raiser @ n_i), (j, n_j @ lower)])
                ans.append(term)
        return ans

    def _diff_k(self, k):
        # System-bath coupling terms for hierarchy index k (raise/lower).
        c_k = self.corr.symm_coeff[k] + 1.0j * self.corr.asymm_coeff[k]
        numberer = self._numberer(k)
        raiser = self._raiser(k)
        lower = self._lower(k)
        return [
            [(self._i, -1.0j / self.hbar * np.transpose(self.op)), (k, lower)],
            [(self._j, 1.0j / self.hbar * self.op), (k, lower)],
            [(self._i, -1.0j / self.hbar * c_k * np.transpose(self.op)), (k, raiser @ numberer)],
            [(self._j, 1.0j / self.hbar * np.conj(c_k) * self.op), (k, raiser @ numberer)],
        ]

    def diff(self):
        """Get the derivative of rho_n at time t.
        Acting on 0-th index.
        """
        # Each entry is a list of (axis, matrix) factors forming one SoP term.
        derivative = self._diff_ij() + self._diff_n()
        for k in range(self.k_max):
            derivative.extend(self._diff_k(k))
        return derivative
if __name__ == '__main__':
    from minitn.heom.noise import Drude
    from minitn.lib.units import Quantity

    # System: 2x2 Hamiltonian with gap e and off-diagonal coupling v.
    e = Quantity(6500, 'cm-1').value_in_au
    v = Quantity(500, 'cm-1').value_in_au
    # Bath
    lambda_0 = Quantity(2000, 'cm-1').value_in_au  # reorganization energy
    omega_0 = Quantity(2000, 'cm-1').value_in_au  # vibrational frequency
    beta = Quantity(300, 'K').value_in_au  # temperature
    # Superparameters
    max_terms = 5  # (terms used in the expansion of the correlation function)
    max_tier = 10  # (number of possble values for each n_k in the extended rho)

    h = np.array([[0, v], [v, e]])
    op = np.array([[0, 0], [0, 1]])

    corr = Drude(lambda_0, omega_0, max_terms, beta)
    heom = Hierachy([max_tier] * max_terms, h, op, corr)

    # Equal superposition pure state, explicitly renormalized.
    phi = [1 / np.sqrt(2), 1 / np.sqrt(2)]
    phi /= np.linalg.norm(phi)
    rho_0 = np.tensordot(phi, phi, axes=0)

    init_rho = heom.gen_extended_rho(rho_0)
    print(init_rho.shape)
    # Dump every SoP term of the derivative: its axis labels and shapes.
    for n, term in enumerate(heom.diff()):
        print('- Term {}:'.format(n))
        for label, array in term:
            print('Label: {}, shape: {}'.format(label, array.shape))
|
[
"vinylogy9@gmail.com"
] |
vinylogy9@gmail.com
|
bd9b1111182878c62a19a1dc63358f23003f47c8
|
fd2a5a65913d6a45f2f192a50b8315eb155f89d5
|
/main.py
|
56ea827f3091c8d4c9db8986e3a8a6402932d515
|
[] |
no_license
|
zkerhcy/YoudaoTransPop
|
95435d87ddbc858862ab99fd533a2dc00343e43f
|
245645ef81cf65c3f68dff3878076d6258c30ee3
|
refs/heads/main
| 2022-06-07T05:14:21.961885
| 2021-09-14T07:16:47
| 2021-09-14T07:16:47
| 99,469,091
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
# coding=utf-8
# NOTE(review): Python 2 source — the bare ``print`` statement below does
# not parse under Python 3.

# Human-readable language names mapped to Youdao API language codes.
LANG_CODES = {
    "Chinese Simplified": "zh-CHS",
    "Chinese Traditional": "zh-CHT",
    "English": "EN",
    "French": "fr",
    "Japanese": "ja",
    "Korean": "ko",
    "Portuguese": "pt",
    "Russian": "ru",
    "Spanish": "es"
}

# Placeholders: fill in with your own Youdao API credentials before use.
KEY_CODE = {
    "APP_KEY": "${your_app_key}",
    "SEC_KEY": "${your_app_secret_key}",
}

import os
import ydtrans
import json

try:
    # POPCLIP_TEXT is the text the user selected, injected by PopClip.
    translator = ydtrans.Translator(app_key=KEY_CODE['APP_KEY'],
                                    text=os.environ['POPCLIP_TEXT'],
                                    sec_key=KEY_CODE['SEC_KEY'])
    translation = translator.translate_text(text=os.environ['POPCLIP_TEXT'],
                                            from_lang='auto',
                                            to_lang='auto',
                                            app_key=KEY_CODE['APP_KEY'])
    # s = json.loads(json.dumps(translation,ensure_ascii=False))
    print translation["translation"][0].encode('utf-8')
except Exception as e:
    # NOTE(review): every failure maps to exit code 1 and the caught
    # exception ``e`` is never reported — confirm this is intended.
    exit(1)
|
[
"zhao.chen@zoom.us"
] |
zhao.chen@zoom.us
|
21690331d4d4f5ba169d6a503f8a5ef4fd523d83
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_169/ch147_2020_04_21_21_09_20_774383.py
|
51268f75bd590ebc67f9d2cd20b09608dfd9e429
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
def mais_frequente(lista):
    """Return the most frequent element of *lista* (None for an empty list).

    The original implementation removed items from ``lista`` while iterating
    over it, which skips elements and only produced the right answer by
    accident on some inputs (it also destroyed the caller's list). Counting
    first fixes both problems. On ties, the element that reaches the maximum
    count first in encounter order wins.
    """
    from collections import Counter  # local import: snippet has no import header
    if not lista:
        return None
    return Counter(lista).most_common(1)[0][0]
|
[
"you@example.com"
] |
you@example.com
|
f0bd585ad377d92c9cc89d3950a3dbab7f3ad73b
|
698ea0e0201fd4b9057e1d4d4d69affa9f710828
|
/models.py
|
448a78a94f7354c3bddb57925a315f9b7323bc7a
|
[] |
no_license
|
fang0975/pygame-children-go-down-the-stairs
|
a2169e8100e034bad1646b11d176188d19dd3519
|
caba866df7a8b92b84eb7b0d5b10947ec320cb3e
|
refs/heads/master
| 2021-07-21T00:15:59.295722
| 2017-10-30T08:40:46
| 2017-10-30T08:40:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,985
|
py
|
import pygame
from pygame.locals import *
class Character(pygame.sprite.Sprite):
    # Player sprite animated from a 32x32-cell sheet ("images/player.png").

    # Movement states. NOTE(review): the original misspelling "defualt" is
    # part of the public interface and is kept.
    defualt = 0
    left = 1
    right = 2
    # (column, row) anchors into the sprite sheet for each state.
    rect_defualt = (8,0)
    rect_left = (3,0)
    rect_right = (0,1)

    def __init__(self, target):
        pygame.sprite.Sprite.__init__(self)
        self.target_surface = target
        self.image = None
        self.master_image = None
        self.rect = None
        self.topleft = 0, 0
        self.frame = 0          # current animation frame counter
        self.old_frame = -1
        self.frame_width = 1
        self.frame_height = 1
        self.first_frame = 1
        self.last_frame = 15
        self.last_time = 0

    def load(self):
        # Load the sheet and fix the per-frame cell size at 32x32.
        self.master_image = pygame.image.load(
            "images/player.png").convert_alpha()
        self.frame_width = 32
        self.frame_height = 32
        self.rect = 0, 0, self.frame_width, self.frame_height
        self.image_rect = self.master_image.get_rect()

    def update(self, status):
        # Pick the sheet cell for the current status, advance the walk
        # animation, and nudge the on-screen position 2px per update.
        frame_x = self.frame_width * Character.rect_defualt[0]
        frame_y = 0
        if(status == Character.defualt):
            frame_y = 0
        if(status == Character.left):
            self.frame += 1
            if self.frame > self.last_frame:
                self.frame = self.first_frame
            # Walk-left frames are laid out right-to-left on the sheet;
            # // 4 slows the animation to one cell per four updates.
            frame_x = self.frame_width * (Character.rect_left[0] - self.frame // 4)
            frame_y = Character.rect_left[1]
            self.image_rect.centerx -= 2
        if(status == Character.right):
            self.frame += 1
            if self.frame > self.last_frame:
                self.frame = self.first_frame
            frame_x = self.frame_width * (Character.rect_right[0] + self.frame // 4)
            frame_y = self.frame_height * Character.rect_right[1]
            self.image_rect.centerx += 2
        rect = (frame_x, frame_y, self.frame_width, self.frame_height)
        # subsurface shares pixels with the sheet — no copy is made.
        self.image = self.master_image.subsurface(rect)

    def draw(self, surface):
        # Blit at the tracked image_rect position (not self.rect).
        surface.blit(self.image, (self.image_rect[0], self.image_rect[1]))
class Floor(pygame.sprite.Sprite):
    """A horizontal platform sprite cut from a sheet image."""

    # Floor type: plain platform.
    default = 0

    def __init__(self, target):
        pygame.sprite.Sprite.__init__(self)
        self.target_surface = target
        self.image = None
        self.master_image = None
        self.rect = None
        self.topleft = 0, 0
        self.frame = 0
        self.old_frame = -1
        self.frame_width = 1
        self.frame_height = 1
        self.first_frame = 0
        self.last_frame = 0
        self.columns = 1
        self.last_time = 0
        # BUG FIX: the bare name ``default`` raised NameError at runtime —
        # the class attribute must be referenced as ``Floor.default``.
        self.type = Floor.default

    def load(self):
        """Load the sheet for this floor type and compute the frame count."""
        if(self.type== Floor.default):
            self.master_image = pygame.image.load(
                "images/normal.png").convert_alpha()
            self.frame_width = 97
            self.frame_height = 16
        self.rect = 0, 0, self.frame_width, self.frame_height
        rect = self.master_image.get_rect()
        # BUG FIX: ``width``/``height`` were undefined names; the sheet is
        # divided by the frame dimensions set above.
        self.last_frame = (rect.width // self.frame_width) * (rect.height // self.frame_height) - 1

    def update(self):
        # Static platform: nothing to animate.
        pass
|
[
"abc873693@gmail.com"
] |
abc873693@gmail.com
|
25bac3dd7faa4c6c36b8619c4ab34264bb01b969
|
4b49cdb855049ba6bdeca72b26708ad43bb27b8c
|
/discussion.py
|
178a2243b5910727951c81deb366a483eff381ff
|
[] |
no_license
|
rtsio/myumbc
|
fbf2565f10b59188088a8f076bae393e1889c1d0
|
17402656320ee6a764e025bbae56cd19f98ffb68
|
refs/heads/master
| 2020-04-05T23:34:03.566988
| 2014-07-25T05:11:53
| 2014-07-25T05:11:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,389
|
py
|
#!/usr/bin/python
import argparse
import time
import re
import sys
import base64
from ConfigParser import SafeConfigParser, RawConfigParser
from bs4 import BeautifulSoup
from myumbc import Scraper, Database
def progress(x, current, limit):
    # Print the current discussion id and percent complete. Python 2 print
    # statement; the trailing comma suppresses the newline.
    print "Discussion: " + str(x) + "\t" + str((float(current)/limit) * 100) + "%",
# Read config files
# If you get a missing section error, you must run this script from the directory
# with the config file, as python determines paths from the point of execution
config = SafeConfigParser()
config.read('config.txt')
myumbc_user = config.get('myumbc', 'username')
# Passwords are stored base64-obfuscated in config.txt (not encrypted).
myumbc_pw = base64.b64decode(config.get('myumbc', 'password'))
database_host = config.get('database', 'host')
database_user = config.get('database', 'username')
database_pw = base64.b64decode(config.get('database', 'password'))
database_name = config.get('database', 'database')

scraper = Scraper()
db = Database(database_host, database_user, database_pw, database_name)
scraper.login(myumbc_user, myumbc_pw)

# Read in blacklists - ignored threads/comments
# Opened 'a+' so newly discovered dead thread ids can be appended below.
blacklist = open('discussion_blacklist.txt', 'a+')
blacklisted_threads = blacklist.read().splitlines()
blacklisted_comments = []

# Scrape discussion ids in [start, end] inclusive, stamped with -date.
arguments = argparse.ArgumentParser()
arguments.add_argument('-start', action='store', type=int, required=True)
arguments.add_argument('-end', action='store', type=int, required=True)
arguments.add_argument('-date', action='store', required=True)
args = arguments.parse_args()
start = args.start
end = args.end + 1
total = end - start
date = args.date
# Main scrape loop: one iteration per discussion id.
# NOTE(review): indentation reconstructed — verify against the original file.
for x in xrange(start, end):
    page_exists = False
    current_discussion = str(x)
    if current_discussion not in blacklisted_threads:
        if scraper.valid("discussions", current_discussion):
            page_exists = True
        else:
            # Remember dead ids so later runs skip the HTTP round-trip.
            blacklist.write(current_discussion + '\n')
    if (page_exists):
        soup = BeautifulSoup(scraper.gethtml())
        # --- original post (the discussion topic itself) ---
        author_tag = soup.find(class_="discussion-post")
        # Offset keeps topic-post ids from colliding with comment ids.
        author_post_id = x + 1000000
        author_name = author_tag.find(class_="user first").string
        author_paws = int(author_tag.find(class_="count first last").string)
        author_avatar = re.search(r'background-image: url\(\'(.*)\?', author_tag.find(class_=re.compile("avatar"))['style']).group(1)
        author_inner_content = author_tag.find(class_="html-content").find(class_="html-content")
        if not author_inner_content:
            author_inner_content = author_tag.find(class_="button first last")
        else:
            # Strip embeds/iframes/objects, leaving a textual note in place.
            for tag in author_inner_content.find_all('embed'):
                embed_link = tag['src']
                tag.replace_with("(Embed object pointing to " + embed_link + " removed from original post)")
            for tag in author_inner_content.find_all('iframe'):
                iframe_link = tag['src']
                tag.replace_with("(Iframe object pointing to " + iframe_link + " removed from original post)")
            for tag in author_inner_content.find_all('object'):
                tag.replace_with("(Object tag removed from original post)")
            for tag in author_inner_content.find_all('param'):
                tag.replace_with("(Param tag removed from original post)")
        content_title = u'<b>(This post is a discussion topic originally entitled ' + author_tag.find(class_="title").string + ')</b> <br>'
        content = content_title + unicode(author_inner_content)
        # Drop span wrappers and class attributes from the stored HTML.
        content = re.sub(r'<span.*?>|<\/span>', '', content)
        content = re.sub(r'\sclass=".*?"', '', content)
        if not db.post_exists(author_post_id):
            db.process_post(author_post_id, x, author_name, author_paws, author_avatar, date, content, "d")
        else:
            db.update_post(author_post_id, x, author_name, author_paws, date, content, "d")
        # --- comments on the discussion ---
        # NOTE(review): the 'mine' and non-'mine' branches below are
        # near-duplicates differing only in which class index flags removal
        # (3 vs 4) — a candidate for extraction into a helper.
        for tag in soup.find_all(class_=re.compile("comment-\d+")):
            comment_id = tag['data-comment-id']
            comment_name = tag.find(class_="poster").string
            if (tag['class'][3] != 'mine'):
                if tag['class'][3] != 'removed':
                    comment_paws = int(tag.find(class_="paw").find(class_="count").string)
                    comment_avatar = re.search(r'background-image: url\(\'(.*)\?', tag.find(class_="avatar small")['style']).group(1)
                    comment_inner_content = tag.find(class_="html-content")
                    if comment_inner_content:
                        # NOTE(review): these inner loops rebind the outer
                        # loop variable ``tag``; iteration is unaffected
                        # (find_all returns a list) but the name is clobbered.
                        for tag in comment_inner_content.find_all('embed'):
                            embed_link = tag['src']
                            tag.replace_with("(Embed object pointing to " + embed_link + " removed from original post)")
                        for tag in comment_inner_content.find_all('iframe'):
                            iframe_link = tag['src']
                            tag.replace_with("(Iframe object pointing to " + iframe_link + " removed from original post)")
                        for tag in comment_inner_content.find_all('object'):
                            tag.replace_with("(Object tag removed from original post)")
                        for tag in comment_inner_content.find_all('param'):
                            tag.replace_with("(Param tag removed from original post)")
                    comment_content = unicode(comment_inner_content)
                    comment_content = re.sub(r'<span.*?>|<\/span>', '', comment_content)
                    comment_content = re.sub(r'\sclass=".*?"', '', comment_content)
                    if not db.post_exists(comment_id):
                        db.process_post(comment_id, x, comment_name, comment_paws, comment_avatar, date, comment_content, "d")
                    else:
                        db.update_post(comment_id, x, comment_name, comment_paws, date, comment_content, "d")
                else:
                    # Removed comment: store a tombstone record instead.
                    comment_avatar = re.search(r'background-image: url\(\'(.*)\?', tag.find(class_="avatar xxsmall")['style']).group(1)
                    db.process_removed(comment_id, x, comment_name, comment_avatar, date)
            elif (tag['class'][3] == 'mine'):
                if tag['class'][4] != 'removed':
                    comment_paws = int(tag.find(class_="paw").find(class_="count").string)
                    comment_avatar = re.search(r'background-image: url\(\'(.*)\?', tag.find(class_="avatar small")['style']).group(1)
                    comment_inner_content = tag.find(class_="html-content")
                    if comment_inner_content:
                        for tag in comment_inner_content.find_all('embed'):
                            embed_link = tag['src']
                            tag.replace_with("(Embed object pointing to " + embed_link + " removed from original post)")
                        for tag in comment_inner_content.find_all('iframe'):
                            iframe_link = tag['src']
                            tag.replace_with("(Iframe object pointing to " + iframe_link + " removed from original post)")
                        for tag in comment_inner_content.find_all('object'):
                            tag.replace_with("(Object tag removed from original post)")
                        for tag in comment_inner_content.find_all('param'):
                            tag.replace_with("(Param tag removed from original post)")
                    comment_content = unicode(comment_inner_content)
                    comment_content = re.sub(r'<span.*?>|<\/span>', '', comment_content)
                    comment_content = re.sub(r'\sclass=".*?"', '', comment_content)
                    if not db.post_exists(comment_id):
                        db.process_post(comment_id, x, comment_name, comment_paws, comment_avatar, date, comment_content, "d")
                    else:
                        db.update_post(comment_id, x, comment_name, comment_paws, date, comment_content, "d")
                else:
                    comment_avatar = re.search(r'background-image: url\(\'(.*)\?', tag.find(class_="avatar xxsmall")['style']).group(1)
                    db.process_removed(comment_id, x, comment_name, comment_avatar, date)
    print str(x)
    #progress(x, (x - start + 1), total)
blacklist.close()
db.close()
|
[
"rostislav.tsiomenko@gmail.com"
] |
rostislav.tsiomenko@gmail.com
|
fc6fcfc0ce0a0ded8f9d94bdb048f19d554c8186
|
1916818ab9c19064381e857e8ba8996f387d9056
|
/toucan/alert_api/views.py
|
874cd1c1f080813044569999496890ee3b80d716
|
[
"MIT"
] |
permissive
|
toucan-project/TOUCAN
|
4906889abdb6427237c288f1ab57df819761c494
|
d562e1191b5ef10480be819ba8c584034c25259b
|
refs/heads/master
| 2021-09-26T22:43:49.663430
| 2021-02-26T14:56:18
| 2021-02-26T14:56:18
| 204,929,596
| 3
| 1
|
MIT
| 2021-09-23T23:26:23
| 2019-08-28T12:44:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,394
|
py
|
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.parsers import FileUploadParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.exceptions import ValidationError, NotAuthenticated
from alert_api.models import MimiAlertItem, SampleItem, CanaryAlertItem
from alert_api.serializers import UploadedFileSerializer
from alert_api.serializers import CanaryAlertItemSerializer
from alert_api.serializers import JSONSerializer, GetCanaryAlertItemSerializer
class SysmonIncoming(APIView):
    """Unauthenticated endpoint receiving Sysmon events related to Mimikatz.

    put: Record a MimiAlert; respond 200 when a sample already exists for
    the reported hash, 201 when none has been uploaded yet.
    """

    def put(self, request):
        serializer = JSONSerializer(data=request.data)
        if not serializer.is_valid():
            raise ValidationError('Invalid JSON')

        event = serializer.validated_data
        remote_ip = request.META.get('REMOTE_ADDR')
        alert = MimiAlertItem.create_object(remote_ip, dict(event))

        # 200 tells the agent the sample is already known; 201 asks it to
        # upload one.
        already_known = SampleItem.sample_exists(alert.md5)
        if already_known:
            return Response(status=status.HTTP_200_OK)
        return Response(status=status.HTTP_201_CREATED)
class FileItem(APIView):
    """
    An unauthenticated API endpoint for incoming samples.

    post: Create a SampleItem from the incoming binary file.
    """
    # hmm, an unauthenticated file upload?
    parser_classes = (FileUploadParser,)

    def post(self, request, filename):
        # Validate that a file and its content type are present.
        data = self._get_request_data(request.data)
        serializer = UploadedFileSerializer(data=data)
        if not serializer.is_valid():
            raise ValidationError()

        sample = serializer.validated_data.get('file')
        SampleItem.save_sample(filename, sample)
        return Response(status=status.HTTP_200_OK)

    def _get_request_data(self, data):
        # Flatten the uploaded file into the shape UploadedFileSerializer expects.
        return {
            'file': data['file'],
            'content_type': data['file'].content_type
        }
class CanaryAlertItems(APIView):
    """
    Authenticated view for querying triggered alerts.

    get: Get triggered CanaryAlertItem(s)
    delete: Delete a triggered CanaryAlertItem by id, only possible
    with elevated privileges.
    """
    permission_classes = (IsAuthenticated,)

    def get(self, request, id=None):
        if id:
            serializer = GetCanaryAlertItemSerializer(data={'id': id})
            # Bug fix: the is_valid() result was ignored, so invalid ids
            # proceeded with missing validated_data; raise a 400 instead.
            serializer.is_valid(raise_exception=True)
            id = serializer.validated_data.get('id')
            items = CanaryAlertItem.objects.get(pk=id)
            serialized = CanaryAlertItemSerializer(items)
        else:
            items = CanaryAlertItem.objects.all()
            serialized = CanaryAlertItemSerializer(items, many=True)
        return Response(serialized.data, status=status.HTTP_200_OK)

    def delete(self, request, id):
        # Only superusers may delete alert history.
        if not request.user.is_superuser:
            raise NotAuthenticated("Not allowed to delete this entry")

        serialized = GetCanaryAlertItemSerializer(data={'id': id})
        # Bug fix: validation result was ignored here as well.
        serialized.is_valid(raise_exception=True)
        id = serialized.validated_data.get('id')
        item = CanaryAlertItem.objects.get(pk=id)
        item.delete()
        return Response(status=status.HTTP_200_OK)
|
[
"github@evicted.ninja"
] |
github@evicted.ninja
|
0466b8e35fbd5bd89a68185439acb789f17f02db
|
9d4bde3947edc11ba87d06068581c6a91696e8e6
|
/chapter_07/q08.py
|
14b4fbd3c225f9a79e2d6aea63f9d2ecd1ad004d
|
[] |
no_license
|
zanariah8/Starting_Out_with_Python
|
e6d8c6cbd187043160c6408fc4ac5f47c35e7c57
|
92751fde462683c9d62934409fa469ccddc1d519
|
refs/heads/master
| 2020-04-23T20:38:13.712144
| 2019-02-18T20:24:15
| 2019-02-18T20:24:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
# name search
def main():
    """Load both popular-name lists and run the interactive search."""
    try:
        search_name(girls_name(), boys_name())
    except IOError:
        # Either names file may be missing.
        print("No such file or directory")
def girls_name():
    """Return the list of girl names from girlnames.txt, newline-stripped.

    Raises IOError/OSError if the file is missing (handled by main()).
    """
    # 'with' guarantees the handle is closed even if reading fails
    # (the original leaked it on error) and the comprehension replaces
    # the manual index loop.
    with open("girlnames.txt", "r") as infile:
        return [line.rstrip("\n") for line in infile.readlines()]
def boys_name():
    """Return the list of boy names from boynames.txt, newline-stripped.

    Raises IOError/OSError if the file is missing (handled by main()).
    """
    # 'with' guarantees the handle is closed even if reading fails
    # (the original leaked it on error) and the comprehension replaces
    # the manual index loop.
    with open("boynames.txt", "r") as infile:
        return [line.rstrip("\n") for line in infile.readlines()]
def search_name(girls_list, boys_list):
    """Prompt for a name and report whether it appears in either list."""
    name = input("Enter a name: ")
    found = name in girls_list or name in boys_list
    if found:
        print(name, "is among the most popular names.")
    else:
        print(name, "is NOT found among the most popular names.")
# Program entry point: run the interactive popular-name search.
main()
|
[
"noreply@github.com"
] |
zanariah8.noreply@github.com
|
8ad68457f32cadbf1ff7b1888a2c464f3da77c81
|
3b60e6f4bbc011003ac4929f01eb7409918deb79
|
/Analysis_v1/cleandatasetslist.py
|
ff835fab021624dc4e3a865231d163bd923cd01e
|
[] |
no_license
|
uzzielperez/Analyses
|
d1a64a4e8730325c94e2bc8461544837be8a179d
|
1d66fa94763d7847011ea551ee872936c4c401be
|
refs/heads/master
| 2023-02-09T04:54:01.854209
| 2020-09-07T14:57:54
| 2020-09-07T14:57:54
| 120,850,137
| 0
| 0
| null | 2020-06-17T16:48:16
| 2018-02-09T03:14:04
|
C++
|
UTF-8
|
Python
| false
| false
| 235
|
py
|
inputfile = 'datasetlist.txt'
# Read the full dataset list; 'with' ensures the handle is closed
# (the original left both files open).
with open(inputfile) as f:
    lines = f.read().split('\n')

outfile = 'cleaneddatasetlist.txt'
# Keep only lines belonging to the 2018D-v2 (run 323775) JSON.
with open(outfile, "w+") as out:
    for line in lines:
        if 'json_toRun2018Dv2_323775' in line:
            # print(line) is valid in both Python 2 and 3, unlike the
            # original 'print line' statement form.
            print(line)
            out.write(line)
|
[
"uzzie.perez@cern.ch"
] |
uzzie.perez@cern.ch
|
101d220deac1c24bedab54a72d490bf76fab5b1c
|
7dd8a4ddedd429d7522dd903c741f8ac508d0374
|
/pkg/devices/BaseDevice.py
|
a08d824d35836a2efd3e2cefef4ab2c8b6422ee5
|
[
"BSD-2-Clause"
] |
permissive
|
junchaohu/SensorActuatorManager
|
349b955616c7161ef49f383c91ff7bd4bbe82bd5
|
75c9594c3cabcdadc4e13acc0e7e9d5240e15f33
|
refs/heads/master
| 2021-05-26T20:49:18.898149
| 2013-07-17T06:15:49
| 2013-07-17T06:15:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,317
|
py
|
# Author: Mani Srivastava, NESL, UCLA
# Created on: May 22, 2013
#
# Copyright notice in LICENSE file
#
import threading, Queue
import time
import logging
from pkg.utils.debug import debug_mesg
from pkg.utils.misc import is_valid_host
class Device(threading.Thread):
    """Base class for a polled sensor/actuator device.

    Runs as a thread that calls get_sample() in a loop at (roughly)
    sample_interval seconds and fans each sample out to every attached
    output queue. Subclasses override get_sample()/get_device_channels().
    """

    def __init__(self, type, id, params):
        """Parse the configuration parameters common to all devices.

        type/id identify the device; params is a dict of configuration
        strings. Device-specific keys are left to subclasses. Malformed
        host/port/timeout specifications terminate the process (exit(1)),
        matching the original behavior.
        """
        threading.Thread.__init__(self)
        self.type = type
        self.id = id
        self.params = params
        self.outputqueues = []
        self.description = "Device"
        # [attempts, success, attempts_last, success_last]
        self.statistics = [0, 0, 0, 0]
        # take care of common parameters ... rest are device specific
        if 'sample_interval' in self.params:
            try:
                self.sample_interval = float(self.params['sample_interval'])
            except ValueError:
                # Note: sample_interval stays unset; run() guards with hasattr.
                logging.error("sample interval for device " + self.type + ":" + self.id + " is not numeric.")
        if 'host' in self.params:
            try:
                x = self.params['host'].split(":")
                # accept "host" or "host:port", nothing longer
                assert (len(x) < 3)
                if len(x) == 2:
                    self.host = x[0]
                    self.port = int(x[1])
                else:
                    self.host = x[0]
                assert (is_valid_host(self.host))
            except Exception:  # narrowed from a bare 'except:'
                logging.error("malformed host specification for device " + self.type + ":" + self.id)
                exit(1)
        if 'port' in self.params:
            try:
                self.port = int(self.params['port'])
            except (ValueError, TypeError):  # narrowed from a bare 'except:'
                logging.error("malformed port specification for device " + self.type + ":" + self.id)
                exit(1)
        if 'timeout' in self.params:
            try:
                self.timeout = int(self.params['timeout'])
            except (ValueError, TypeError):  # narrowed from a bare 'except:'
                logging.error("malformed timeout specification for device " + self.type + ":" + self.id)
                exit(1)
        if 'url' in self.params:
            self.url = self.params['url']
        if 'serial' in self.params:
            self.serial = self.params['serial']
        self.sensor_names_map = self.params.get('sensor_names_map', {})

    def attach_queue(self, q):
        """Register an output queue that will receive every sample."""
        self.outputqueues.append(q)

    def get_device_type(self):
        return self.type

    def get_device_id(self):
        return self.id

    def get_device_channels(self):
        # Subclass hook.
        pass

    def get_sample(self):
        # Subclass hook; must return a truthy sample or None.
        pass

    def run(self):
        """Poll get_sample() forever, pushing results to attached queues."""
        logging.debug("Running thread for device " + self.type + ":" + self.id)
        while True:
            start_time = time.time()
            s = self.get_sample()
            if s:
                for q in self.outputqueues:
                    q.put(s)
            # Sleep whatever remains of the sampling period (never negative).
            diff = time.time() - start_time
            if hasattr(self, 'sample_interval'):
                time.sleep(max(self.sample_interval - diff, 0))
|
[
"mbs@ucla.edu"
] |
mbs@ucla.edu
|
02799b9dd3f69789fcc67cf2c747f058d776d4b7
|
b6a64e149b6e0a34884898ca520f9ef2de75e3e0
|
/main.py
|
ba9167f01f680eeada23f890d6ff3ccedb5fe115
|
[] |
no_license
|
neilfawkes/vk-tinder
|
111677e2a1fc69a1935354d61be746f718061eb5
|
1cfa5840238410758757e576bcf3ff1c8c5b94e6
|
refs/heads/master
| 2023-03-22T12:59:19.873260
| 2021-03-17T08:13:00
| 2021-03-17T08:13:00
| 279,085,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,420
|
py
|
import requests
import json
import time
from pprint import pprint
from urllib.parse import urlencode
from pymongo import MongoClient
def api_request(URL, params):
    """GET *URL* with *params* and return the decoded JSON response.

    Loops while VK reports error_code 6 (rate limit), sleeping 1s between
    attempts. On a read timeout, retries up to a few more times before
    giving up (returns None in that case).
    """
    try:
        repeat = True
        while repeat:
            response = requests.get(URL, params=params).json()
            # VK error_code 6 == "too many requests per second": back off.
            if 'error' in response and 'error_code' in response['error'] and response['error']['error_code'] == 6:
                time.sleep(1)
            else:
                repeat = False
        return response
    except requests.exceptions.ReadTimeout:
        n = 1
        while n < 3:
            print('\n Reconnecting to server. \n')
            try:
                return requests.get(URL, params=params).json()
            except requests.exceptions.ReadTimeout:
                print('\n Reconnecting to server. \n')
                n += 1
        else:
            # while-else: runs only when the retries are exhausted.
            print('Failed, please check your Internet connection.')
def get_token():
    """Print the VK OAuth authorization URL the user must visit."""
    oauth_params = {
        'client_id': 7412922,
        'display': 'page',
        'scope': 'friends, groups, stats, offline',
        'response_type': 'token',
        'v': '5.52'
    }
    print('https://oauth.vk.com/authorize' + '?' + urlencode(oauth_params))
def welcome():
    """Print the contents of welcome.txt as a greeting banner."""
    with open('welcome.txt') as banner:
        print(banner.read())
def get_people(access_token, sex, age_from, age_to, city_id, country_id):
    """Search VK users matching the filters; return the raw API reply."""
    query = {
        'v': '5.89',
        'access_token': access_token,
        'sex': sex,
        'age_from': age_from,
        'age_to': age_to,
        'status': 6,          # relationship status "in active search"
        'has_photo': 1,
        'city': city_id,
        'country': country_id,
        'is_closed': False,
        'can_access_closed': False,
    }
    return api_request('https://api.vk.com/method/users.search', query)
def get_country_code():
    """Ask for a country name and return its code from countries.json.

    Re-prompts (recursively) until the input matches a known country.
    """
    user_country = input('Введите страну для поиска: ').capitalize()
    with open('countries.json', 'r') as countries_file:
        countries = json.load(countries_file)
    if user_country not in countries.keys():
        print('Страна введена неверно, попробуйте ещё раз.')
        # Bug fix: the recursive retry's result was discarded, so every
        # retry returned None; propagate it to the caller.
        return get_country_code()
    # Membership was checked above, so a direct lookup replaces the
    # original linear scan over items().
    return countries[user_country]
def get_country_id():
    """Resolve the user's chosen country to a VK database country id."""
    code = get_country_code()
    response = api_request(
        'https://api.vk.com/method/database.getCountries',
        {'v': '5.80', 'access_token': access_token, 'code': code},
    )
    return response['response']['items'][0]['id']
def get_city_id(country_id):
    """Ask for a city name and return its VK id within *country_id*.

    Re-prompts (recursively) until VK finds a matching city.
    """
    city = input('Введите желаемый город для поиска: ').capitalize()
    URL = 'https://api.vk.com/method/database.getCities'
    params = {'v': '5.80', 'access_token': access_token, 'country_id': country_id, 'q': city}
    result = api_request(URL, params)
    if result['response']['count'] == 0:
        print('Город введен неверно, попробуйте ещё раз.')
        # Bug fix: return the retry's result instead of dropping it
        # (the original returned None after any mistyped city).
        return get_city_id(country_id)
    return result['response']['items'][0]['id']
def find_photos(owner_id):
    """Return up to 3 (url, likes) pairs for *owner_id*'s profile photos,
    sorted by like count descending. Returns [] on a private profile.
    """
    URL = 'https://api.vk.com/method/photos.get'
    params = {'v': '5.80', 'access_token': access_token, 'owner_id': owner_id, 'album_id': 'profile', 'extended': 1, 'count': 1000}
    result = api_request(URL, params)
    photos = {}
    try:
        for items in result['response']['items']:
            for size in items['sizes']:
                # 'x' selects one particular size variant per photo.
                if size['type'] == 'x':
                    photos[size['url']] = items['likes']['count']
    except KeyError:
        # No 'response' key: VK returned an error payload instead.
        # error_code 15 == access denied (private profile).
        if result['error']['error_code'] == 15:
            print('Не удается загрузить фото, приватный профиль.')
        else:
            print(result)
    return sorted(photos.items(), key=lambda kv: kv[1], reverse=True)[0:3]
def write_json(ten_users):
    """Dump name/link/top-photos records for *ten_users* to people.json."""
    people_list = [
        {
            'photos': find_photos(user['id']),
            'first name': user['first_name'],
            'second name': user['last_name'],
            'link': f"https://vk.com/id{user['id']}",
        }
        for user in ten_users
    ]
    with open('people.json', 'w') as people_file:
        json.dump(people_list, people_file, ensure_ascii=False, indent=4)
def write_result(people):
    """Insert every found user into MongoDB (VK.users); return all users."""
    users = MongoClient()['VK']['users']
    for person in people['response']['items']:
        users.insert_one(person)
    return list(users.find())
def get_ten_users(people_db, n1, n2):
    """Export people_db[n1:n2] to people.json, then optionally recurse
    onto the next window of ten users (driven by user input)."""
    ten_users = ckeck_is_empty(people_db, n1, n2)
    if ten_users != None:
        write_json(ten_users)
        print('Результаты поиска записаны в json-файл.')
        if input('Найти следующих 10 человек? (да/нет): ') == "да":
            print('Поиск в процессе...')
            # Slide the window forward by one page.
            n1 += 10
            n2 += 10
            get_ten_users(people_db, n1, n2)
def check_age():
    """Ask for an age range like "18-35"; return (age_from, age_to) strings.

    Re-prompts until both bounds are numeric and ordered low-to-high.
    """
    age = input('Введите диапазон возраста в формате "18-35": ')
    age_from = age[:2]
    age_to = age[-2:]
    try:
        # Bug fix: the comparison result used to be discarded, so the
        # low-to-high check never fired (and the TypeError branch it was
        # meant for was unreachable); enforce it and return the retry's
        # result instead of None.
        if int(age_from) > int(age_to):
            print('Укажите диапазон возраста от меньшего к большему')
            return check_age()
        return age_from, age_to
    except ValueError:
        print('Введите числа')
        return check_age()
def check_sex():
    """Ask for a sex filter; return it ('1' жен., '2' муж., '0' любой)."""
    sex = input('Введите пол (1 - жен., 2 - муж., 0 - любой): ')
    possible_vars = [1, 2, 0]
    try:
        if int(sex) in possible_vars:
            return sex
        print('Укажите индекс одного из доступных вариантов (1, 2 или 0)')
        # Bug fix: propagate the retry's result (the original discarded
        # the recursive call and returned None).
        return check_sex()
    except ValueError:
        print('Укажите индекс одного из доступных вариантов (1, 2 или 0)')
        return check_sex()
def clear_my_db():
    """Drop the VK.users collection; return its (now empty) contents."""
    vk_db = MongoClient()['VK']
    users = vk_db['users']
    vk_db.users.drop()
    return list(users.find())
def ckeck_is_empty(people_db, n1, n2):
    """Return people_db[n1:n2], or None when that window is empty.

    On an empty window, the user may restart the whole search via main().
    NOTE(review): when the user answers 'да', main()'s result is discarded
    and this function still returns None — confirm this is intended.
    """
    if not people_db[n1:n2]:
        if input('По вашему запросу ничего не найдено, хотите изменить параметры поиска? ') == 'да':
            main()
    else:
        return people_db[n1:n2]
def main():
    """Drive one full search: ask filters, query VK, store, then export."""
    country = get_country_id()
    city = get_city_id(country)
    sex = check_sex()
    age_from, age_to = check_age()
    print('Поиск в процессе...')
    found = get_people(access_token, sex, age_from, age_to, city, country)
    people_db = write_result(found)
    # Start with the first page of ten results.
    get_ten_users(people_db, 0, 10)
if __name__ == "__main__":
    # Greet, obtain an access token (guiding the user through OAuth if
    # they do not have one yet), then run the search loop.
    welcome()
    access_token = input('Введите токен для ВК (если у Вас нет токена,\nнапечатайте "нет" и пройдите по ссылке): ')
    if access_token == "нет":
        get_token()
        access_token = input('Введите полученный токен для ВК: ')
    main()
    # print(clear_my_db())
|
[
"13dropsofsun@gmail.com"
] |
13dropsofsun@gmail.com
|
8d948459ced273568dc01a0c3df913416818a7dd
|
48d3ca5ebb17e9ee137cf45e1d8010d8eff9c65f
|
/BOJ/python/4195(친구네트워크).py
|
7fc28a7fbead43c60e0e38bbcc142dfdf62f27ae
|
[] |
no_license
|
dooking/CodingTest
|
81d18e711a806ee14946b2aa006eda9c235f0b04
|
1467700f3461c5fe61bf1b4e668e0c9016d8c860
|
refs/heads/master
| 2023-02-23T15:59:15.274908
| 2023-02-10T06:35:12
| 2023-02-10T06:35:12
| 237,138,420
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
def find(x):
    """Return the root of x's set, compressing the path along the way."""
    root = parent[x]
    if root != x:
        # Point x directly at the root (path compression).
        parent[x] = find(root)
    return parent[x]
def union(x, y):
    """Merge the sets of x and y; x's root absorbs y's component count."""
    root_x = find(x)
    root_y = find(y)
    if root_x == root_y:
        return
    parent[root_y] = root_x
    number[root_x] += number[root_y]
# BOJ 4195 (friend network): for each friendship pair, union the two
# users and print the size of the resulting connected component.
test_case = int(input())
for _ in range(test_case):
    parent = dict()   # node -> parent in the union-find forest
    number = dict()   # root -> component size
    f = int(input())
    for _ in range(f):
        x, y = input().split(' ')
        # Lazily register unseen users as singleton components.
        if x not in parent:
            parent[x] = x
            number[x] = 1
        if y not in parent:
            parent[y] = y
            number[y] = 1
        union(x, y)
        print(number[find(x)])
|
[
"123456ghghgh@naver.com"
] |
123456ghghgh@naver.com
|
da2ad3b2f4ed741c04d6ceeb4c16732a5f9d261b
|
b175a3abfa14992d9b07d53adc12700ded3b1c02
|
/MachineLearning/OVO&OVR/case-ovr.py
|
c99b38ad4c882de397bff3a0f14fa038a2b4b5f0
|
[] |
no_license
|
BarryZM/Python-AI
|
d695a1569e5497da391b578e6638cc11479bfbaa
|
251dc4002f9d7e5dd789e62b813651f2006f6ab6
|
refs/heads/master
| 2023-03-17T15:33:35.530258
| 2020-04-01T10:32:47
| 2020-04-01T10:32:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
# -*- coding: utf-8 -*-
'''
Created by hushiwei on 2018/6/18
Desc : OVR案例代码
'''

import numpy as np
from sklearn import datasets
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score, precision_score

# 加载数据
iris = datasets.load_iris()
x, y = iris.data, iris.target
print('样本数量:%d, 特征数量:%d' % x.shape)
print("label分类个数: ", np.unique(y))

# ovr模型构建
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(x, y)

# 预测结果输出
print(clf.predict(x))
print('准确率:%.3f' % accuracy_score(y, clf.predict(x)))
# Bug fix: precision_score on a multiclass target requires an explicit
# averaging mode (it raises without one on multiclass y), and the label
# wrongly said 准确率 (accuracy) for a precision value.
print('精确率:%.3f' % precision_score(y, clf.predict(x), average='macro'))

# 模型属性输出: one binary LinearSVC per class
k = 1
for item in clf.estimators_:
    print('第%d个模型' % k)
    print(item)
    k += 1
print(clf.classes_)
|
[
"hsw.time@gmail.com"
] |
hsw.time@gmail.com
|
2c7d6be1687ee4887ff265a690c14e5190c8f27b
|
52acdb157a0b15d60b8e2c06fc02880fd8340ef6
|
/retrive.py
|
59c0729a3c1a15a19c64b299961cf4b10fa1a2c7
|
[] |
no_license
|
dawnblade97/licplatereg
|
7f50aa243378abbca8c32c66d9f60c1e9f0449e5
|
230ab4fcce5b5345a2d7842c283be9a4df79ec91
|
refs/heads/master
| 2020-07-11T10:33:20.952968
| 2019-08-27T10:53:41
| 2019-08-27T10:53:41
| 204,514,514
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
import json
import urllib.request as ur

# Download every plate image referenced in the JSON-lines dataset
# (one JSON object per line, each with a 'content' image URL).
cnt = 0
for line in open('Indian_Number_plates.json', 'r'):
    data = json.loads(line)
    # Save sequentially-numbered images next to the dataset.
    ur.urlretrieve(data['content'], "C:\\Users\\maste\\Desktop\\dataset\\img" + str(cnt) + ".jpg")
    cnt += 1
|
[
"49042994+dawnblade97@users.noreply.github.com"
] |
49042994+dawnblade97@users.noreply.github.com
|
594faf68337ee58b731d63469a8680468787772f
|
daee02c2c5d0d3848592cd9fe7dee2c1ba62415c
|
/hw5/test.py
|
615c4e16f4f2f9b83cf92d86fafa1a5dc45785ec
|
[] |
no_license
|
seanbbear/SocialNetwork_Assignment
|
7cc9b4a6824dfb09955d998747e0e33b3bdbff3a
|
88fb6e9cb79a2a64b1e246a24690d14aad190972
|
refs/heads/master
| 2022-11-25T18:19:20.960473
| 2020-07-10T02:06:08
| 2020-07-10T02:06:08
| 278,507,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
import numpy as np
from tqdm import tqdm
data = np.load("seg_data.npy",allow_pickle=True)
def get_wordlist(data):
    """Return the deduplicated vocabulary across all documents in *data*."""
    print("---start getting wordlist---")
    vocabulary = []
    for document in tqdm(data):
        vocabulary.extend(document)
    return list(set(vocabulary))
def chi_square(word):
    """Return (word, chi-square score) of *word* over the global `data`
    corpus, with presence of "韓國瑜" as the class split.

    Builds the 2x2 contingency table (class x word-presence) and sums
    (expected - observed)^2 / expected over all four cells.
    """
    pos = 0
    neg = 0
    shitty = 0
    unshitty = 0
    pos_shitty = 0
    pos_unshitty = 0
    neg_shitty = 0
    neg_unshitty = 0
    for doc in data:
        if "韓國瑜" in doc:
            pos += 1
            if word in doc:
                shitty += 1
                pos_shitty += 1
            else:
                unshitty += 1
                pos_unshitty += 1
        else:
            neg += 1
            if word in doc:
                shitty += 1
                neg_shitty += 1
            else:
                unshitty += 1
                neg_unshitty += 1
    total = shitty + unshitty
    ex = [pos * (shitty / total), pos * (unshitty / total),
          neg * (shitty / total), neg * (unshitty / total)]
    ob = [pos_shitty, pos_unshitty, neg_shitty, neg_unshitty]
    score = 0
    # Bug fix: range(3) skipped the fourth contingency cell; a 2x2
    # chi-square statistic sums over all four cells.
    for i in range(4):
        score += ((ex[i] - ob[i]) ** 2) / ex[i]
    return word, score
if __name__ == "__main__":
    # Score every vocabulary word against the corpus and persist results.
    chi_score = {}
    wordlist = get_wordlist(data)
    # print(wordlist[5])
    for word in tqdm(wordlist):
        chi_score[word] = chi_square(word)
        # expect = expectation(word)
        # chi_score[word] = chi_square(expect,observe)
    np.save("chi_score_test.npy", chi_score)
|
[
"k7489759@gmail.com"
] |
k7489759@gmail.com
|
c789cf9d1eb18ea4437af34c26ae53afb498e638
|
5c94e4df517745a564250f5a066d1c05dcf02c24
|
/car/migrations/0001_initial.py
|
2968b3369b930e37c84741edb8a7dba42a558fe2
|
[] |
no_license
|
aysenurozbay/Airport_Transfer_with_Python_Django
|
e4cd4310f0121f5ecf6ca97f33c466de8ff6bc2a
|
e5a31d59ec6e968e996ee75542bd4891e53c466a
|
refs/heads/master
| 2023-06-05T20:43:19.860280
| 2021-07-02T12:06:51
| 2021-07-02T12:06:51
| 347,430,605
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
# Generated by Django 3.1.7 on 2021-03-14 12:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Category table with a
    # self-referential 'parent' FK (categories form a tree).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('keywords', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=30)),
                ('status', models.CharField(choices=[('True', 'Evet'), ('False', 'Hayır')], max_length=10)),
                ('image', models.ImageField(blank=True, upload_to='images/')),
                ('slug', models.SlugField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='car.category')),
            ],
        ),
    ]
|
[
"aysenurozbay1004@gmail.com"
] |
aysenurozbay1004@gmail.com
|
1d2c120548969e949dfa2abe8beb7bdf89338787
|
b64e685dec5d95baad28c26b245df2bb0b45f8ed
|
/Data Inspection.py
|
9fee83b8728400884564e282e546da6b7406069b
|
[] |
no_license
|
pfescriva/Chicago-Trucks-Classification
|
9f9f1d95a223924ced43b9d371594d3d7935eb82
|
40c82a93a68a01c349eea312accaf932490a0314
|
refs/heads/main
| 2023-04-04T22:57:53.053084
| 2021-04-10T18:20:11
| 2021-04-10T18:20:11
| 356,643,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,683
|
py
|
import pandas as pd
import multiprocessing as mp
import sys
####### PROJECT ###########
# Load the data
import multiprocessing as mp
import pandas as pd

vehicles = pd.read_csv("C:/Users/Carmen/OneDrive/Archivos - Todo/1 - Master Statistics/Period 2/Traffic_Crashes_-_Vehicles.csv")
crashes = pd.read_csv("C:/Users/Carmen/OneDrive/Archivos - Todo/1 - Master Statistics/Period 2/Traffic_Crashes_-_Crashes.csv", sep=',')

# Just for confirming datasets' structure:
vehicles['VEHICLE_ID'].duplicated().any()  # Primary Key
vehicles['CRASH_RECORD_ID'].duplicated().any()  # You have
crashes['CRASH_RECORD_ID'].duplicated().any()  # Primary Key

# Proceeding to the data join (We're stil targeting behicles, but now have more columns)
df = pd.merge(vehicles, crashes, how = 'left', on = 'CRASH_RECORD_ID')

# No longer needed: free memory held by the source frames.
del vehicles
del crashes

# Inspection
df.shape

# First Cleaning
pd.set_option('display.max_rows', 140)
df.isnull().sum()

# Cleaning dataset
## I want to clear all the columns with no analytical utiliy or with a excessive ammount of empty values
del df['CRASH_RECORD_ID']
del df['CRASH_UNIT_ID']
del df['VEHICLE_ID']

max_number_of_nas = 100000
# NOTE(review): set1 and set2 are computed from identical expressions —
# one of them was presumably meant to use a different threshold; confirm.
set1 = df.loc[:, (df.isnull().sum() <= max_number_of_nas)]
set2 = df.loc[:, (df.isnull().sum() <= max_number_of_nas)]

pd.set_option('display.max_rows', 140)
df.isnull().sum()
# Descriptives
from scipy.stats import pearsonr
# NOTE(review): data1/data2 are never defined in this file — this line
# raises NameError as written.
pearsonr(data1, data2)

# ML
import sklearn as sk
from sklearn import tree
clf = tree.DecisionTreeClassifier()
# NOTE(review): fit() is called with no training data — raises TypeError.
clf.fit()

# decision tree: Explanation from the cscience professor
### Trail - Python - Test
import pandas as pd

vehicles = pd.read_csv("C:/Users/Carmen/OneDrive/Archivos - Todo/1 - Master Statistics/Period 2/Traffic_Crashes_-_Vehicles.csv")
crashes = pd.read_csv("C:/Users/Carmen/OneDrive/Archivos - Todo/1 - Master Statistics/Period 2/Traffic_Crashes_-_Crashes.csv", sep=',')
df = pd.merge(vehicles, crashes, how='left', on ='CRASH_RECORD_ID')

pd.set_option('display.max_rows', 140)
df.isnull().sum()

data = df.head(2000)
# NOTE(review): View() is an R/RStudio function, not Python — NameError.
View(data)

dat = data[['CRASH_HOUR', 'INJURIES_UNKNOWN', 'DAMAGE']]
dat.dropna(subset=['CRASH_HOUR', 'INJURIES_UNKNOWN', 'DAMAGE'], inplace=True)
dat.isnull().sum() # Awesome

x = dat[['CRASH_HOUR', 'INJURIES_UNKNOWN']]
y = dat[['DAMAGE']]

from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline,make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings('ignore')

# Grid-search a random forest over n_estimators and max_depth (10-fold CV).
clf = RandomForestClassifier(random_state = 10, max_features='sqrt')
pipe = Pipeline([('classify', clf)])
param = {'classify__n_estimators':list(range(20, 30, 1)),
         'classify__max_depth':list(range(3, 10, 1))}
grid = GridSearchCV(estimator = pipe, param_grid = param, scoring = 'accuracy', cv = 10)
grid.fit(x, y)
print(grid.best_params_)
print(grid.best_score_)
### Titanic data
train_df = pd.read_csv("C:/Users/Carmen/OneDrive/Archivos - Todo/1 - Master Statistics/Period 2/train.csv")

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline,make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings('ignore')

# Impute missing ages with the mean and missing embarkation with "S".
train_df["Age"].fillna(train_df.Age.mean(), inplace=True)
train_df["Embarked"].fillna("S", inplace=True)
train_df.isnull().sum()

# One-hot encode the categorical predictors.
x_train = train_df[['Pclass', 'Sex', 'Age', 'Fare', 'Embarked', 'Parch', 'SibSp']]
x_train = pd.get_dummies(x_train)
y_train = train_df[['Survived']]

# Grid-search a random forest (10-fold CV) on the Titanic training set.
clf = RandomForestClassifier(random_state = 10, max_features='sqrt')
pipe = Pipeline([('classify', clf)])
param = {'classify__n_estimators':list(range(20, 30, 1)),
         'classify__max_depth':list(range(3, 10, 1))}
grid = GridSearchCV(estimator = pipe, param_grid = param, scoring = 'accuracy', cv = 10)
grid.fit(x_train, y_train)
print(grid.best_params_)
print(grid.best_score_)
# End
from sklearn import tree
clf = tree.DecisionTreeClassifier()
# NOTE(review): fit() is called with no arguments — raises TypeError.
clf.fit()
type(x)
type(y['DAMAGE'])

#
## Trainning
# NOTE(review): the following lines are free-form study notes, not valid
# Python — the file does not parse as-is.
- Tunning
"Skicit learn works with numpy by defual"
"In numpy, categorical variables need to be defines as integers or dummies(one.hot-encoding)"
"NAs are np.na"
"If you use NumPy you loss the names because you use nnumpy matrices"
""
## Data Partition
x_train = df[['LONGITUDE','MAKE']]
y_train = df['DAMAGE']

from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline,make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings('ignore')

clf = RandomForestClassifier(random_state = 10, max_features='sqrt', n_jobs = 5)
pipe = Pipeline([('classify', clf)])
param = {'classify__n_estimators':list(range(20, 30, 1)),
         'classify__max_depth':list(range(3, 10, 1))}
grid = GridSearchCV(estimator = pipe, param_grid = param, scoring = 'accuracy', cv = 10)
grid.fit(x, y)
print(grid.best_params_)
print(grid.best_score_)

## Model Evaluation
# The idea is to identify the model with the highest accuracy. I a variable is an execellent predictor,
# Even though, it has 80% empty values, we migth considered keeing it in!

# non useful variables:
# NOTE(review): bare identifier left on the final line — NameError/noise.
ID
|
[
"71879463+pfescriva@users.noreply.github.com"
] |
71879463+pfescriva@users.noreply.github.com
|
4bd493f273e1f83f2fe011829ccf0632072d72b6
|
5621a2215dea381e661ef141ebc09d4e1815f242
|
/oauth_token_manager_test.py
|
ae5a667cebae24c5cb8ed11d6ee809944b8c0051
|
[
"Apache-2.0"
] |
permissive
|
martincochran/score-minion
|
7ec66b7bb7b8402e5c60c402936ceb21023d2ea6
|
58197798a0a3a4fbcd54ffa0a2fab2e865985bfd
|
refs/heads/master
| 2020-04-12T06:27:02.218104
| 2017-04-09T20:44:21
| 2017-04-09T20:44:21
| 27,320,444
| 0
| 0
| null | 2014-11-30T01:32:21
| 2014-11-30T01:12:00
| null |
UTF-8
|
Python
| false
| false
| 2,779
|
py
|
#!/usr/bin/env python
#
# Copyright 2014 Martin Cochran
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import test_env_setup
from google.appengine.ext import testbed
import oauth_token_manager
class OauthTokenManagerTest(unittest.TestCase):
    """Tests for OauthTokenManager in mock and datastore-backed modes.

    Uses the deprecated-alias fix: assertEquals -> assertEqual (the alias
    is deprecated and removed in newer unittest versions).
    """

    def setUp(self):
        """Stub out the datastore so we can test it."""
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_memcache_stub()
        self.testbed.init_datastore_v3_stub()

    def tearDown(self):
        self.testbed.deactivate()

    def testMockManager(self):
        token_manager = oauth_token_manager.OauthTokenManager(is_mock=True)

        self.assertEqual('', token_manager.GetSecret())
        self.assertEqual('', token_manager.GetToken())

        secret = 'my secret'
        token = 'token for my secret'
        token_manager.AddSecret(secret)
        token_manager.AddToken(token)

        self.assertEqual(secret, token_manager.GetSecret())
        self.assertEqual(token, token_manager.GetToken())

        secret = 'new secret'
        token = 'token for new secret'
        token_manager.AddSecret(secret)
        token_manager.AddToken(token)

        self.assertEqual(secret, token_manager.GetSecret())
        self.assertEqual(token, token_manager.GetToken())

        # Ensure we didn't actually touch the data store.
        account_query = oauth_token_manager.ApiSecret.query(
            ancestor=oauth_token_manager.api_secret_key()).order(
                -oauth_token_manager.ApiSecret.date_added)
        oauth_secrets = account_query.fetch(10)
        self.assertEqual(0, len(oauth_secrets))

    def testDatastoreBackedManager(self):
        token_manager = oauth_token_manager.OauthTokenManager()

        self.assertEqual('', token_manager.GetSecret())
        self.assertEqual('', token_manager.GetToken())

        secret = 'my secret'
        token = 'token for my secret'
        token_manager.AddSecret(secret)
        token_manager.AddToken(token)

        self.assertEqual(secret, token_manager.GetSecret())
        self.assertEqual(token, token_manager.GetToken())

        secret = 'new secret'
        token = 'token for new secret'
        token_manager.AddSecret(secret)
        token_manager.AddToken(token)

        self.assertEqual(secret, token_manager.GetSecret())
        self.assertEqual(token, token_manager.GetToken())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
[
"martin.cochran@gmail.com"
] |
martin.cochran@gmail.com
|
d6499c4d42ad85ac400a5bbec98636b06be7ab7d
|
e9814e85c3c0ef112a99764a410555f47044e389
|
/views.py
|
7b4bd8496db19ebcf2ed11c4845ab396af3e04b5
|
[] |
no_license
|
hatsem78/flask_jwt
|
4ed7b14f356e029302d3a48aac94e9fe6eeda8eb
|
28b594412eccac71cc3823f187f9ef6d5458b6cf
|
refs/heads/master
| 2021-06-10T06:00:30.821269
| 2021-05-19T13:47:00
| 2021-05-19T13:47:00
| 180,415,067
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
from run import app
from flask import jsonify
@app.route('/')
def index():
    """Root endpoint: return a hello-world JSON payload."""
    payload = {'message': 'hello, world'}
    return jsonify(payload)
|
[
"ohatsembiller@kiusys.com"
] |
ohatsembiller@kiusys.com
|
934e86330e972edcbc373ccb4c4487c22ccc2e5e
|
09aee268ce72d282f53fe94f42478e2b3b48127d
|
/multiappProject/multiappProject/wsgi.py
|
86941870258341acad5fe5fe9c73ac5a014b4bc7
|
[] |
no_license
|
keshava519/Django_Projects
|
c95d0f8c55d4cc946291be6fb058b7298aefe596
|
99584892b9d9ec6b6395a382c684b4d036d07874
|
refs/heads/main
| 2023-02-23T03:44:32.110742
| 2021-01-27T15:15:13
| 2021-01-27T15:15:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
"""
WSGI config for multiappProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "multiappProject.settings")
application = get_wsgi_application()
|
[
"keshava.cadcam@gmail.com"
] |
keshava.cadcam@gmail.com
|
9cda8fc2c01c9820f390f8122800fa8f79f4e834
|
e04ee5f0d78547f07d3fa4b970582ab5242a39de
|
/Auto test.py
|
4a3633b5ebff6ffa652ab77cde03a87a1ca76da2
|
[] |
no_license
|
PrashantSwamiAcc/TestProject
|
f4786116b8c0293f95a09262712ec0cf70df73d4
|
2e419ec95d2ff1c0f609c2e8cf85e2b3c8be057d
|
refs/heads/main
| 2023-08-02T04:04:54.971074
| 2021-09-20T14:04:48
| 2021-09-20T14:04:48
| 407,182,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
import scores
from rps import rps
from compute import compute
from scores import tabulate

# Run a series of computer-vs-computer rock/paper/scissors rounds and
# tabulate the outcomes.
tests = int(input('How many tests?'))
count = 0
score = []
userplays = []
complays = []
# Bug fix: '<=' ran one extra round beyond the requested number of tests.
while count < tests:
    inp = compute()
    comp = compute()
    wl, inp, comp = rps(inp, comp)
    score.append(wl)
    userplays.append(inp)
    complays.append(comp)
    count += 1
test = scores.tabulate(score, userplays, complays)
|
[
"noreply@github.com"
] |
PrashantSwamiAcc.noreply@github.com
|
870d46d45893fc0a9c7cc2b09129942a1f7bedd3
|
558f85dec459f2e67dc3ef24be551139cab63e04
|
/noughtpad_app/views.py
|
7bfa03dd6f4389f607e1831c9d432fbbddb4c51a
|
[
"MIT"
] |
permissive
|
codelixir/nought-pad
|
28d4a30c0e5bbbee8822fcb45032717dfca635e6
|
79987a87070ca5c9f48303ca701d00cf0bb27eac
|
refs/heads/main
| 2023-08-13T23:00:46.628958
| 2021-09-23T07:57:21
| 2021-09-23T07:57:21
| 374,750,298
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,882
|
py
|
from django.shortcuts import get_object_or_404, render
from django.urls import reverse_lazy, reverse
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.http import HttpResponseRedirect, request
from django.contrib.auth.models import User
from .models import Category, Note, Profile
from .forms import AddNoteForm, EditNoteForm, EditProfileForm
def LikeView(request, pk):
    """Toggle the requesting user's like on the note named in the POST body,
    then redirect back to that note's detail page."""
    note = get_object_or_404(Note, id=request.POST.get('note_id'))
    current_user = request.user
    already_liked = note.likes.filter(id=current_user.id).exists()
    if already_liked:
        note.likes.remove(current_user)
    else:
        note.likes.add(current_user)
    return HttpResponseRedirect(reverse('note-details', args=[str(pk)]))
def UserNotesView(request, pk):
    """Render every note written by the author identified by ``pk``."""
    note_author = get_object_or_404(User, id=pk)
    context = {
        'author': note_author,
        'author_notes': Note.objects.filter(author=note_author),
    }
    return render(request, 'user_notes.html', context)
class HomeView(ListView):
    'Render the Homepage'
    model = Note
    template_name = 'home.html'
    # Newest notes first on the landing page.
    ordering = ['-timestamp']
class NoteView(DetailView):
    """Render a complete view of a Note, including its like state."""
    model = Note
    template_name = 'note_details.html'

    def get_context_data(self, *args, **kwargs):
        """Extend the default context with the category list, the note's
        like count, and the CSS class for the like button (filled when the
        viewer has already liked this note)."""
        note = get_object_or_404(Note, id=self.kwargs['pk'])
        # .exists() already returns a bool; the original wrapped it in a
        # redundant `True if ... else False` conditional expression.
        liked = note.likes.filter(id=self.request.user.id).exists()
        context_data = super(NoteView, self).get_context_data(*args, **kwargs)
        context_data["categories"] = [cat for cat in Category.objects.all()]
        context_data["total_likes"] = note.total_likes()
        context_data["like_button"] = "btn-primary" if liked else "btn-outline-primary"
        return context_data
class AddNoteView(CreateView):
    'Add a new note'
    model = Note
    form_class = AddNoteForm
    template_name = 'add_note.html'
    # NOTE: no success_url is set — presumably relies on the model's
    # get_absolute_url for the post-save redirect; verify on Note.
class EditNoteView(UpdateView):
    'Edit an existing note'
    model = Note
    form_class = EditNoteForm
    template_name = 'edit_note.html'
    # NOTE: no success_url is set — presumably relies on the model's
    # get_absolute_url for the post-save redirect; verify on Note.
class DeleteNoteView(DeleteView):
    'Delete a note'
    model = Note
    template_name = 'delete_note.html'
    # Back to the homepage once the note is gone.
    success_url = reverse_lazy('home')
class EditProfileView(UpdateView):
    """Let the logged-in user edit their own public profile."""
    model = Profile
    template_name = 'edit_profile.html'
    form_class = EditProfileForm
    success_url = reverse_lazy('home')

    def get_object(self):
        # The edited profile is always the requester's own; anonymous
        # visitors get no object.
        requester = self.request.user
        return requester.profile if requester.is_authenticated else None
class CreateProfileView(CreateView):
    'Edit the public profile when existing profile is blank'
    model = Profile
    template_name = 'edit_profile.html'
    form_class = EditProfileForm
    success_url = reverse_lazy('home')
    def form_valid(self, form):
        # Attach the newly created profile to the logged-in user before saving.
        form.instance.user = self.request.user
        return super().form_valid(form)
|
[
"pahulpreet.singh@research.iiit.ac.in"
] |
pahulpreet.singh@research.iiit.ac.in
|
ae907a5ae9a5d33fb7689d03edee9680d427a226
|
4ba2d66fb4874a6e25260e1a530f8b27ac1fc77b
|
/main.py
|
5023f3a48d5656adaa27ac53e2d50c18b40c76b2
|
[] |
no_license
|
roscale/stochastic-process-project1-part2
|
37073bf4b4db6cc692b8f9258cf85fab085e99a6
|
3988e65a2c9eadc7ec239bde010e545e020b4a14
|
refs/heads/master
| 2021-05-21T02:27:05.376517
| 2020-05-08T19:40:00
| 2020-05-08T19:40:00
| 252,501,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,476
|
py
|
from sys import stderr
from experiment import Experiment, Proportion
from util import *


def _run_simulation(title, runs, transmission_p, proportion_kind, proportion_value,
                    matrix=None, matrix_file=None, beds=False,
                    interaction_factor=None, per_run_setup=None, report_beds=False):
    """Configure, run and plot one averaged epidemic simulation.

    The original script repeated this loop almost verbatim in seven
    branches; the variations are now parameters.

    title: plot/experiment title.
    runs: number of repeated simulations averaged together.
    transmission_p: infection probability (recovery probability fixed at 0.2,
        as in every original branch).
    proportion_kind / proportion_value: how the initially infected are chosen.
    matrix / matrix_file: in-memory adjacency matrix OR a file to load it from.
    beds: when True, configure the hospital (140 beds, 0.17 rate) as in the
        policy branches.
    interaction_factor: if set, reduce interactions by this factor once.
    per_run_setup: optional callable(experiment) run each iteration after the
        initially infected are set (used for vaccination / medication).
    report_beds: print the peak bed occupancy after plotting.
    """
    experiment = Experiment(title)
    if matrix_file is not None:
        experiment.read_adjacency_matrix_from_file(matrix_file)
    else:
        experiment.read_adjacency_matrix(matrix)
    if beds:
        experiment.set_number_of_beds(140, 0.17)
    if interaction_factor is not None:
        experiment.reduce_interactions(interaction_factor)
    for _ in range(runs):
        experiment.soft_reset()
        experiment.set_probabilities(transmission_p, 0.2)
        experiment.set_initially_infected(proportion_kind, proportion_value)
        if per_run_setup is not None:
            per_run_setup(experiment)
        experiment.prepare_chain()
        # Run the chain until the epidemic dies out.
        while not experiment.virus_is_gone():
            experiment.step()
        experiment.take_mean()
    experiment.plot()
    if report_beds:
        print(f"Maximum hospitalised: {experiment.maximum_occupied_beds}/{experiment.beds}")


simulation = input("Simulation [wlin, wfull, wbig, p_transmission, p_interaction, p_vaccination, p_meds]: ")

if simulation == "wlin":
    _run_simulation("Simulation: Wlin", 10, 0.5,
                    Proportion.VALUE, 1, matrix=make_lin(6))
elif simulation == "wfull":
    _run_simulation("Simulation: Wfull", 10, 0.5,
                    Proportion.VALUE, 1, matrix=make_full(6))
elif simulation == "wbig":
    _run_simulation("Simulation: Wbig", 50, 0.5,
                    Proportion.PERCENTAGE, 0.5, matrix_file="Wbig_sparse.txt")
elif simulation == "p_transmission":
    _run_simulation("Simulation: Réduction de la probabilité de transmission",
                    10, 0.25, Proportion.PERCENTAGE, 0.5,
                    matrix_file="Wbig_sparse.txt", beds=True, report_beds=True)
elif simulation == "p_interaction":
    _run_simulation("Simulation: Réduction des interactions entre les individus",
                    10, 0.5, Proportion.PERCENTAGE, 0.5,
                    matrix_file="Wbig_sparse.txt", beds=True,
                    interaction_factor=0.33, report_beds=True)
elif simulation == "p_vaccination":
    _run_simulation("Simulation: Vaccination d'un pourcentage fixe d'individus",
                    10, 0.5, Proportion.PERCENTAGE, 0.5,
                    matrix_file="Wbig_sparse.txt", beds=True,
                    per_run_setup=lambda e: e.vaccinate_people(22.0),
                    report_beds=True)
elif simulation == "p_meds":
    _run_simulation("Simulation: Traitement avec médicaments pour les patients hospitalisés",
                    10, 0.5, Proportion.PERCENTAGE, 0.5,
                    matrix_file="Wbig_sparse.txt", beds=True,
                    per_run_setup=lambda e: e.give_meds_to_patients(2.5),
                    report_beds=True)
else:
    print("Invalid simulation", file=stderr)
|
[
"roscaalex19@gmail.com"
] |
roscaalex19@gmail.com
|
0d68bfb91ed965310a9d735f502d4310075538a4
|
bb2e6e0f1c00dd5e9dd18a71619ea895d6fe07ba
|
/functional_tests.py
|
8f085c7f9e34957c125fdeabae89a53f1e219b19
|
[] |
no_license
|
nauman3128/tdd
|
88fd16c2238a9a8e22758a3cae6a375a0451d3de
|
3ef1a623461422b8346ad4b481dd3503b01b93a6
|
refs/heads/master
| 2016-09-15T19:06:52.502180
| 2015-07-28T12:38:50
| 2015-07-28T12:38:50
| 39,830,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
from selenium import webdriver
import unittest
class NewVisitorTest(unittest.TestCase):
    """Browser-level (functional) test: drives a real Firefox via Selenium
    against a dev server expected at localhost:8000."""
    def setUp(self): #
        self.browser = webdriver.Firefox()
        # Wait up to 3 s for elements to appear before failing lookups.
        self.browser.implicitly_wait(3)
    def tearDown(self): #
        self.browser.quit()
    def test_can_start_a_list_and_retrieve_it_later(self):
        # Edith has heard about a cool new online to-do app. She goes
        # to check out its homepage
        self.browser.get('http://localhost:8000')
        # She notices the page title and header mention to-do lists
        self.assertIn('To-Do', self.browser.title)
        # Deliberate placeholder marking where the implemented story ends.
        self.fail('Finish the test!')
        # She is invited to enter a to-do item straight away
        # She types "Buy peacock feathers" into a text box (Edith's hobby
        # is tying fly-fishing lures)
        # When she hits enter, the page updates, and now the page lists
        # "1: Buy peacock feathers" as an item in a to-do list
        # There is still a text box inviting her to add another item. She
        # enters "Use peacock feathers to make a fly" (Edith is very methodical)
        # The page updates again, and now shows both items on her list
        # Edith wonders whether the site will remember her list. Then she sees
        # that the site has generated a unique URL for her -- there is some
        # explanatory text to that effect.
        # She visits that URL - her to-do list is still there.
        # Satisfied, she goes back to sleep
if __name__ == '__main__': #
    unittest.main()
|
[
"nauman.qc@gmail.com"
] |
nauman.qc@gmail.com
|
722b478c6cbe6b6ada0f907f795d36f9eee81280
|
08f4533b76317c304cbf6d3bc30df5f760235a23
|
/week1/day6/CakeThief.py
|
9956bfee383049ca61719c9fc39f4ecf22dd82ef
|
[] |
no_license
|
surajkumar19/Competitive-Programming
|
49861ee250e4b4918234c5592e6dc1c4690404b3
|
43babbc8a16d1d38ed9ef91b3b83a55f9ec8bfb3
|
refs/heads/master
| 2020-03-21T13:04:42.969008
| 2018-07-21T09:25:16
| 2018-07-21T09:25:16
| 138,586,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
# O(mn)
import unittest
import math
def max_duffel_bag_value(cake_tuples, weight_capacity):
    """Unbounded knapsack: best total cake value that fits in the bag.

    cake_tuples is a list of (weight, value) pairs; each cake type may be
    taken any number of times.  A weightless cake with non-zero value makes
    the answer infinite.  Runs in O(len(cake_tuples) * weight_capacity).
    """
    best_at = [0] * (weight_capacity + 1)
    for capacity in range(weight_capacity + 1):
        best = 0
        for weight, value in cake_tuples:
            # Free value means the bag's worth is unbounded.
            if weight == 0 and value != 0:
                return math.inf
            if weight <= capacity:
                candidate = value + best_at[capacity - weight]
                if candidate > best:
                    best = candidate
        best_at[capacity] = best
    return best_at[weight_capacity]
# Tests
class Test(unittest.TestCase):
    """Regression tests for max_duffel_bag_value, covering single/multiple
    cake types, zero capacity, and zero-weight edge cases."""
    def test_one_cake(self):
        actual = max_duffel_bag_value([(2, 1)], 9)
        expected = 4
        self.assertEqual(actual, expected)
    def test_two_cakes(self):
        actual = max_duffel_bag_value([(4, 4), (5, 5)], 9)
        expected = 9
        self.assertEqual(actual, expected)
    def test_only_take_less_valuable_cake(self):
        actual = max_duffel_bag_value([(4, 4), (5, 5)], 12)
        expected = 12
        self.assertEqual(actual, expected)
    def test_lots_of_cakes(self):
        actual = max_duffel_bag_value([(2, 3), (3, 6), (5, 1), (6, 1), (7, 1), (8, 1)], 7)
        expected = 12
        self.assertEqual(actual, expected)
    def test_value_to_weight_ratio_is_not_optimal(self):
        actual = max_duffel_bag_value([(51, 52), (50, 50)], 100)
        expected = 100
        self.assertEqual(actual, expected)
    def test_zero_capacity(self):
        actual = max_duffel_bag_value([(1, 2)], 0)
        expected = 0
        self.assertEqual(actual, expected)
    def test_cake_with_zero_value_and_weight(self):
        actual = max_duffel_bag_value([(0, 0), (2, 1)], 7)
        expected = 3
        self.assertEqual(actual, expected)
    def test_cake_with_non_zero_value_and_zero_weight(self):
        actual = max_duffel_bag_value([(0, 5)], 5)
        expected = float('inf')
        print(expected)
        self.assertEqual(actual, expected)
# Runs the suite at import time (no __main__ guard in the original).
unittest.main(verbosity=2)
|
[
"noreply@github.com"
] |
surajkumar19.noreply@github.com
|
cdfc510623cc71cb3b4da894aa9cd0bf95d30739
|
ecb088fd0f1929137e1b646d6d2b82de37028090
|
/_22_Exercise32.py
|
0ed733c27ae0bd17e27102abd6c535b615816aed
|
[] |
no_license
|
divanshudodeja/Learn-Python-The-Hard-Way
|
43b5cebed388c3107e5949518a2d6b66843a3f48
|
bddf63afee93aa28efdf3e5b667342e5bd2a9370
|
refs/heads/master
| 2020-07-01T19:39:13.423226
| 2019-08-31T15:08:17
| 2019-08-31T15:08:17
| 201,276,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
# Demonstrates for-loops over lists (LPTHW exercise 32).
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']

for number in the_count:
    print("This is count %d" % number)

for fruit in fruits:
    print("A fruit of type: %s" % fruit)

# %r shows the repr, so strings print quoted and ints print bare.
for i in change:
    print("I got %r" % i)

# Build a list of 0..5, then print its elements.
elements = []
for i in range(0, 6):
    elements.append(i)

# Bug fix: the original looped over range(0, 6) again here, leaving the
# freshly built `elements` list unused. Iterating the list itself produces
# identical output and does what the code evidently intends.
for i in elements:
    print("Element was : %d" % i)
|
[
"divanshu.dodeja@gmail.com"
] |
divanshu.dodeja@gmail.com
|
057c350c8121bac5c0707c382478c05830f0c53d
|
a9e335ac27d09bc17f3a18109ca372f110bf5621
|
/students/Y2334/Kotlyarova Sofya/prac_1.3/Prac/Prac/asgi.py
|
108db9e3b02d955b4dfa506e6b5ebd48990b564f
|
[] |
no_license
|
sofkot/ITMO_FSPO_PP_web_development_2020-2021
|
5f072a50e39d878dc7e7ba46cb50f5ccefd166c9
|
f5e6b204c22c58e19ab97a15a3f8f5c6efa4b370
|
refs/heads/main
| 2023-06-16T05:16:48.249937
| 2021-07-01T17:29:59
| 2021-07-01T17:29:59
| 345,800,240
| 0
| 0
| null | 2021-03-08T21:29:35
| 2021-03-08T21:29:34
| null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
ASGI config for Prac project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Prac.settings')
application = get_asgi_application()
|
[
"sofiko.kotliarowa@gmail.com"
] |
sofiko.kotliarowa@gmail.com
|
d5bcb2225e95be7a585ac878101fee2ca086c0ef
|
2292c300925ea481643ed9ec24f14bc862d06463
|
/undertow/tunnel.py
|
57f1429d0c9d679abc091a00786d9fe15b0ff097
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Morgan243/Undertow
|
68316bb7650a7484eae82a30a921fd2f1a460116
|
b5b242cb1eff60bdf0e94dd5c1ac54ed4c2a6b0b
|
refs/heads/master
| 2021-01-19T11:33:09.734781
| 2017-04-11T20:26:55
| 2017-04-11T20:26:55
| 76,740,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
from undertow.net_core.ssh_tunnel import tunnel_to
import argparse

# CLI entry point: open an SSH tunnel to a remote host/port and print it.
# Cleanups vs. original: `if __name__ == """__main__"""` used an unusual
# triple-quoted string (equal but non-idiomatic), and several argparse
# options were left as commented-out dead code.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', dest='ssh_user',
                        type=str, default=None)
    parser.add_argument('--host', dest='ssh_host',
                        type=str, default=None)
    parser.add_argument('--port', dest='ssh_port',
                        type=int, default=22)
    parser.add_argument('--remote-port', dest='remote_port',
                        type=int, default=1337)

    args = parser.parse_args()

    tunnel = tunnel_to(ssh_host=args.ssh_host, ssh_port=args.ssh_port,
                       user=args.ssh_user, remote_bind_port=args.remote_port)
    print(tunnel)
|
[
"morgansstuart243@gmail.com"
] |
morgansstuart243@gmail.com
|
cb24bf738fec8bda1d40899a49d44e4de04b3312
|
a2cbba69fcc84de566e750f455d1b8d04f841552
|
/engine/nucleo.py
|
ded30edab020379844e45688f0bc41a601f95d6b
|
[] |
no_license
|
caferrari/engine2d-python
|
37627ea4da769bb1b28d02f1c7eb14d298bce65b
|
ee79b2909beb62aee815f7595572b01f69a44e33
|
refs/heads/master
| 2021-04-27T00:28:07.972427
| 2013-06-06T14:08:53
| 2013-06-06T14:08:53
| 10,527,829
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,539
|
py
|
import pygame, input, colisao
from pygame.locals import *
from threading import Thread
class Engine:
    """Minimal 2D game engine (Python 2 / pygame): owns the window, the
    layered actor lists, input handling and the collision subsystem, and
    drives the update/draw/flip main loop."""
    def __init__ (self, w, h, titulo):
        pygame.init()
        window = pygame.display.set_mode((w, h), HWSURFACE|DOUBLEBUF)
        pygame.mouse.set_visible(False)
        self.nome = titulo
        self.dimensoes = [w, h]
        self.tela = pygame.display.get_surface()
        # Six drawing layers; actors on higher layers render on top.
        self.layers = [[],[],[],[],[],[]]
        self.loop = 1
        self.sprites = {}
        self.clock = 1
        self.input = input.Input(self)
        self.clock = pygame.time.get_ticks()
        self.lastUpdate = self.clock
        self.conteiner = {}
        self.grupocolisao = []
        self.colisao = colisao.Colisao(self)
        print "-- Engine Carregada"
    def flip (self):
        # Present the back buffer, clear to white and refresh the tick clock.
        pygame.display.flip()
        self.tela.fill([255,255,255])
        self.clock = pygame.time.get_ticks()
    def addGrupoColisao(self, t1, t2, tipo="qq"):
        # Register a collision pairing between two actor types.
        self.grupocolisao.append([t1, t2, tipo])
    def updateAll(self):
        # Poll input, derive FPS from elapsed ticks, update every active
        # actor, and refresh the window caption with actor/FPS stats.
        #print self.layers[5][0].colidindo
        self.input.eventos()
        try:
            tmp = 1000 / (self.clock - self.lastUpdate)
        except:
            # Zero elapsed ticks would divide by zero; report 0 FPS.
            tmp = 0
        self.lastUpdate = self.clock
        cont = 0
        for layer in self.layers:
            cont = cont + len(layer)
            for ator in layer:
                if ator.ativo:
                    ator.update()
        pygame.display.set_caption( self.nome + " Atores: " + str(cont) + " FPS: " + str(tmp) )
    def desenhaAll(self):
        # Draw every active actor, layer by layer (back to front).
        for layer in self.layers:
            for ator in layer:
                if ator.ativo:
                    ator.desenhar()
    def criaEstado(self, arquivo, animado, fps=15, quadros=[1,1], colorkey=[255,0,255]):
        # Factory for an actor state (sprite sheet + animation settings).
        import estado
        return estado.Estado(self, arquivo, animado, fps, quadros, colorkey)
    def criaAtor(self, nome, tipo, layer, posicao):
        # Factory for a new actor registered on the given layer.
        import ator
        return ator.Ator(self, nome, tipo, layer, posicao)
    def loadAtor(self, config):
        # Build an actor from a config module: its states, init hook,
        # collision handler and custom functions.
        print "Carregando ator: ", config.ator["nome"]
        tmp = self.criaAtor(config.ator["nome"], config.ator["tipo"], config.ator["layer"], config.ator["posicao"])
        for dados in config.ator["estados"]:
            try:
                animado = dados["animado"]
            except:
                # "animado" key is optional; default to animated.
                animado = True
            tmp.addEstado(dados["nome"], self.criaEstado(dados["arquivo"], animado, dados["updatesec"], dados["quadroswh"], dados["colorkey"]))
        config.ator["init"](tmp)
        tmp.colisao = config.ator["colisao"]
        tmp.funcoes = config.ator["funcoes"]
    def start(self):
        # Main loop: update, draw, flip until self.loop is cleared.
        print "-- Inicializando o loop Principal"
        self.colisao.start()
        while (self.loop):
            self.updateAll()
            self.desenhaAll()
            self.flip()
        self.colisao.stop()
        print "-- Fim do jogo"
|
[
"caferrari@gmail.com"
] |
caferrari@gmail.com
|
6ccf069eeabedea4fc3fa15f19353da270bffd57
|
5dc1f6f7663230f73b381a5228bb00a5dfa53c1f
|
/Research Work/Training/Conversational_Emotion_Prediction.py
|
d1172b1ac91f457728bdeeedf02d49cbe7a5dc13
|
[] |
no_license
|
sadilchamishka/EmotionFYP
|
4b6108a6aaf7ef06d6ee2c6f13c78e007b0fb930
|
eab98a01f17e2af6190a9a71803c404e3dc410e9
|
refs/heads/master
| 2023-02-22T21:27:27.821660
| 2021-02-01T12:02:34
| 2021-02-01T12:02:34
| 333,349,476
| 4
| 1
| null | 2021-02-01T12:02:35
| 2021-01-27T08:14:13
|
Python
|
UTF-8
|
Python
| false
| false
| 8,241
|
py
|
import numpy as np
np.random.seed(1234)
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torch.optim as optim
from datetime import datetime
import argparse
import time
import pickle
from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report, precision_recall_fscore_support
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import pandas as pd
from ConversationModel import BiModel
class IEMOCAPDataset(Dataset):
    """IEMOCAP audio-feature dataset where each item is one whole
    conversation (video id): per-utterance audio features, one-hot speaker
    flags, an all-ones utterance mask and per-utterance emotion labels."""
    def __init__(self, path, train=True):
        # NOTE(review): the `path` argument is ignored — the pickle location
        # is hard-coded below. Confirm whether `path` should be used instead.
        self.videoAudio, self.videoLabels, self.videoSpeakers, self.trainVid, self.testVid = pickle.load(open('/content/drive/My Drive/EmotionRNN2/dataformodel.pkl', 'rb'), encoding='latin1')
        '''
        joy, trust, fear, surprise, sadness, anticipation, anger, and disgust.= basic 8 emotions
        label index mapping = {'hap':0, 'sad':1, 'neu':2, 'ang':3, 'exc':4, 'fru':5} -= we have here
        '''
        # Conversation ids for the requested split.
        self.keys = [x for x in (self.trainVid if train else self.testVid)]
        self.len = len(self.keys)
    def __getitem__(self, index):
        # Returns (audio features, speaker one-hot ('M' -> [1,0]),
        # utterance mask of ones, labels, conversation id).
        vid = self.keys[index]
        return torch.FloatTensor(self.videoAudio[vid]),\
               torch.FloatTensor([[1,0] if x=='M' else [0,1] for x in\
                                  self.videoSpeakers[vid]]),\
               torch.FloatTensor([1]*len(self.videoLabels[vid])),\
               torch.LongTensor(self.videoLabels[vid]),\
               vid
    def __len__(self):
        return self.len
    def collate_fn(self, data):
        # Pad variable-length conversations: first two fields time-major,
        # next two batch-first; keep the ids as a plain list.
        dat = pd.DataFrame(data)
        return [pad_sequence(dat[i]) if i<2 else pad_sequence(dat[i], True) if i<4 else dat[i].tolist() for i in dat]
def get_train_valid_sampler(trainset, valid=0.1):
    """Split the index range of `trainset` into train/validation samplers.

    The first `valid` fraction of indices becomes the validation sampler;
    the rest the training sampler (split point is positional, not shuffled).
    """
    indices = list(range(len(trainset)))
    cut = int(valid * len(trainset))
    train_idx, valid_idx = indices[cut:], indices[:cut]
    return SubsetRandomSampler(train_idx), SubsetRandomSampler(valid_idx)
def get_IEMOCAP_loaders(path, batch_size=32, valid=0.1, num_workers=0, pin_memory=False):
    """Build train/validation/test DataLoaders over IEMOCAP conversations.

    Train and validation share one dataset instance, separated by index
    samplers; the test loader uses the held-out split.
    """
    trainset = IEMOCAPDataset(path=path)
    train_sampler, valid_sampler = get_train_valid_sampler(trainset, valid)
    train_loader = DataLoader(trainset,
                              batch_size=batch_size,
                              sampler=train_sampler,
                              collate_fn=trainset.collate_fn,
                              num_workers=num_workers,
                              pin_memory=pin_memory)
    valid_loader = DataLoader(trainset,
                              batch_size=batch_size,
                              sampler=valid_sampler,
                              collate_fn=trainset.collate_fn,
                              num_workers=num_workers,
                              pin_memory=pin_memory)
    testset = IEMOCAPDataset(path=path, train=False)
    test_loader = DataLoader(testset,
                             batch_size=batch_size,
                             collate_fn=testset.collate_fn,
                             num_workers=num_workers,
                             pin_memory=pin_memory)
    return train_loader, valid_loader, test_loader
def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, train=False):
    """Run one epoch of training (train=True) or evaluation over `dataloader`.

    Returns (avg_loss, avg_accuracy, labels, preds, masks, avg_fscore,
    [alphas, alphas_f, alphas_b, vids]); attention weights are collected
    only in eval mode. NOTE(review): relies on a module-level `cuda` flag
    (set in the __main__ block) rather than a parameter.
    """
    losses = []
    preds = []
    labels = []
    masks = []
    alphas, alphas_f, alphas_b, vids = [], [], [], []
    # Training requires an optimizer.
    assert not train or optimizer!=None
    if train:
        model.train()
    else:
        model.eval()
    for data in dataloader:
        if train:
            optimizer.zero_grad()
        # import ipdb;ipdb.set_trace()
        acouf, qmask, umask, label =\
                [d.cuda() for d in data[:-1]] if cuda else data[:-1]
        #log_prob = model(torch.cat((textf,acouf,visuf),dim=-1), qmask,umask) # seq_len, batch, n_classes
        log_prob, alpha, alpha_f, alpha_b = model(acouf, qmask,umask) # seq_len, batch, n_classes
        lp_ = log_prob.transpose(0,1).contiguous().view(-1,log_prob.size()[2]) # batch*seq_len, n_classes
        labels_ = label.view(-1) # batch*seq_len
        loss = loss_function(lp_, labels_, umask)
        pred_ = torch.argmax(lp_,1) # batch*seq_len
        preds.append(pred_.data.cpu().numpy())
        labels.append(labels_.data.cpu().numpy())
        masks.append(umask.view(-1).cpu().numpy())
        # Weight the batch loss by the number of real (unmasked) utterances.
        losses.append(loss.item()*masks[-1].sum())
        if train:
            loss.backward()
            optimizer.step()
        else:
            # Collect attention weights for later inspection (eval only).
            alphas += alpha
            alphas_f += alpha_f
            alphas_b += alpha_b
            vids += data[-1]
    if preds!=[]:
        preds = np.concatenate(preds)
        labels = np.concatenate(labels)
        masks = np.concatenate(masks)
    else:
        # Empty dataloader: nothing to score.
        return float('nan'), float('nan'), [], [], [], float('nan'),[]
    # Mask-weighted metrics so padded utterances don't count.
    avg_loss = round(np.sum(losses)/np.sum(masks),4)
    avg_accuracy = round(accuracy_score(labels,preds,sample_weight=masks)*100,2)
    avg_fscore = round(f1_score(labels,preds,sample_weight=masks,average='weighted')*100,2)
    return avg_loss, avg_accuracy, labels, preds, masks,avg_fscore, [alphas, alphas_f, alphas_b, vids]
if __name__ == '__main__':
    # Hyper-parameters are hard-coded; the commented lines are remnants of
    # an earlier argparse-based version.
    #batch_size = args.
    batch_size = 2
    n_classes = 6
    #cuda = args.cuda
    cuda = False
    #n_epochs = args.epochs
    n_epochs = 100
    # Model dimensionalities (input, global, party, emotion, hidden, attn).
    D_m = 2000
    D_g = 300
    D_p = 300
    D_e = 200
    D_h = 200
    D_a = 200 # concat attention
    model = BiModel(D_m, D_g, D_p, D_e, D_h,
                    n_classes=n_classes,
                    listener_state=False,
                    context_attention='general',
                    dropout_rec=0.1,
                    dropout=0.1)
    if cuda:
        model.cuda()
    # Inverse class-frequency weights to counter label imbalance.
    loss_weights = torch.FloatTensor([
                                        1/0.086747,
                                        1/0.144406,
                                        1/0.227883,
                                        1/0.160585,
                                        1/0.127711,
                                        1/0.252668,
                                        ])
    # NOTE(review): MaskedNLLLoss is neither imported nor defined in this
    # file — as written this raises NameError. Presumably it lives alongside
    # BiModel in ConversationModel; confirm and import explicitly.
    loss_function = MaskedNLLLoss(loss_weights)
    optimizer = optim.Adam(model.parameters(),
                           lr=0.0001,
                           weight_decay=0.00001)
    train_loader, valid_loader, test_loader =\
            get_IEMOCAP_loaders('/content/drive/My Drive/Emotion RNN/IEMOCAP_features_raw.pkl',
                                valid=0.1,
                                batch_size=batch_size,
                                num_workers=2)
    best_test, best_label, best_pred, best_mask = None, None, None, None
    best_uwa = None
    for e in range(n_epochs):
        start_time = time.time()
        train_loss, train_acc, _,_,_,train_fscore,_= train_or_eval_model(model, loss_function,
                                               train_loader, e, optimizer, True)
        valid_loss, valid_acc, _,_,_,val_fscore,_= train_or_eval_model(model, loss_function, valid_loader, e)
        test_loss, test_acc, test_label, test_pred, test_mask, test_fscore, attentions = train_or_eval_model(model, loss_function, test_loader, e)
        dict1 = classification_report(test_label,test_pred,sample_weight=test_mask,digits=4,output_dict=True)
        # Unweighted accuracy (UWA) = macro-averaged recall across classes.
        test_uwa = dict1['macro avg']['recall']
        print(test_uwa)
        # Checkpoint only models beating fixed accuracy/UWA thresholds.
        if test_acc > 61 and test_uwa > 0.58:
            print("***"+str(test_acc)+"***"+str(test_uwa)+"***")
            print(classification_report(test_label,test_pred,sample_weight=test_mask,digits=4))
            print(confusion_matrix(test_label,test_pred,sample_weight=test_mask))
            torch.save(model.state_dict(), '/content/drive/My Drive/Emotion RNN/sadil/'+str(datetime.now())+"-"+str(test_acc)+"-"+str(test_uwa)+'rnn_model_loss_wiegts.pt')
        print('epoch {} train_loss {} train_acc {} train_fscore{} valid_loss {} valid_acc {} val_fscore{} test_loss {} test_acc {} test_fscore {} time {}'.\
                format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, val_fscore,\
                        test_loss, test_acc, test_fscore, round(time.time()-start_time,2)))
|
[
"sandilchamishka@gmail.com"
] |
sandilchamishka@gmail.com
|
c1cb1396dd88ef6fbed0176a71aed933dc22faff
|
aa41762b5ffd4508edda81fc340d7781c9c24b93
|
/Serial Communiction/MicroMojo-Py/micromojo/controller.py
|
e23f95abb7c15d0478e4224094e303d7b783f9cb
|
[] |
no_license
|
EllenWho/MicroMojo-for-Alchitry-labs
|
918c87949059b35c311424da078ff9da93ff9fb4
|
b16eca47a6f916bf39ce7679cc46a0d877b1443e
|
refs/heads/master
| 2023-02-18T22:11:33.443601
| 2021-01-24T17:13:05
| 2021-01-24T17:13:05
| 277,958,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,301
|
py
|
from micromojo import signals
from micromojo import regint
# Modified by Ziyi Hu to fit Mojo, marked some parts to choose between Au and Cu.
# line 20, line 22, line 40, line 50 to line 52, line 165 to line 171.
class MicroFPGA:
    """Serial controller for a MicroFPGA-flashed Mojo board.

    On construction the board is probed over a serial register interface;
    when the firmware version matches, per-channel signal wrappers (laser
    triggers, TTLs, servos, PWMs, analog inputs) are instantiated.

    Channel accessors are defensive: an out-of-range channel returns False
    (setters) or -1 (getters) instead of raising.
    """

    def __init__(self, n_lasers, n_ttls, n_servos, n_pwms, n_ais):
        self._serial = regint.RegisterInterface()
        self.device = self._serial.get_device()
        self._lasers = []
        self._ttls = []
        self._servos = []
        self._pwms = []
        self._ais = []
        if self._serial.is_connected():
            self.version = self._serial.read(signals.ADDR_VER)
            # self.id = self._serial.read(signals.ADDR_ID)
            # Board-id (Au/Cu) verification is disabled while targeting the
            # Mojo; see the header note about choosing between variants.
            if (self.version == signals.CURR_VER): #and (self.id == signals.ID_AU or self.id == signals.ID_CU):
                # One wrapper object per requested channel of each type.
                for i in range(n_lasers):
                    self._lasers.append(signals.LaserTrigger(i, self._serial))
                for i in range(n_ttls):
                    self._ttls.append(signals.Ttl(i, self._serial))
                for i in range(n_servos):
                    self._servos.append(signals.Servo(i, self._serial))
                for i in range(n_pwms):
                    self._pwms.append(signals.Pwm(i, self._serial))
                # if self.id == signals.ID_AU:
                for i in range(n_ais):
                    self._ais.append(signals.Analog(i, self._serial))
            else:
                # Wrong firmware: drop the connection and explain why.
                self.disconnect()
                if self.version != signals.CURR_VER:
                    raise Warning('Wrong version: expected '+str(signals.CURR_VER)+\
                                  ', got '+str(self.version)+'. The port has been disconnected')
                # if self.id != signals.ID_AU and self.id != signals.ID_CU:
                #     raise Warning('Wrong board id: expected '+str(signals.ID_AU)+\
                #                   ' (Au) or '+str(signals.ID_CU)+' (Cu), got '+str(self.id)+'. The port has been disconnected')

    @staticmethod
    def _valid(channel, channels):
        """True when `channel` is a valid index into the wrapper list."""
        return 0 <= channel < len(channels)

    def disconnect(self):
        """Close the serial connection."""
        self._serial.disconnect()

    def is_connected(self):
        """Whether the serial link is up.

        Bug fix: the original read ``self.__serial`` (name-mangled to
        ``_MicroFPGA__serial``, which is never set) and always raised
        AttributeError; the attribute is ``self._serial``.
        """
        return self._serial.is_connected()

    def get_number_lasers(self):
        return len(self._lasers)

    def get_number_ttls(self):
        return len(self._ttls)

    def get_number_servos(self):
        return len(self._servos)

    def get_number_pwms(self):
        return len(self._pwms)

    def get_number_analogs(self):
        return len(self._ais)

    def set_ttl_state(self, channel, value):
        """Drive TTL output `channel`; False when the channel is invalid."""
        if self._valid(channel, self._ttls):
            return self._ttls[channel].set_state(value)
        return False

    def get_ttl_state(self, channel):
        """Read TTL output `channel`; -1 when the channel is invalid."""
        if self._valid(channel, self._ttls):
            return self._ttls[channel].get_state()
        return -1

    def set_servo_state(self, channel, value):
        """Set servo `channel` position; False when the channel is invalid."""
        if self._valid(channel, self._servos):
            return self._servos[channel].set_state(value)
        return False

    def get_servo_state(self, channel):
        """Read servo `channel` position; -1 when the channel is invalid."""
        if self._valid(channel, self._servos):
            return self._servos[channel].get_state()
        return -1

    def set_pwm_state(self, channel, value):
        """Set PWM `channel` duty; False when the channel is invalid."""
        if self._valid(channel, self._pwms):
            return self._pwms[channel].set_state(value)
        return False

    def get_pwm_state(self, channel):
        """Read PWM `channel` duty; -1 when the channel is invalid."""
        if self._valid(channel, self._pwms):
            return self._pwms[channel].get_state()
        return -1

    def get_analog_state(self, channel):
        """Read analog input `channel`; -1 when the channel is invalid."""
        if self._valid(channel, self._ais):
            return self._ais[channel].get_state()
        return -1

    def set_mode_state(self, channel, value):
        """Set laser `channel` trigger mode; False when invalid."""
        if self._valid(channel, self._lasers):
            return self._lasers[channel].set_mode(value)
        return False

    def get_mode_state(self, channel):
        """Read laser `channel` trigger mode; -1 when invalid."""
        if self._valid(channel, self._lasers):
            return self._lasers[channel].get_mode()
        return -1

    def set_duration_state(self, channel, value):
        """Set laser `channel` pulse duration; False when invalid."""
        if self._valid(channel, self._lasers):
            return self._lasers[channel].set_duration(value)
        return False

    def get_duration_state(self, channel):
        """Read laser `channel` pulse duration; -1 when invalid."""
        if self._valid(channel, self._lasers):
            return self._lasers[channel].get_duration()
        return -1

    def set_sequence_state(self, channel, value):
        """Set laser `channel` trigger sequence; False when invalid."""
        if self._valid(channel, self._lasers):
            return self._lasers[channel].set_sequence(value)
        return False

    def get_sequence_state(self, channel):
        """Read laser `channel` trigger sequence; -1 when invalid."""
        if self._valid(channel, self._lasers):
            return self._lasers[channel].get_sequence()
        return -1

    def set_laser_state(self, channel, mode, duration, sequence):
        """Set mode, duration and sequence of laser `channel` at once."""
        if self._valid(channel, self._lasers):
            return self._lasers[channel].set_state(mode, duration, sequence)
        return False

    def get_laser_state(self, channel):
        """Read [mode, duration, sequence] of laser `channel`;
        [-1, -1, -1] when invalid."""
        if self._valid(channel, self._lasers):
            return self._lasers[channel].get_state()
        return [-1,-1,-1]

    # def get_id(self):
    #     if self.id == signals.ID_AU:
    #         return 'Au'
    #     elif self.id == signals.ID_CU:
    #         return 'Cu'
    #     else:
    #         return 'Unknown'
|
[
"noreply@github.com"
] |
EllenWho.noreply@github.com
|
6971c88f5af8b4f2695f6858174153b5bf96ab88
|
34a5c407030485d38346c082006ab111ef71e55f
|
/dtools/datamirror.py
|
5ad363a90ac3b98744e058fc03688003f323a6f8
|
[
"Apache-2.0"
] |
permissive
|
maguelo/dtools
|
cbd22af60562fc6e52100dd2fba13a83f8390a65
|
21926c8bdddcff33ed27000869f31b819f221d50
|
refs/heads/master
| 2023-06-18T13:24:51.663298
| 2021-07-15T09:46:19
| 2021-07-15T09:46:19
| 350,441,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,160
|
py
|
from datetime import datetime
class DatasetMirror():
    """Records column transformations applied during training so the exact
    same pipeline can be replayed on new data with transform().

    Actions performed with is_training=True are appended to an internal
    list in order; columns listed in `ignore_columns` are protected from
    every action.
    """

    def __init__(self, target=None, ignore_columns=None):
        # Dispatch table mapping recorded action tags to implementations.
        self.ACTIONS_DICT = {"drop_columns": self.drop_columns,
                             "apply_fcn": self.apply_fcn}
        self._actions = []
        # Bug fix: the original used a mutable default (`ignore_columns=[]`)
        # shared across instances; copy the caller's list instead.
        self.ignore_columns = list(ignore_columns) if ignore_columns else []
        self._fcn = {}
        self.target = target

    def set_ignore_columns(self, ignore_columns):
        """Add columns to the protected set (deduplicated)."""
        self.ignore_columns = set(list(self.ignore_columns) + ignore_columns)

    def register_function(self, name, fcn):
        """Register a named transformation callable; names must be unique.

        Raises ValueError when `name` is already registered.
        """
        if name in self._fcn:
            raise ValueError("Duplicated function: {}".format(name))
        self._fcn[name] = fcn

    def _ignore_columns(self, columns):
        """Return `columns` with the protected columns removed."""
        columns = set(columns)
        # Get new set with elements that are only in a but not in b
        return list(columns.difference(self.ignore_columns))

    def drop_columns(self, data, columns, is_training=True, ignore_column_enabled=True):
        """Drop `columns` (minus protected ones) from the DataFrame `data`,
        recording the action when training."""
        columns = self.__common_pre_actions("drop_columns", columns, is_training, ignore_column_enabled)
        return data.drop(columns, axis=1)

    def __common_pre_actions(self, fcn_name, columns, is_training, ignore_column_enabled):
        # Shared pre-step: filter protected columns, then log the action.
        if ignore_column_enabled:
            columns = self._ignore_columns(columns)
        if is_training:
            self._actions.append((fcn_name, columns))
        return columns

    def list_transform(self):
        """Print the recorded pipeline, one action per line."""
        for action in self._actions:
            print("{}:\n\tparams: {}\n".format(action[0], action[1:]))

    def transform(self, data, is_training=False):
        """Replay every recorded action on `data`, in recorded order."""
        for action in self._actions:
            data = self.ACTIONS_DICT[action[0]](data, *action[1:], is_training=is_training)
        return data

    def apply_fcn(self, fcn_name, data, columns=None, params=None, is_training=True, ignore_column_enabled=True):
        """Apply the registered function `fcn_name` to `data` over `columns`
        with keyword `params`, recording the action when training.

        Bug fix: `columns=[]` / `params={}` mutable defaults replaced with
        None sentinels (backward-compatible).
        """
        columns = [] if columns is None else columns
        params = {} if params is None else params
        if ignore_column_enabled:
            columns = self._ignore_columns(columns)
        if is_training:
            self._actions.append(("apply_fcn", fcn_name, columns, params))
        data = self._fcn[fcn_name](data, columns, **params)
        return data
|
[
"maguelo@stark-2.local"
] |
maguelo@stark-2.local
|
310de0fac2044190438d822293e99c96469c7b6b
|
10e7464b60f4ad548218f3be5c52447e68358a14
|
/Research Simulations/smartgrid_coop_research/simulation_notebook/nrg_pubsub_ex.py
|
349b5a4955dae17d0fe937937ff38a6f4a358d7f
|
[] |
no_license
|
hlopez058/PHD
|
b5563eca3d6c399b4cf9b78117836a2822601058
|
49ccfab66f7141650b997f3b18cfca3871020306
|
refs/heads/master
| 2022-12-11T02:51:11.217469
| 2022-01-15T14:17:29
| 2022-01-15T14:17:29
| 118,447,682
| 3
| 2
| null | 2022-12-08T02:20:42
| 2018-01-22T11:21:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
import threading
import paho.mqtt.client as mqtt
import json
import pandas as pd
import time
mqttBroker = "localhost"
mqttPort = 1883 # port for mosquitto broker
client = mqtt.Client("client1") # create new instance
msg_dict = [] # create a list to store the messages
def main():
client.on_connect = on_connect # Define callback function for successful connection
client.on_message = on_message # Define callback function for receipt of a message
client.connect(mqttBroker, mqttPort, 60) # Connect to the broker
client.loop_forever() # Start networking daemon
def on_connect(client, userdata, flags, rc):
print("Connected with result code {0}".format(str(rc)))
client.subscribe("INFO")
def on_message(client, userdata, msg):
# convert msg to json
data = json.loads(msg.payload)
# check if data is in dict
if not any(msg['time'] == data['time'] and
msg['id'] == data['id'] for msg in msg_dict):
# store data in a dictionary
msg_dict.append(data)
def thread_process_msgs(id):
while True:
try:
df = pd.DataFrame.from_records(msg_dict)
tp = df[df['kW'] > 0].groupby(['time']).sum()
tc = df[df['kW'] < 0].groupby(['time']).sum()
# itterate through values of tp
for index, row in tp.iterrows():
# update df with tp at time index
df.loc[df['time'] == index, 'tp'] = row['kW']
for index, row in tc.iterrows():
# update df with tc at time index
df.loc[df['time'] == index, 'tc'] = row['kW']
time.sleep(2)
except Exception as e:
print(e)
if __name__ == '__main__':
threading.Thread(target=thread_process_msgs, args=(1,)).start()
main()
p2p_meter_msg = {
'time': 1637984408,
'qos': 0,
'id': 1,
'kW': 0.12,
}
|
[
""
] | |
2a0373b7b4a522bd639b6871e3dcae7b1300d29e
|
e98018ca78ebdda8827a1ac3801c91aaceade99f
|
/exercise_4.py
|
edaf418a4c0163f50ad778dd263fe6c7416330a5
|
[] |
no_license
|
oddsun/pragmatic
|
6b8f586ae2732dcea52031549650993409df7550
|
922f77119e68c9484ddf52ee02cde0b96676bf89
|
refs/heads/master
| 2023-06-07T04:56:21.240254
| 2021-06-25T02:22:45
| 2021-06-25T02:22:45
| 379,135,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
import turtle
from functools import partial
def parse_line(line) -> None:
"""
Parse a line of command
:param line: line of commands to parse, assumption: command and arg separated by a space
:return: None
"""
cmd_lst = line.split(" ")
arg = None
if len(cmd_lst) == 1:
cmd = cmd_lst[0]
elif len(cmd_lst) == 2:
cmd, arg = cmd_lst
else:
raise NotImplementedError('Command format not accepted! Only accepting "CMD" or "CMD ARG".')
try:
func = CMD_FUNC_MAP[cmd]
except KeyError:
raise KeyError('Unknown command: ' + cmd)
if arg is not None:
func(arg)
else:
func()
def pen(size=2):
turtle.pen(pensize=size)
def draw(distance, to_angle=0):
turtle.setheading(to_angle=to_angle)
turtle.forward(distance=int(distance))
CMD_FUNC_MAP = {
'P': pen,
'D': turtle.pendown,
'W': partial(draw, to_angle=180),
'N': partial(draw, to_angle=90),
'E': partial(draw, to_angle=0),
'S': partial(draw, to_angle=270),
'U': turtle.penup
}
def parse_file(fp):
with open(fp) as f:
content = f.read()
for line in content.split('\n'):
parse_line(line)
def main():
parse_file('ex4.txt')
if __name__ == '__main__':
main()
input()
|
[
"1161465+oddsun@users.noreply.github.com"
] |
1161465+oddsun@users.noreply.github.com
|
bee6112a5b8cd39d5f373d237af948f353fa884f
|
4133bd4de741900a599d2acede88c14589652454
|
/listas/media.py
|
e3a096bcc39049cdddc47f08c5b348e3b3d2c5b0
|
[] |
no_license
|
victorvhs/zumbi_curso
|
5659a8fbc352ec4174230cda0542957951fa4ba8
|
3afb363874cca2286615d3595c5f50efc865cde1
|
refs/heads/master
| 2021-06-03T02:36:47.847231
| 2021-05-21T23:28:19
| 2021-05-21T23:28:19
| 64,033,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# -*- coding: utf-8 -*-
#Média de 5 numeros dentro de uma lista
notas = [10,10,10,10,10]
soma = 0
i = 0
while i < 5:
soma = soma + notas[i]
i+=1
media = soma / i
print( "Media %5.2f" %media)
|
[
"victor.h.s.reis@gmail.com"
] |
victor.h.s.reis@gmail.com
|
3c4dec3da540fb6d60c5ede62ebdc2f8d3f62931
|
ded10c2f2f5f91c44ec950237a59225e8486abd8
|
/.history/2/matrix_squaring_20200423124213.py
|
52623136a0f6bca492d83b1518d16106b00db3aa
|
[] |
no_license
|
jearistiz/Statistical-Physics-Projects
|
276a86407b32ded4e06b32efb2fadbd8eff8daed
|
d9c5b16a50856e148dc8604d92b6de3ea21fc552
|
refs/heads/master
| 2022-11-05T03:41:23.623050
| 2020-06-28T06:36:05
| 2020-06-28T06:36:05
| 254,909,897
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,982
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 20200414
def rho_free(x,xp,beta):
"""Uso: devuelve elemento de matriz dsnsidad para el caso de una partícula libre en
un toro infinito.
"""
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta))
def harmonic_potential(x):
"""Uso: Devuelve valor del potencial armónico para una posición x dada"""
return 0.5*x**2
def anharmonic_potential(x):
"""Devuelve valor de potencial anarmónico para una posición x dada"""
# return np.abs(x)*(1+np.cos(x)) #el resultado de este potencial es interesante
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
Uso: calcula probabilidad teórica cuántica de encontrar al oscilador armónico
(inmerso en un baño térmico a temperatura inversa beta) en la posición x.
Recibe:
x: float -> posición
beta: float -> inverso de temperatura en unidades reducidas beta = 1/T.
Devuelve:
probabilidad teórica cuántica en posición x para temperatura inversa beta.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
def Z_QHO(beta):
"""Uso: devuelve valor de función de partición para el QHO unidimensional"""
return 0.5/np.sinh(beta/2)
def E_QHO_avg_theo(beta):
"""Uso: devuelve valor de energía interna para el QHO unidimensional"""
return 0.5/np.tanh(0.5*beta)
def rho_trotter(x_max=5., nx=101, beta=1, potential=harmonic_potential):
"""
Uso: devuelve matriz densidad en aproximación de Trotter para altas temperaturas
y bajo influencia del potencial "potential".
Recibe:
x_max: float -> los valores de x estarán en el intervalo (-x_max,x_max).
nx: int -> número de valores de x considerados (igualmente espaciados).
beta: float -> inverso de temperatura en unidades reducidas.
potential: func -> potencial de interacción. Debe ser solo función de x.
Devuelve:
rho: numpy array, shape=(nx,nx) -> matriz densidad en aproximación de Trotter para
altas temperaturas y potencial dado.
grid_x: numpy array, shape=(nx,) -> valores de x en los que está evaluada rho.
dx: float -> separación entre valores contiguos de grid_x
"""
nx = int(nx)
# Si nx es par lo cambiamos al impar más cercano para incluir al 0 en valores de x
if nx%2 == 0:
nx = nx + 1
# Valor de la discretización de posiciones según x_max y nx dados como input
dx = 2 * x_max/(nx-1)
# Lista de valores de x teniendo en cuenta discretización y x_max
grid_x = [i*dx for i in range(-int((nx-1)/2),int((nx-1)/2 + 1))]
# Construcción de matriz densidad dada por aproximación de Trotter
rho = np.array([[rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp)))
for x in grid_x]
for xp in grid_x])
return rho, grid_x, dx
def density_matrix_squaring(rho, grid_x, N_iter=1, beta_ini=1, print_steps=True):
"""
Uso: devuelve matriz densidad luego de aplicarle algoritmo matrix squaring N_iter veces.
En la primera iteración se usa matriz de densidad dada por el input rho (a
temperatura inversa beta_ini); en las siguientes iteraciones se usa matriz densidad
generada por la iteración inmediatamente anterior. El sistema asociado a la matriz
densidad obtenida (al final de aplicar el algoritmo) está a temperatura inversa
beta_fin = beta_ini * 2**(N_iter).
Recibe:
rho: numpy array, shape=(nx,nx) -> matriz densidad discretizada en valores dados
por x_grid.
grid_x: numpy array, shape=(nx,) -> valores de x en los que está evaluada rho.
N_iter: int -> número de iteraciones del algoritmo.
beta_ini: float -> valor de inverso de temperatura asociado a la
matriz densidad rho dada como input.
print_steps: bool -> decide si muestra valores de beta en cada
iteración.
Devuelve:
rho: numpy array, shape=(nx,nx) -> matriz densidad de estado rho a temperatura
inversa igual a beta_fin.
trace_rho: float -> traza de la matriz densidad a temperatura
inversa igual a beta_fin. Por la definición que
tomamos de rho, ésta es equivalente a la función
partición a dicha temperatura.
beta_fin: float -> temperatura inversa del sistema asociado a rho.
"""
# Valor de discretización de las posiciones
dx = grid_x[1] - grid_x[0]
# Cálculo del valor de beta_fin según valores beta_ini y N_iter dados como input
beta_fin = beta_ini * 2 ** N_iter
# Itera algoritmo matrix squaring
if print_steps:
print('\nbeta_ini = %.3f'%beta_ini,
'\n----------------------------------------------------------------')
for i in range(N_iter):
rho = dx * np.dot(rho,rho)
# Imprime información relevante
if print_steps:
print(u'Iteración %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
if print_steps:
print('----------------------------------------------------------------\n' +
u'beta_fin = %.3f'%beta_fin)
# Calcula traza de rho
trace_rho = np.trace(rho)*dx
return rho, trace_rho, beta_fin
def save_csv(data, data_headers=None, data_index=None, file_name=None,
relevant_info=None, print_data=True):
"""
Uso: data debe contener listas que serán las columnas de un archivo CSV que se guardará
con nombre file_name. relevant_info agrega comentarios en primeras líneas del
archivo.
Recibe:
data: array of arrays, shape=(nx,ny) -> cada columna es una columna del archivo.
data_headers: numpy array, shape=(ny,) -> nombres de las columnas
data_index: numpy array, shape=(nx,) -> nombres de las filas
file_name: str -> nombre del archivo en el que se guardarán datos.
relevant_info: list of str -> información que se agrega como comentario en
primeras líneas. Cada elemento de esta lista
se agrega como una nueva línea.
print_data: bool -> decide si imprime datos guardados, en pantalla.
Devuelve:
data_pdDF: pd.DataFrame -> archivo con datos formato "pandas data frame".
guarda archivo con datos e inforamación relevante en primera línea.
"""
data_pdDF = pd.DataFrame(data, columns=data_headers, index=data_index)
# Asigna nombre al archivo para que se guarde en el folder en el que está
# guardado el script que lo usa
script_dir = os.path.dirname(os.path.abspath(__file__))
if file_name==None:
#path completa para este script
file_name = script_dir + '/' + 'file_name.csv'
# Crea archivo CSV y agrega comentarios relevantes dados como input
if relevant_info is not None:
# Agregamos información relevante en primeras líneas
with open(file_name,mode='w') as file_csv:
for info in list(relevant_info):
file_csv.write('# '+info+'\n')
file_csv.close()
# Usamos pandas para escribir en archivo formato csv.
with open(file_name,mode='a') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
else:
with open(file_name,mode='w') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
# Imprime datos en pantalla.
if print_data==True:
print(data_pdDF)
return data_pdDF
def run_pi_x_sq_trotter(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
potential_string='harmonic_potential', print_steps=True,
save_data=True, csv_file_name=None, relevant_info=None,
plot=True, save_plot=True, show_plot=True, plot_file_name=None):
"""
Uso: corre algoritmo matrix squaring iterativamente (N_iter veces). En la primera
iteración se usa una matriz densidad en aproximación de Trotter a temperatura
inversa beta_ini = beta_fin * 2**(-N_iter) para potencial dado por potential;
en las siguientes iteraciones se usa matriz densidad generada por la iteración
inmediatamente anterior. Además ésta función guarda datos de pi(x;beta) vs. x
en archivo de texto y grafica pi(x;beta) comparándolo con teoría para el oscilador
armónico cuántico.
Recibe:
x_max: float -> los valores de x estarán en el intervalo (-x_max,x_max).
nx: int -> número de valores de x considerados.
N_iter: int -> número de iteraciones del algoritmo matrix squaring.
beta_ini: float -> valor de inverso de temperatura que queremos tener al final de
aplicar el algoritmo matrix squaring iterativamente.
potential: func -> potencial de interacción usado en aproximación de trotter. Debe
ser función de x.
potential_string: str -> nombre del potencial (con éste nombramos los archivos que
se generan).
print_steps: bool -> decide si imprime los pasos del algoritmo matrix squaring.
save_data: bool -> decide si guarda los datos en archivo .csv.
file_name: str -> nombre de archivo CSV en que se guardan datos. Si valor es None,
se guarda con nombre conveniente según parámetros relevantes.
plot: bool -> decide si grafica.
save_plot: bool -> decide si guarda la figura.
show_plot: bool -> decide si muestra la figura en pantalla.
Devuelve:
rho: numpy array, shape=(nx,nx) -> matriz densidad de estado rho a temperatura
inversa igual a beta_fin.
trace_rho: float -> traza de la matriz densidad a temperatura
inversa igual a beta_fin. Por la definición que
tomamos de "rho", ésta es equivalente a la
función partición en dicha temperatura.
grid_x: numpy array, shape=(nx,) -> valores de x en los que está evaluada rho.
"""
# Cálculo del valor de beta_ini según valores beta_fin y N_iter dados como input
beta_ini = beta_fin * 2**(-N_iter)
# Cálculo de rho con aproximación de Trotter
rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
grid_x = np.array(grid_x)
# Aproximación de rho con matrix squaring iterado N_iter veces.
rho, trace_rho, beta_fin_2 = density_matrix_squaring(rho, grid_x, N_iter,
beta_ini, print_steps)
print('---------------------------------------------------------'
+ '---------------------------------------------------------\n'
+ u'Matrix squaring: beta_ini = %.3f --> beta_fin = %.3f'%(beta_ini, beta_fin_2)
+ u' N_iter = %d Z(beta_fin) = Tr(rho(beta_fin)) = %.3E \n'%(N_iter,trace_rho)
+ '---------------------------------------------------------'
+ '---------------------------------------------------------'
)
# Normalización de rho a 1 y cálculo de densidades de probabilidad para valores en grid_x.
rho_normalized = np.copy(rho)/trace_rho
x_weights = np.diag(rho_normalized)
# Guarda datos en archivo CSV.
script_dir = os.path.dirname(os.path.abspath(__file__)) #path completa para este script
if save_data:
# Prepara datos a guardar y headers
pi_x_data = np.array([grid_x.copy(),x_weights.copy()])
pi_x_data_headers = ['position_x','prob_density']
# Nombre del archivo .csv en el que guardamos valores de pi(x;beta_fin).
if csv_file_name is None:
csv_file_name = (u'pi_x-ms-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.csv'
%(potential_string,beta_fin,x_max,nx,N_iter))
csv_file_name = script_dir + '/' + csv_file_name
# Información relevante para agregar como comentario al archivo csv.
if relevant_info is None:
relevant_info = ['pi(x;beta_fin) computed using matrix squaring algorithm and'
+ ' Trotter approximation. Parameters:',
u'%s x_max = %.3f nx = %d '%(potential_string,x_max,nx)
+ u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,)
+ u'beta_fin = %.3f'%beta_fin]
# Guardamos valores de pi(x;beta_fin) en archivo csv.
pi_x_data = save_csv(pi_x_data.transpose(), pi_x_data_headers, None, csv_file_name,
relevant_info,print_data=0)
# Gráfica y comparación con teoría
if plot:
plt.figure(figsize=(8,5))
plt.plot(grid_x, x_weights,
label = 'Matrix squaring +\nfórmula de Trotter.\n$N=%d$ iteraciones\n$dx=%.3E$'
%(N_iter,dx))
plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'Valor teórico QHO')
plt.xlabel(u'x')
plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
plt.tight_layout()
if save_plot:
if plot_file_name is None:
plot_file_name = \
(u'pi_x-ms-plot-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.eps'
%(potential_string,beta_fin,x_max,nx,N_iter))
plot_file_name = script_dir + '/' + plot_file_name
plt.savefig(plot_file_name)
if show_plot:
plt.show()
plt.close()
return rho, trace_rho, grid_x
def Z_several_values(temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
Z_file_name = None, relevant_info_Z = None, print_Z_data = True,
x_max=7., nx=201, N_iter=7, potential = harmonic_potential,
potential_string = 'harmonic_potential', print_steps=False,
save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None,
plot=False, save_plot=False, show_plot=False,
pi_x_plot_file_name=None):
"""
Uso: calcula varios valores para la función partición, Z, usando operador densidad
aproximado aproximado por el algoritmo matrix squaring.
Recibe:
temp_min: float -> Z se calcula para valores de beta en (1/temp_min,1/temp_max)
con N_temp valores igualmente espaciados.
temp_max: float.
N_temp: int.
save_Z_csv: bool -> decide si guarda valores calculados en archivo CSV.
Z_file_name: str -> nombre del archivo en el que se guardan datos de Z. Si valor
es None, se guarda con nombre conveniente según parámetros
relevantes.
relevant_info_Z: list -> infrmación relevante se añade a primeras líneas del archivo.
Cada str separada por una coma en la lista se añade como una
nueva línea.
print_Z_data: bool -> imprime datos de Z en pantalla.
*args: tuple -> argumentos de run_pi_x_sq_trotter
Devuelve:
Z_data: list, shape=(3,)
Z_data[0]: list, shape(N_temp,) -> contiene valores de beta en los que está evaluada Z.
Z_data[1]: list, shape(N_temp,) -> contiene valores de T en los que está evaluada Z.
Z_data[2]: list, shape(N_temp,) -> contiene valores de Z.
Z(beta) = Z(1/T) =
Z_data[0](Z_data[1]) = Z_data[0](Z_data[2])
"""
# Transforma valores de beta en valores de T y calcula lista de beta.
beta_max = 1./temp_min
beta_min = 1./temp_max
N_temp = int(N_temp)
beta_array = np.linspace(beta_max,beta_min,N_temp)
Z = []
# Calcula valores de Z para valores de beta especificados en beta_array.
for beta_fin in beta_array:
rho, trace_rho, grid_x = run_pi_x_sq_trotter(x_max, nx, N_iter, beta_fin, potential,
potential_string, print_steps,
save_pi_x_data, pi_x_file_name,
relevant_info_pi_x, plot, save_plot,
show_plot, pi_x_plot_file_name)
Z.append(trace_rho)
# Calcula el output de la función.
Z_data = np.array([beta_array.copy(), 1./beta_array.copy(), Z.copy()], dtype=float)
# Guarda datos de Z en archivo CSV.
if save_Z_csv == True:
script_dir = os.path.dirname(os.path.abspath(__file__))
if Z_file_name is None:
Z_file_name = ('Z-ms-%s-beta_max_%.3f-'%(potential_string,1./temp_min)
+ 'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max)
+ 'nx_%d-N_iter_%d.csv'%(nx, N_iter))
Z_file_name = script_dir + '/' + Z_file_name
if relevant_info_Z is None:
relevant_info_Z = ['Partition function at several temperatures',
'%s beta_max = %.3f '%(potential_string,1./temp_min)
+ 'beta_min = %.3f N_temp = %d '%(1./temp_max,N_temp)
+ 'x_max = %.3f nx = %d N_iter = %d'%(x_max,nx, N_iter)]
Z_data_headers = ['beta', 'temperature', 'Z']
Z_data = save_csv(Z_data.transpose(), Z_data_headers, None, Z_file_name, relevant_info_Z,
print_data=False)
if print_Z_data == True:
print(Z_data)
return Z_data
def average_energy(read_Z_data=True, generate_Z_data=False, Z_file_name = None,
plot_energy=True, save_plot_E=True, show_plot_E=True,
E_plot_name=None,
temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
relevant_info_Z=None, print_Z_data=True,
x_max=7., nx=201, N_iter=7, potential=harmonic_potential,
potential_string='harmonic_potential', print_steps=False,
save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None,
plot_pi_x=False, save_plot_pi_x=False, show_plot_pi_x=False,
plot_pi_x_file_name=None):
"""
Uso: calcula energía promedio, E, del sistema en cuestión dado por potential.
Se puede decidir si se leen datos de función partición o se generan,
ya que E = - (d/d beta )log(Z).
Recibe:
read_Z_data: bool -> decide si se leen datos de Z de un archivo con nombre
Z_file_name.
generate_Z_data: bool -> decide si genera datos de Z.
Nota: read_Z_data y generate_Z_data son excluyentes. Se analiza primero primera opción
Z_file_name: str -> nombre del archivo en del que se leerá o en el que se
guardarán datos de Z. Si valor es None, se guarda con nombre
conveniente según parámetros relevantes.
plot_energy: bool -> decide si gráfica energía.
save_plot_E: bool -> decide si guarda gráfica de energía. Nótese que si
plot_energy=False, no se generará gráfica.
show_plot_E: bool -> decide si muestra gráfica de E en pantalla
E_plot_name: str -> nombre para guardar gráfico de E.
*args: tuple -> argumentos de Z_several_values
Devuelve:
E_avg: list -> valores de energía promedio para beta especificados por
beta__read
beta_read: list
"""
# Decide si lee o genera datos de Z.
if read_Z_data:
Z_file_read = pd.read_csv(Z_file_name, index_col=0, comment='#')
elif generate_Z_data:
t_0 = time()
Z_data = Z_several_values(temp_min, temp_max, N_temp, save_Z_csv, Z_file_name,
relevant_info_Z, print_Z_data, x_max, nx, N_iter, potential,
potential_string, print_steps, save_pi_x_data, pi_x_file_name,
relevant_info_pi_x, plot_pi_x,save_plot_pi_x, show_plot_pi_x,
plot_pi_x_file_name)
t_1 = time()
print('--------------------------------------------------------------------------\n'
+ '%d values of Z(beta) generated --> %.3f sec.'%(N_temp,t_1-t_0))
Z_file_read = Z_data
else:
print('Elegir si se generan o se leen los datos para la función partición, Z.\n'
+ 'Estas opciones son mutuamente exluyentes. Si se seleccionan las dos, el'
+ 'algoritmo escoge leer los datos.')
beta_read = Z_file_read['beta']
temp_read = Z_file_read['temperature']
Z_read = Z_file_read['Z']
# Calcula energía promedio.
E_avg = np.gradient(-np.log(Z_read),beta_read)
# Grafica.
if plot_energy:
plt.figure(figsize=(8,5))
plt.plot(temp_read,E_avg,label=u'$\langle E \\rangle$ via path integral\nnaive sampling')
plt.plot(temp_read,E_QHO_avg_theo(beta_read),label=u'$\langle E \\rangle$ teórico')
plt.legend(loc='best')
plt.xlabel(u'$T$')
plt.ylabel(u'$\langle E \\rangle$')
if save_plot_E:
script_dir = os.path.dirname(os.path.abspath(__file__))
if E_plot_name is None:
E_plot_name = ('E-ms-plot-%s-beta_max_%.3f-'%(potential_string,1./temp_min)
+ 'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max)
+ 'nx_%d-N_iter_%d.eps'%(nx, N_iter))
E_plot_name = script_dir + '/' + E_plot_name
plt.savefig(E_plot_name)
if show_plot_E:
plt.show()
plt.close()
return E_avg, beta_read.to_numpy()
def calc_error(x,xp,dx):
"""
Uso: error acumulado en cálculo computacional de pi(x;beta) comparado
con valor teórico
"""
x, xp = np.array(x), np.array(xp)
N = len(x)
if N != len(xp):
raise Exception('x y xp deben ser del mismo tamaño.')
else:
return np.sum(np.abs(x-xp))*dx
def optimization(generate_opt_data=True, read_opt_data=False, beta_fin=4, x_max=5,
potential=harmonic_potential, potential_string='harmonic_potential',
nx_min=50, nx_max=1000, nx_sampling=50, N_iter_min=1, N_iter_max=20,
save_opt_data=False, opt_data_file_name=None, opt_relevant_info=None,
plot=True, show_plot=True, save_plot=True, opt_plot_file_name=None):
"""
Uso: calcula diferentes valores de error usando calc_error() para encontrar valores de
dx y beta_ini óptimos para correr el alcoritmo (óptimos = que minimicen error)
Recibe:
generate_opt_data: bool -> decide si genera datos para optimización.
read_opt_data: bool -> decide si lee datos para optimización.
Nota: generate_opt_data y read_opt_data son excluyentes. Se evalúa primero la primera.
nx_min: int
nx_max: int -> se relaciona con dx = 2*x_max/(nx-1).
nx_sampling: int -> se generan nx mediante range(nx_max,nx_min,-1*nx_sampling).
N_iter_min: int
N_iter_max: int -> se relaciona con beta_ini = beta_fin **(-N_iter). Se gereran
valores de N_iter con range(N_iter_max,N_iter_min-1,-1).
save_opt_data: bool -> decide si guarda datos de optimización en archivo CSV.
opt_data_file_name: str -> nombre de archivo para datos de optimización.
plot: bool -> decide si grafica optimización.
show_plot: bool -> decide si muestra optimización.
save_plot: bool -> decide si guarda optimización.
opt_plot_file_name: str -> nombre de gráfico de optimización. Si valor es None, se
guarda con nombre conveniente según parámetros relevantes.
Devuelve:
error: list, shape=(nb,ndx) -> valores de calc_error para diferentes valores de dx y
beta_ini. dx incrementa de izquierda a derecha en lista
y beta_ini incrementa de arriba a abajo.
dx_grid: list, shape=(ndx,) -> valores de dx para los que se calcula error.
beta-ini_grid: list, shape=(nb,) -> valores de beta_ini para los que calcula error.
"""
t_0 = time()
# Decide si genera o lee datos.
if generate_opt_data:
N_iter_min = int(N_iter_min)
N_iter_max = int(N_iter_max)
nx_min = int(nx_min)
nx_max = int(nx_max)
if nx_min%2==1:
nx_min -= 1
if nx_max%2==0:
nx_max += 1
# Crea valores de nx y N_iter (equivalente a generar valores de dx y beta_ini)
nx_values = range(nx_max,nx_min,-1*nx_sampling)
N_iter_values = range(N_iter_max,N_iter_min-1,-1)
dx_grid = [2*x_max/(nx-1) for nx in nx_values]
beta_ini_grid = [beta_fin * 2**(-N_iter) for N_iter in N_iter_values]
error = []
# Calcula error para cada valor de nx y N_iter especificado
# (equivalentemente dx y beta_ini).
for N_iter in N_iter_values:
row = []
for nx in nx_values:
rho,trace_rho,grid_x = run_pi_x_sq_trotter(x_max, nx, N_iter, beta_fin,
potential, potential_string,
False, False, None, None, False,
False, False, None)
grid_x = np.array(grid_x)
dx = grid_x[1]-grid_x[0]
rho_normalized = np.copy(rho)/trace_rho
pi_x = np.diag(rho_normalized)
theoretical_pi_x = QHO_canonical_ensemble(grid_x,beta_fin)
error_comp_theo = calc_error(pi_x,theoretical_pi_x,dx)
row.append(error_comp_theo)
error.append(row)
elif read_opt_data:
error = pd.read_csv(opt_data_file_name, index_col=0, comment='#')
dx_grid = error.columns.to_numpy()
beta_ini_grid = error.index.to_numpy()
error = error.to_numpy()
else:
raise Exception('Escoja si generar o leer datos en optimization(.)')
# Toma valores de error en cálculo de Z (nan e inf) y los remplaza por
# el valor de mayor error en el gráfico.
try:
error = np.where(np.isinf(error),0,error)
error = np.where(np.isnan(error),0,error)
nan_value = 1.3*np.max(error)
error = np.where(error==0, float('nan'), error)
except:
nan_value = 0
error = np.nan_to_num(error, nan=nan_value, posinf=nan_value, neginf=nan_value)
script_dir = os.path.dirname(os.path.abspath(__file__))
# Guarda datos (solo si fueron generados y se escoje guardar)
if generate_opt_data and save_opt_data:
if opt_data_file_name is None:
opt_data_file_name = ('pi_x-ms-opt-%s-beta_fin_%.3f'%(potential_string, beta_fin)
+ '-x_max_%.3f-nx_min_%d-nx_max_%d'%(x_max, nx_min, nx_max)
+ '-nx_sampling_%d-N_iter_min_%d'%(nx_sampling, N_iter_min)
+ '-N_iter_max_%d.csv'%(N_iter_max))
opt_data_file_name = script_dir + '/' + opt_data_file_name
if opt_relevant_info is None:
opt_relevant_info = ['Optimization of parameters dx and beta_ini of matrix squaring'
+ ' algorithm', '%s beta_fin = %.3f '%(potential_string, beta_fin)
+ 'x_max = %.3f nx_min = %d nx_max = %d '%(x_max, nx_min, nx_max)
+ 'nx_sampling = %d N_iter_min = %d '%(nx_sampling, N_iter_min)
+ 'N_iter_max = %d'%(N_iter_max)]
save_csv(error, dx_grid, beta_ini_grid, opt_data_file_name, opt_relevant_info)
t_1 = time()
# Grafica.
if plot:
fig, ax = plt.subplots(1, 1)
DX, BETA_INI = np.meshgrid(dx_grid, beta_ini_grid)
cp = plt.pcolormesh(DX,BETA_INI,error)
plt.colorbar(cp)
ax.set_ylabel(u'$\\beta_{ini}$')
ax.set_xlabel('$dx$')
plt.tight_layout()
if save_plot:
if opt_plot_file_name is None:
opt_plot_file_name = \
('pi_x-ms-opt-plot-%s-beta_fin_%.3f'%(potential_string, beta_fin)
+ '-x_max_%.3f-nx_min_%d-nx_max_%d'%(x_max, nx_min, nx_max)
+ '-nx_sampling_%d-N_iter_min_%d'%(nx_sampling, N_iter_min)
+ '-N_iter_max_%d.eps'%(N_iter_max))
opt_plot_file_name = script_dir + '/' + opt_plot_file_name
plt.savefig(opt_plot_file_name)
if show_plot:
plt.show()
plt.close()
comp_time = t_1 - t_0
return error, dx_grid, beta_ini_grid, comp_time
|
[
"jeaz.git@gmail.com"
] |
jeaz.git@gmail.com
|
c742613b5122688d9b558e72aaa565118b5bb6b8
|
8df8b18d1b1cffa683d49cbb7c6e601b2c58c582
|
/test_scripts/test_stutter.py
|
b78976a4689f30314e0064216f306e97f946b2f5
|
[] |
no_license
|
thatmoodyguy/vision2020
|
3dc4fca17d4458497b901e16ae40a39c5313e103
|
a12a54996e798455b566698f4559dd7296a76c3f
|
refs/heads/master
| 2020-12-19T04:12:43.327384
| 2020-02-29T05:18:16
| 2020-02-29T05:18:16
| 235,616,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
def destutter_coords(new_coords):
while len(last_coords) > 6:
last_coords.pop()
last_coords.insert(0, "{}:{}".format(new_coords[0], new_coords[1]))
print(last_coords)
sums = {}
for coords in last_coords:
if sums.get(coords) is None:
sums[coords] = 1
else:
sums[coords] = sums[coords] + 1
if sums[coords] >= 4:
spl = coords.split(":")
c = (int(spl[0]), int(spl[1]))
print("winner: {}".format(c))
print("winner: {}".format(new_coords))
return new_coords
last_coords = []
src = [
(10, 10),
(10, 11),
(10, 10),
(10, 10),
(11, 11),
(12, 12),
(13, 13),
(13, 13),
(18,12),
(13, 13),
(18,12),
(13, 13),
(13, 13),
(13, 13),
(18,12),
(13, 13),
(13, 13)
]
for c in src:
destutter_coords(c)
|
[
"john@mentalvelocity.com"
] |
john@mentalvelocity.com
|
7f1ab427afdff94d83f6e4759d9de6159fc61dd8
|
91e0a29332eaadf067807e438a4a595c93507729
|
/Object Oriented Programming/Day-1/Assgn-9.py
|
ea0a1f301af3d1cadd386b082616b15f9c537f6a
|
[] |
no_license
|
ankushsharma0904/Infytq-assignment-solution
|
873f49339e4876ce00485f5c8a429d903fad451a
|
b898a50f8c4d8eed2422ac04b2baa222284cb2b5
|
refs/heads/master
| 2022-11-30T01:03:00.373026
| 2020-08-07T12:01:32
| 2020-08-07T12:01:32
| 285,252,900
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,959
|
py
|
#OOPR-Assgn-9
#Implement Student class here
class Student:
    """A course applicant with marks-based qualification rules.

    All state lives in name-mangled attributes and is reached through the
    exercise-mandated setter/getter pairs. A student qualifies when older
    than 20 with marks of at least 65 (and valid marks in 0..100); marks
    above 85 earn a 25% fee discount when a course is chosen.
    """

    def __init__(self):
        # All fields start unset; callers populate them via the setters.
        self.__student_id = None
        self.__marks = None
        self.__age = None
        self.__course_id = None
        self.__fees = None

    # --- mutators ---------------------------------------------------
    def set_student_id(self, student_id):
        self.__student_id = student_id

    def set_marks(self, marks):
        self.__marks = marks

    def set_age(self, age):
        self.__age = age

    def set_course_id(self, course_id):
        self.__course_id = course_id

    def set_fees(self, fees):
        self.__fees = fees

    # --- accessors --------------------------------------------------
    def get_student_id(self):
        return self.__student_id

    def get_marks(self):
        return self.__marks

    def get_age(self):
        return self.__age

    def get_course_id(self):
        return self.__course_id

    def get_fees(self):
        return self.__fees

    # --- business rules ---------------------------------------------
    def validate_marks(self):
        # Marks must be an integer between 0 and 100 inclusive.
        return self.get_marks() in range(101)

    def validate_age(self):
        # Applicants must be strictly older than 20.
        return self.get_age() > 20

    def check_qualification(self):
        # Both validations must pass before the marks threshold applies.
        if not (self.validate_age() and self.validate_marks()):
            return False
        return self.get_marks() >= 65

    def choose_course(self, course_id):
        # Course catalogue: id (as string) -> base fee.
        catalogue = {'1001': 25575.0, '1002': 15500.0}
        key = str(course_id)
        if key not in catalogue:
            return False
        self.set_course_id(course_id)
        fee = catalogue[key]
        # High scorers (marks > 85) receive a 25% discount.
        if self.get_marks() > 85:
            fee = fee - fee * 0.25
        self.__fees = fee
        return True
# Demo: enrol one student and report qualification / course allocation.
maddy = Student()
maddy.set_student_id(1004)
maddy.set_age(21)
maddy.set_marks(65)

if maddy.check_qualification():
    print("Student has qualified")
    # Allocation only attempted for qualified students.
    outcome = "Course allocated" if maddy.choose_course(1002) else "Invalid course id"
    print(outcome)
else:
    print("Student has not qualified")
|
[
"noreply@github.com"
] |
ankushsharma0904.noreply@github.com
|
4cc53c7f07f6e0d25fbff329f7bd73059cedeb84
|
f8d188821191dd6c69313763f71d370c3b1281b2
|
/Code/ExecuteDist.py
|
38a20205d9d00b1241c5a6f521636ed0274e0c94
|
[] |
no_license
|
ahhuang007/SionExecuteProject
|
ae732583b663d86a15120522479c0be2c03df3e5
|
857b6fa497c82ec8db44c8ea2f15d3a28c06a6c5
|
refs/heads/main
| 2023-02-12T17:54:05.177023
| 2021-01-19T07:27:23
| 2021-01-19T07:27:23
| 330,897,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 24 00:14:37 2018
@author: Andy
"""
import numpy as np
import pandas as pd
# 0 for-loops, I'm actually 64042039 IQ
def ExecuteDist(df):
    """Extract the timestamped Singed-death rows of *df*.

    Keeps rows where ``champion == "Singed"`` and ``killer_id == 0``,
    converts ``timestamp`` from milliseconds to seconds, and tags each row
    with a 2-minute ``bin`` index (``ceil(seconds / 120)``).

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain the columns "matchid", "win", "championid", "rank",
        "champion", "Unnamed: 0", "killer_id" and "timestamp".

    Returns
    -------
    pandas.DataFrame
        The filtered frame with columns ["timestamp", "bin"].
    """
    siondf = df[df["champion"] == "Singed"]
    # drop(columns=...) replaces the positional-axis form drop([...], 1),
    # which was removed in pandas 2.0.
    siondf = siondf.drop(
        columns=["matchid", "win", "championid", "rank", "champion", "Unnamed: 0"]
    )
    # killer_id == 0 marks the rows of interest (deaths attributed to id 0).
    siondf = siondf[siondf["killer_id"] == 0]
    siondf = siondf.drop(columns=["killer_id"])
    # Timestamps arrive in milliseconds; work in seconds from here on.
    siondf["timestamp"] = siondf["timestamp"] / 1000
    # NOTE: sdf2 aliases siondf, so the "bin" column added below is part of
    # the returned frame — callers rely on that.
    sdf2 = siondf
    # Per-bin counts, kept only as a sanity check (not returned).
    siondf["bin"] = np.ceil(siondf["timestamp"] / 120)
    newsdf = siondf.groupby(['bin']).agg({'bin': 'count'})
    newsdf["count"] = newsdf["bin"]
    newsdf = newsdf.drop(columns=["bin"])
    newsdf = newsdf.reset_index(drop=False)
    return sdf2
|
[
"ahhuang007@gmail.com"
] |
ahhuang007@gmail.com
|
e7037b0c637e6e97570ddb28a181f279e8d4c597
|
38744aa4f3ba165a8c043ac51c87b849882ea129
|
/game/lib/python3.7/site-packages/pip-20.2b1-py3.7.egg/pip/_internal/commands/install.py
|
2eb2bef6c2ce6551c2a36965104ce40a6ae0f626
|
[] |
no_license
|
CleverParty/containers
|
5be3c82e38e65ccbaf703fe68f35992ad9941219
|
a0d45e62fda2cb7b047c7a930cf6437e71a31d80
|
refs/heads/master
| 2023-08-04T01:32:58.122067
| 2021-02-07T15:14:35
| 2021-02-07T15:14:35
| 266,421,416
| 2
| 0
| null | 2021-09-22T19:39:31
| 2020-05-23T21:20:17
|
Python
|
UTF-8
|
Python
| false
| false
| 26,024
|
py
|
# The following comment should be removed at some point in the future.
# It's included for now because without it InstallCommand.run() has a
# couple errors where we have to know req.name is str rather than
# Optional[str] for the InstallRequirement req.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import errno
import logging
import operator
import os
import shutil
import site
from optparse import SUPPRESS_HELP
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cache import WheelCache
from pip._internal.cli import cmdoptions
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.req_command import RequirementCommand, with_cleanup
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.exceptions import CommandError, InstallationError
from pip._internal.locations import distutils_scheme
from pip._internal.operations.check import check_install_conflicts
from pip._internal.req import install_given_reqs
from pip._internal.req.req_tracker import get_requirement_tracker
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.distutils_args import parse_distutils_args
from pip._internal.utils.filesystem import test_writable_dir
from pip._internal.utils.misc import (
ensure_dir,
get_installed_version,
protect_pip_from_modification_on_windows,
write_output,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import virtualenv_no_global
from pip._internal.wheel_builder import build, should_build_for_install_command
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Any, Iterable, List, Optional
from pip._internal.models.format_control import FormatControl
from pip._internal.req.req_install import InstallRequirement
from pip._internal.wheel_builder import BinaryAllowedPredicate
logger = logging.getLogger(__name__)
def get_check_binary_allowed(format_control):
    # type: (FormatControl) -> BinaryAllowedPredicate
    """Build a predicate deciding whether a requirement may use a wheel."""
    def check_binary_allowed(req):
        # type: (InstallRequirement) -> bool
        # PEP 517 projects are always built to wheels, so always allow them.
        if req.use_pep517:
            return True
        normalized = canonicalize_name(req.name)
        return "binary" in format_control.get_allowed_formats(normalized)
    return check_binary_allowed
class InstallCommand(RequirementCommand):
    """
    Install packages from:
    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.
    pip also supports installing from "requirements files", which provide
    an easy way to specify a whole environment to be installed.
    """
    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""
    def __init__(self, *args, **kw):
        # Register install-specific command-line options on top of the base
        # RequirementCommand options; registration order shapes --help output.
        super(InstallCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.pre())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(
            '-t', '--target',
            dest='target_dir',
            metavar='dir',
            default=None,
            help='Install packages into <dir>. '
            'By default this will not replace existing files/folders in '
            '<dir>. Use --upgrade to replace existing packages in <dir> '
            'with new versions.'
        )
        cmdoptions.add_target_python_options(cmd_opts)
        cmd_opts.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help="Install to the Python user install directory for your "
            "platform. Typically ~/.local/, or %APPDATA%\\Python on "
            "Windows. (See the Python documentation for site.USER_BASE "
            "for full details.)")
        # --no-user is hidden from help; it only exists to override --user.
        cmd_opts.add_option(
            '--no-user',
            dest='use_user_site',
            action='store_false',
            help=SUPPRESS_HELP)
        cmd_opts.add_option(
            '--root',
            dest='root_path',
            metavar='dir',
            default=None,
            help="Install everything relative to this alternate root "
            "directory.")
        cmd_opts.add_option(
            '--prefix',
            dest='prefix_path',
            metavar='dir',
            default=None,
            help="Installation prefix where lib, bin and other top-level "
            "folders are placed")
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all specified packages to the newest available '
            'version. The handling of dependencies depends on the '
            'upgrade-strategy used.'
        )
        cmd_opts.add_option(
            '--upgrade-strategy',
            dest='upgrade_strategy',
            default='only-if-needed',
            choices=['only-if-needed', 'eager'],
            help='Determines how dependency upgrading should be handled '
            '[default: %default]. '
            '"eager" - dependencies are upgraded regardless of '
            'whether the currently installed version satisfies the '
            'requirements of the upgraded package(s). '
            '"only-if-needed" -  are upgraded only when they do not '
            'satisfy the requirements of the upgraded package(s).'
        )
        cmd_opts.add_option(
            '--force-reinstall',
            dest='force_reinstall',
            action='store_true',
            help='Reinstall all packages even if they are already '
            'up-to-date.')
        cmd_opts.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages, overwriting them. '
            'This can break your system if the existing package '
            'is of a different version or was installed '
            'with a different package manager!'
        )
        cmd_opts.add_option(cmdoptions.ignore_requires_python())
        cmd_opts.add_option(cmdoptions.no_build_isolation())
        cmd_opts.add_option(cmdoptions.use_pep517())
        cmd_opts.add_option(cmdoptions.no_use_pep517())
        cmd_opts.add_option(cmdoptions.install_options())
        cmd_opts.add_option(cmdoptions.global_options())
        cmd_opts.add_option(
            "--compile",
            action="store_true",
            dest="compile",
            default=True,
            help="Compile Python source files to bytecode",
        )
        cmd_opts.add_option(
            "--no-compile",
            action="store_false",
            dest="compile",
            help="Do not compile Python source files to bytecode",
        )
        cmd_opts.add_option(
            "--no-warn-script-location",
            action="store_false",
            dest="warn_script_location",
            default=True,
            help="Do not warn when installing scripts outside PATH",
        )
        cmd_opts.add_option(
            "--no-warn-conflicts",
            action="store_false",
            dest="warn_about_conflicts",
            default=True,
            help="Do not warn about broken dependencies",
        )
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(cmdoptions.prefer_binary())
        cmd_opts.add_option(cmdoptions.require_hashes())
        cmd_opts.add_option(cmdoptions.progress_bar())
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )
        # Index options are inserted before command options in the parser.
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    @with_cleanup
    def run(self, options, args):
        # type: (Values, List[Any]) -> int
        # Execute the install: validate options, resolve requirements, build
        # wheels where required, install, then report. Returns SUCCESS/ERROR.
        if options.use_user_site and options.target_dir is not None:
            raise CommandError("Can not combine '--user' and '--target'")
        cmdoptions.check_install_build_global(options)
        upgrade_strategy = "to-satisfy-only"
        if options.upgrade:
            upgrade_strategy = options.upgrade_strategy
        cmdoptions.check_dist_restriction(options, check_target=True)
        install_options = options.install_options or []
        # Resolve whether this should be a user-site install (see helper).
        options.use_user_site = decide_user_install(
            options.use_user_site,
            prefix_path=options.prefix_path,
            target_dir=options.target_dir,
            root_path=options.root_path,
            isolated_mode=options.isolated_mode,
        )
        target_temp_dir = None  # type: Optional[TempDirectory]
        target_temp_dir_path = None  # type: Optional[str]
        if options.target_dir:
            # --target implies ignoring already-installed packages; install
            # into a temp dir first and move into place afterwards.
            options.ignore_installed = True
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            # Create a target directory for using with the target option
            target_temp_dir = TempDirectory(kind="target")
            target_temp_dir_path = target_temp_dir.path
        global_options = options.global_options or []
        session = self.get_default_session(options)
        target_python = make_target_python(options)
        finder = self._build_package_finder(
            options=options,
            session=session,
            target_python=target_python,
            ignore_requires_python=options.ignore_requires_python,
        )
        build_delete = (not (options.no_clean or options.build_dir))
        wheel_cache = WheelCache(options.cache_dir, options.format_control)
        req_tracker = self.enter_context(get_requirement_tracker())
        directory = TempDirectory(
            options.build_dir,
            delete=build_delete,
            kind="install",
            globally_managed=True,
        )
        try:
            reqs = self.get_requirements(args, options, finder, session)
            warn_deprecated_install_options(
                reqs, options.install_options
            )
            preparer = self.make_requirement_preparer(
                temp_build_dir=directory,
                options=options,
                req_tracker=req_tracker,
                session=session,
                finder=finder,
                use_user_site=options.use_user_site,
            )
            resolver = self.make_resolver(
                preparer=preparer,
                finder=finder,
                options=options,
                wheel_cache=wheel_cache,
                use_user_site=options.use_user_site,
                ignore_installed=options.ignore_installed,
                ignore_requires_python=options.ignore_requires_python,
                force_reinstall=options.force_reinstall,
                upgrade_strategy=upgrade_strategy,
                use_pep517=options.use_pep517,
            )
            self.trace_basic_info(finder)
            requirement_set = resolver.resolve(
                reqs, check_supported_wheels=not options.target_dir
            )
            try:
                pip_req = requirement_set.get_requirement("pip")
            except KeyError:
                modifying_pip = None
            else:
                # If we're not replacing an already installed pip,
                # we're not modifying it.
                modifying_pip = pip_req.satisfied_by is None
            protect_pip_from_modification_on_windows(
                modifying_pip=modifying_pip
            )
            check_binary_allowed = get_check_binary_allowed(
                finder.format_control
            )
            reqs_to_build = [
                r for r in requirement_set.requirements.values()
                if should_build_for_install_command(
                    r, check_binary_allowed
                )
            ]
            _, build_failures = build(
                reqs_to_build,
                wheel_cache=wheel_cache,
                build_options=[],
                global_options=[],
            )
            # If we're using PEP 517, we cannot do a direct install
            # so we fail here.
            # We don't care about failures building legacy
            # requirements, as we'll fall through to a direct
            # install for those.
            pep517_build_failures = [
                r for r in build_failures if r.use_pep517
            ]
            if pep517_build_failures:
                raise InstallationError(
                    "Could not build wheels for {} which use"
                    " PEP 517 and cannot be installed directly".format(
                        ", ".join(r.name for r in pep517_build_failures)))
            to_install = resolver.get_installation_order(
                requirement_set
            )
            # Consistency Checking of the package set we're installing.
            should_warn_about_conflicts = (
                not options.ignore_dependencies and
                options.warn_about_conflicts
            )
            if should_warn_about_conflicts:
                self._warn_about_conflicts(to_install)
            # Don't warn about script install locations if
            # --target has been specified
            warn_script_location = options.warn_script_location
            if options.target_dir:
                warn_script_location = False
            installed = install_given_reqs(
                to_install,
                install_options,
                global_options,
                root=options.root_path,
                home=target_temp_dir_path,
                prefix=options.prefix_path,
                pycompile=options.compile,
                warn_script_location=warn_script_location,
                use_user_site=options.use_user_site,
            )
            # Look up installed versions for the success summary line.
            lib_locations = get_lib_location_guesses(
                user=options.use_user_site,
                home=target_temp_dir_path,
                root=options.root_path,
                prefix=options.prefix_path,
                isolated=options.isolated_mode,
            )
            working_set = pkg_resources.WorkingSet(lib_locations)
            installed.sort(key=operator.attrgetter('name'))
            items = []
            for result in installed:
                item = result.name
                try:
                    installed_version = get_installed_version(
                        result.name, working_set=working_set
                    )
                    if installed_version:
                        item += '-' + installed_version
                except Exception:
                    # Best-effort version lookup; the bare name is still shown.
                    pass
                items.append(item)
            installed_desc = ' '.join(items)
            if installed_desc:
                write_output(
                    'Successfully installed %s', installed_desc,
                )
        except EnvironmentError as error:
            show_traceback = (self.verbosity >= 1)
            message = create_env_error_message(
                error, show_traceback, options.use_user_site,
            )
            logger.error(message, exc_info=show_traceback)
            return ERROR
        if options.target_dir:
            # Move the temp-dir install into the user-requested --target dir.
            self._handle_target_dir(
                options.target_dir, target_temp_dir, options.upgrade
            )
        return SUCCESS
    def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
        # Move packages installed under target_temp_dir into target_dir,
        # honouring --upgrade for already-existing entries.
        ensure_dir(target_dir)
        # Checking both purelib and platlib directories for installed
        # packages to be moved to target directory
        lib_dir_list = []
        with target_temp_dir:
            # Checking both purelib and platlib directories for installed
            # packages to be moved to target directory
            scheme = distutils_scheme('', home=target_temp_dir.path)
            purelib_dir = scheme['purelib']
            platlib_dir = scheme['platlib']
            data_dir = scheme['data']
            if os.path.exists(purelib_dir):
                lib_dir_list.append(purelib_dir)
            if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
                lib_dir_list.append(platlib_dir)
            if os.path.exists(data_dir):
                lib_dir_list.append(data_dir)
            for lib_dir in lib_dir_list:
                for item in os.listdir(lib_dir):
                    if lib_dir == data_dir:
                        # Skip data entries that shadow a lib entry.
                        ddir = os.path.join(data_dir, item)
                        if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
                            continue
                    target_item_dir = os.path.join(target_dir, item)
                    if os.path.exists(target_item_dir):
                        if not upgrade:
                            logger.warning(
                                'Target directory %s already exists. Specify '
                                '--upgrade to force replacement.',
                                target_item_dir
                            )
                            continue
                        if os.path.islink(target_item_dir):
                            logger.warning(
                                'Target directory %s already exists and is '
                                'a link. pip will not automatically replace '
                                'links, please remove if replacement is '
                                'desired.',
                                target_item_dir
                            )
                            continue
                        if os.path.isdir(target_item_dir):
                            shutil.rmtree(target_item_dir)
                        else:
                            os.remove(target_item_dir)
                    shutil.move(
                        os.path.join(lib_dir, item),
                        target_item_dir
                    )
    def _warn_about_conflicts(self, to_install):
        # Log (but never fail on) missing or conflicting dependencies in the
        # set about to be installed.
        try:
            package_set, _dep_info = check_install_conflicts(to_install)
        except Exception:
            logger.error("Error checking for conflicts.", exc_info=True)
            return
        missing, conflicting = _dep_info
        # NOTE: There is some duplication here from pip check
        for project_name in missing:
            version = package_set[project_name][0]
            for dependency in missing[project_name]:
                logger.critical(
                    "%s %s requires %s, which is not installed.",
                    project_name, version, dependency[1],
                )
        for project_name in conflicting:
            version = package_set[project_name][0]
            for dep_name, dep_version, req in conflicting[project_name]:
                logger.critical(
                    "%s %s has requirement %s, but you'll have %s %s which is "
                    "incompatible.",
                    project_name, version, req, dep_name, dep_version,
                )
def get_lib_location_guesses(*args, **kwargs):
    """Return the purelib and platlib paths for the given scheme arguments."""
    scheme = distutils_scheme('', *args, **kwargs)
    return [scheme[key] for key in ('purelib', 'platlib')]
def site_packages_writable(**kwargs):
    """True when every candidate site-packages directory is writable."""
    candidates = set(get_lib_location_guesses(**kwargs))
    return all(test_writable_dir(path) for path in candidates)
def decide_user_install(
    use_user_site,  # type: Optional[bool]
    prefix_path=None,  # type: Optional[str]
    target_dir=None,  # type: Optional[str]
    root_path=None,  # type: Optional[str]
    isolated_mode=False,  # type: bool
):
    # type: (...) -> bool
    """Decide whether the install should target the user site-packages.

    An explicit True/False request is honoured (after sanity checks against
    incompatible options); when the caller passes None, the environment —
    --prefix/--target, site configuration, and write permissions — picks
    the default.
    """
    # Config sources such as tox may supply an int, not a bool, so test
    # falsiness rather than identity with False.
    if use_user_site is not None and not use_user_site:
        logger.debug("Non-user install by explicit request")
        return False

    if use_user_site:
        if prefix_path:
            raise CommandError(
                "Can not combine '--user' and '--prefix' as they imply "
                "different installation locations"
            )
        if virtualenv_no_global():
            raise InstallationError(
                "Can not perform a '--user' install. User site-packages "
                "are not visible in this virtualenv."
            )
        logger.debug("User install by explicit request")
        return True

    # From here on the caller expressed no preference.
    assert use_user_site is None

    # A user install is incompatible with --prefix/--target.
    if prefix_path or target_dir:
        logger.debug("Non-user install due to --prefix or --target option")
        return False

    # The interpreter may have user site-packages disabled entirely.
    if not site.ENABLE_USER_SITE:
        logger.debug("Non-user install because user site-packages disabled")
        return False

    # Prefer a global install when we can write to site-packages; fall back
    # to a user install otherwise.
    if site_packages_writable(root=root_path, isolated=isolated_mode):
        logger.debug("Non-user install because site-packages writeable")
        return False

    logger.info("Defaulting to user installation because normal site-packages "
                "is not writeable")
    return True
def warn_deprecated_install_options(requirements, options):
    # type: (List[InstallRequirement], Optional[List[str]]) -> None
    """Show a deprecation warning when location-changing --install-option
    arguments were given for any requirement or on the command line.
    """
    def format_options(option_names):
        # type: (Iterable[str]) -> List[str]
        # "foo_bar" -> "--foo-bar" for display purposes.
        return ["--{}".format(name.replace("_", "-")) for name in option_names]

    offenders = []

    # Per-requirement install options.
    for requirement in requirements:
        location_options = parse_distutils_args(requirement.install_options)
        if location_options:
            offenders.append(
                "{!r} from {}".format(
                    format_options(location_options.keys()), requirement
                )
            )

    # Command-line-level install options.
    if options:
        location_options = parse_distutils_args(options)
        if location_options:
            offenders.append(
                "{!r} from command line".format(
                    format_options(location_options.keys())
                )
            )

    if not offenders:
        return

    deprecated(
        reason=(
            "Location-changing options found in --install-option: {}. "
            "This configuration may cause unexpected behavior and is "
            "unsupported.".format(
                "; ".join(offenders)
            )
        ),
        replacement=(
            "using pip-level options like --user, --prefix, --root, and "
            "--target"
        ),
        gone_in="20.2",
        issue=7309,
    )
def create_env_error_message(error, show_traceback, using_user_site):
    """Format an error message for an EnvironmentError

    It may occur anytime during the execution of the install command.
    """
    parts = ["Could not install packages due to an EnvironmentError"]

    # Inline the error text only when a traceback will not be shown anyway.
    if show_traceback:
        parts.append(".")
    else:
        parts.append(": ")
        parts.append(str(error))

    # Terminate the summary line before any helper message.
    parts[-1] += "\n"

    # On permission errors, suggest --user and/or checking permissions.
    if error.errno == errno.EACCES:
        user_option_part = "Consider using the `--user` option"
        permissions_part = "Check the permissions"
        if using_user_site:
            parts.append(permissions_part)
        else:
            parts.extend([
                user_option_part, " or ",
                permissions_part.lower(),
            ])
        parts.append(".\n")

    return "".join(parts).strip() + "\n"
|
[
"shanatmail@gmail.com"
] |
shanatmail@gmail.com
|
494ad832fa6990170d4b16f55a4cc7e8f864096f
|
1b9bb81824a6e3623f4a1a39bb226794eb6838bb
|
/Ch3-Control-Structures/p4.py
|
b2d55b237db6970e02c25cf7649c4fca175ef111
|
[] |
no_license
|
sabricast/Charles-Dierbach-Solutions
|
9c920506192b46671bf84c5a10f85f825955a651
|
f8ec88c8156ae2d0e43d7fe9a190f9b83065e6e5
|
refs/heads/master
| 2020-03-19T00:07:56.822258
| 2018-06-22T22:09:39
| 2018-06-22T22:09:39
| 135,455,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
# Program that sums a series of positive integers entered by the user,
# excluding all numbers greater than 100.
# Entering -1 terminates the input loop and prints the running total.
terminate = False
total = 0  # renamed from `sum` to avoid shadowing the builtin
while not terminate:
    num = int(input('Please, enter an integer: '))
    if num == -1:
        # Sentinel value: stop reading input.
        terminate = True
    elif 0 < num <= 100:
        # Bug fix: only accumulate positive values up to 100, matching the
        # stated exercise requirement (values > 100 were previously included).
        total = total + num
print(total)
|
[
"sabrina.castejon@gmail.com"
] |
sabrina.castejon@gmail.com
|
f6da95104305909cbfb8a5ff584892ff174bb1df
|
11d265eba2ced9de43c339e4014c779b521320cd
|
/budget/urls.py
|
d21285624659058a7773c6937a88fcec99164e59
|
[] |
no_license
|
Sloshpit/budget_old
|
d9271de625cd7e3aa66ccbec501b005e50cd2812
|
a5603996b026542adb3bc8c578c03bcb843bea01
|
refs/heads/master
| 2022-04-23T08:42:43.377827
| 2020-04-25T14:40:39
| 2020-04-25T14:40:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
"""budget URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    # Each app owns its URL sub-tree via include() of its urls module.
    path('accounts/', include('accounts.urls')),
    path('budgettracker/', include('budgettracker.urls')),
    path('categories/', include('categories.urls')),
    path('transactions/', include('transactions.urls')),
    # Built-in Django admin site.
    path('admin/', admin.site.urls),
]
|
[
"neel.maheshwari@gmail.com"
] |
neel.maheshwari@gmail.com
|
8c6c888913d98e1c22f9888d836e117845354dbb
|
320280bfce76713436b76ffc3125ccf37e65a324
|
/AnalyzeMiniPlusSubstructure/test/ttbar/ttbar_447.py
|
c15a0f7aeaa121a42e163f11de21cc3f77df3c9d
|
[] |
no_license
|
skhalil/MiniValidation
|
75ea5c0d7cde17bf99c7d31501f8384560ee7b99
|
1a7fb8377e29172483ea6d3c7b3e427ff87e7e37
|
refs/heads/master
| 2016-09-05T10:31:38.562365
| 2015-01-29T05:30:32
| 2015-01-29T05:30:32
| 29,898,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,861
|
py
|
import FWCore.ParameterSet.Config as cms
###############################################
# Toggle between AOD and miniAOD collection names (Python 2 CMSSW config).
useMiniAOD = True
# AOD
pfcandidates = 'particleFlow'
chsstring = 'pfNoPileUpJME'
genjetparticles = 'genParticles'
importantgenparticles = 'genParticles'
tracks = 'generalTracks'
vertices = 'offlinePrimaryVertices'
mergedvertices = 'inclusiveMergedVertices'
mergedvertices2 = ''
primaryvertices = 'offlinePrimaryVertices'
#miniAOD
if useMiniAOD:
    # Override with the packed/slimmed miniAOD collection names.
    pfcandidates = 'packedPFCandidates'
    genjetparticles = 'packedGenParticles'
    importantgenparticles = 'prunedGenParticles'
    tracks = 'unpackedTracksAndVertices'
    vertices = 'unpackedTracksAndVertices'
    mergedvertices = 'unpackedTracksAndVertices'
    mergedvertices2 = 'secondary'
    primaryvertices = 'offlineSlimmedPrimaryVertices'
# Echo the selected collection names at configuration time.
print 'useMiniAOD = '+str(useMiniAOD)
print ' pfcandidates = '+pfcandidates
print ' genjetparticles = '+genjetparticles
print ' importantgenparticles = '+importantgenparticles
print ' tracks = '+tracks
print ' vertices = '+vertices
print ' mergedvertices = '+mergedvertices
print ' mergedvertices2 = '+mergedvertices2
print ' primaryvertices = '+primaryvertices
###############################################
# SETUP
process = cms.Process("USER")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) , allowUnscheduled = cms.untracked.bool(True) )
# -1 means: process every event in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.MessageLogger.cerr.FwkJob.limit=1
process.MessageLogger.cerr.ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) )
###############################################
# SOURCE
# Single ttbar miniAOD sample read over xrootd.
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        'root://cmsxrootd-site.fnal.gov//store/mc/Phys14DR/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/MINIAODSIM/PU20bx25_PHYS14_25_V1-v1/10000/A0F8757B-8875-E411-99A4-002590AC4C52.root'
    )
)
###############################################
# ANA
# Analyzer configuration: all input tags are the standard miniAOD slimmed
# collections.
process.demo = cms.EDAnalyzer("AnalyzeMiniPlusSubstructure",
    vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
    muons = cms.InputTag("slimmedMuons"),
    electrons = cms.InputTag("slimmedElectrons"),
    taus = cms.InputTag("slimmedTaus"),
    photons = cms.InputTag("slimmedPhotons"),
    jets = cms.InputTag("slimmedJets"),
    fatjets = cms.InputTag("slimmedJetsAK8"),
    mets = cms.InputTag("slimmedMETs"),
    pfCands = cms.InputTag("packedPFCandidates"),
    packed = cms.InputTag("packedGenParticles"),
    pruned = cms.InputTag("prunedGenParticles"),
    bits = cms.InputTag("TriggerResults","","HLT"),
    prescales = cms.InputTag("patTrigger")
)
# Histogram/tree output file for this job.
process.TFileService = cms.Service("TFileService",
    fileName = cms.string("ttbar447.root"),
    closeFileFast = cms.untracked.bool(True)
)
###############################################
# RECO AND GEN SETUP
process.load('PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.Geometry_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag ='PHYS14_25_V2'
#'START70_V6::All'
#'START70_V6::All'
process.load('RecoJets.Configuration.RecoPFJets_cff')
process.load('RecoJets.Configuration.RecoGenJets_cff')
# Rho producers are repointed at the miniAOD packed candidates.
#process.fixedGridRhoFastjetAll.pfCandidatesTag = pfcandidates
process.fixedGridRhoFastjetAll.pfCandidatesTag = 'packedPFCandidates'
process.fixedGridRhoAll.pfCandidatesTag = 'packedPFCandidates'
# process.fixedGridRhoAll.pfCandidatesTag = .InputTag("packedPFCandidates")
# process.fixedGridRhoFastjetAll = fixedGridRhoFastjetAll.clone( pfCandidatesTag = cms.InputTag("packedPFCandidates"))
# process.fixedGridRhoAll = fixedGridRhoAll.clone( pfCandidatesTag = cms.InputTag("packedPFCandidates"))
from RecoJets.JetProducers.SubJetParameters_cfi import SubJetParameters
from RecoJets.JetProducers.PFJetParameters_cfi import *
from RecoJets.JetProducers.CaloJetParameters_cfi import *
from RecoJets.JetProducers.AnomalousCellParameters_cfi import *
from RecoJets.JetProducers.CATopJetParameters_cfi import *
from RecoJets.JetProducers.GenJetParameters_cfi import *
from RecoJets.JetProducers.caTopTaggers_cff import *
###############################################
process.content = cms.EDAnalyzer("EventContentAnalyzer")
# Only the analyzer runs in the path; the rho producer is left disabled.
process.p = cms.Path(
    #process.fixedGridRhoFastjetAll
    process.demo
)
|
[
"skhalil@fnal.gov"
] |
skhalil@fnal.gov
|
041d2d78acf76ad17561a3cf13bdd379042cbb0b
|
efae09bf12200004e50121572fef13fc635b255c
|
/Day14/Code/mypack/games/contra.py
|
c06e80acc0703fc451f419ff3ab906a886da338b
|
[] |
no_license
|
supremepoison/python
|
5655146a7a49d94e42b8aad139ae6b48a72c7015
|
9abc639c1496e6bd228dd923be98e54280658946
|
refs/heads/master
| 2020-04-05T07:07:05.096234
| 2018-11-08T07:22:04
| 2018-11-08T07:22:04
| 156,664,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
# file : mypack/games/cotra.py
def play():
    """Pretend to play the Contra game by printing a status line."""
    print("正在玩 魂斗罗")
def game_over():
    """On game over: show the menu, then start the tanks game."""
    # # Absolute import (kept as a commented-out alternative):
    # from mypack.menu import show_menu
    # show_menu()
    # Relative import: resolved relative to the current package mypack/games/
    from ..menu import show_menu
    show_menu()
    # Call play() from mypack/games/tanks.py
    ...  # relative import
    from .tanks import play
    play()
# Runs at import time: report that the Contra module has been loaded.
print("魂斗罗模块被加载")
|
[
"897550138@qq.com"
] |
897550138@qq.com
|
d805d86670a23c03a0561093ce7ee2e1e1665412
|
1145a739e0472baf15086da421bb20ccc693f631
|
/student_private.py
|
c0dddb5d92d6a5ddfd267a445845d534d009cfde
|
[] |
no_license
|
islon/lxf
|
261f73af6539ad26155e6bcb2c5284847fd90250
|
c354bfb4d301b627bc3154197da052f40854e504
|
refs/heads/master
| 2021-01-12T17:51:06.818285
| 2016-11-27T18:44:24
| 2016-11-27T18:44:24
| 71,651,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
class Student(object):
    """Demonstrates "private" (name-mangled) attributes in Python 2."""
    def __init__(self,name,score):
        # Double-underscore names are mangled to _Student__name/_Student__score.
        self.__name=name
        self.__score=score
    def pt_score(self):
        # Prints "name:score"; returns None, so `print xm.pt_score()`
        # additionally prints "None".
        print "%s:%d"%(self.__name,self.__score)
    def get_name(self):
        # Accessor for the mangled __name attribute.
        return self.__name
    def get_score(self):
        # Accessor for the mangled __score attribute.
        return self.__score
    def __len__(self):
        # Hard-coded length: len(instance) always returns 100.
        return 100
# Demo (Python 2 syntax): exercises attribute access and special methods.
xm=Student("xiaoming",80)
print xm.pt_score()
print xm.get_name(),":",xm.get_score()
# Name mangling: __name is still reachable from outside as _Student__name.
print xm._Student__name
print isinstance(xm,Student)
print dir("asdf")
print dir(xm)
# len() dispatches to __len__; this class hard-codes it to 100.
print len("daf")
print "adf:","adf".__len__()
print "len(xm):",len(xm)
|
[
"longai1567@163.com"
] |
longai1567@163.com
|
53ce4757b88e4be8b9d68c9da903d256458d43ca
|
10256107b92bbf3c85371943a9ccd65f6a4b1092
|
/qubayes_tools.py
|
494db7d2c5b261b2e9a6a6f6e8fdb5f2b99f9fc0
|
[
"MIT"
] |
permissive
|
Quantum-Ducks/QuBayes
|
5f970de8a721a6734499df9049a67fb95064889f
|
f9b658f5e5ebcf3d9327472b09dd89b5d740758f
|
refs/heads/master
| 2022-11-29T02:09:24.242073
| 2020-07-16T19:20:26
| 2020-07-16T19:20:26
| 273,986,416
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,019
|
py
|
from itertools import product
import numpy as np
from network_setup import *
def generate_cond_keys(child, ps):
    """Generate the string keys for the conditional probabilities of *child*
    given its parent nodes.

    Parameters
    ----------
    child : Node
        Child node; its ``name`` and ``states`` are used.
    ps : list of Node
        Parent nodes, from most to least significant.

    Returns
    -------
    list of str
        Keys of the form ``"<child>_<state>|<p1>_<s1>,<p2>_<s2>,..."``,
        one per combination of child state and parent states.
    """
    # Build [[child name], child states, [p1 name], p1 states, ...] and
    # enumerate every combination with itertools.product.
    # (The previously-assigned but unused cname/cstates locals were removed.)
    ranges = [[child.name], child.states.keys()]
    for p in ps:
        ranges.append([str(p.name)])
        ranges.append(p.states.keys())
    # Separator toggle: "_" joins a name to its state, "," joins parent pairs.
    add = [",", "_"]
    cond_keys = []
    for enum in product(*ranges):
        enum = list(enum)
        suff = 0
        parent_str = ''
        # Elements 2..n-2 alternate between parent names and states; the
        # final element (last parent's state) is appended without separator.
        for i in range(2, len(enum) - 1):
            suff = (suff + 1) % 2
            parent_str += str(enum[i]) + add[suff]
        parent_str += str(enum[len(enum) - 1])
        cond_keys.append("%s_%s|%s" % (str(enum[0]), str(enum[1]), parent_str))
    return cond_keys
def generate_parent_str(ps):
##############################################
#THIS FUNCTION WILL GENERATE A LIST OF STRINGS TO USE AS KEYS FOR CONDITIONAL PROBABILITIES
### INPUT ###
# s_0 int number of states of the child node
# s_i list number of states for each parent node, from most to least significant
### OUTPUT ###
# list of strings to use as keys for conditional probabilities (included commas in case there is ever an >11-state node!)
##############################################
ranges = []
for p in ps:
ranges.append([str(p.name)])
ranges.append(p.states.keys())
enumed = product(*ranges)
add = [",","_"]
cond_keys = []
for enum in enumed:
suff = 0
enum = list(enum)
parent_str = ''
for i in range(len(enum)-1):
suff = (suff + 1)%2
parent_str += str(enum[i]) + add[suff]
parent_str += str(enum[len(enum)-1])
cond_keys.append("%s"%(parent_str))
return cond_keys
class Node:
# A single variable in the Bayesian network
def __init__(self, name, data, states=None, parents=[]):
### INPUTS ###
# name: str name of variable
# data: array state data for the node
# states: dict keys are state names, values are the int each takes on in the data
# parents: list strings of names of parent nodes to this node
##############
if states == None:
states = {}
for i in range(max(data) + 1):
states.update({str(i) : i})
self.name = name
self.data = data
self.states = states
self.parents = parents
|
[
"mbruff@unc.edu"
] |
mbruff@unc.edu
|
97526b2d07a57713e091b7a40b2016652f423481
|
961042b1e542e3648b20de407041f02b5b0d50f3
|
/python_lesson/homework/hw4/rain.py
|
45fc59b643e77a6a5f50a268805ee8546b9f7291
|
[] |
no_license
|
jasonwu0908/tibame
|
f4a9275bfbcc4b8e591907482c725a872b55c18a
|
9f49f95f1bdcb6d241737657cbb4f468f57c22d2
|
refs/heads/master
| 2020-08-28T23:26:53.180559
| 2020-05-17T06:34:08
| 2020-05-17T06:34:08
| 217,852,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,528
|
py
|
# 輸入一字串,字串為”all” 表示計算60個月的總平均降雨量,”year”、”season”和”month”
# 分別表示計算某年、某季或某月的平均降雨量。若為後三者,再輸入一個正整數表示那一年、那一季或那一月。
# 說明:5年12個月的降雨量以三維陣列形式事先給好60個浮點數
# 需做誤錯處理:
# a. 輸入除了”all”、”year”、”season”和”month”以外的字串
# b. 若輸入”year”,而正整數輸入1至5以外的數字
# c. 若輸入”season”,而正整數輸入1至4以外的數字
# d. 若輸入”month”,而正整數輸入1至12以外的數字
import random
list_rain = []
list_order = ['all', 'year', 'season', 'month']
def give_rain_list():
for i in range(5):
list_rain.append([])
for j in range(4):
list_rain[i].append([])
for k in range(3):
list_rain[i][j].append(round(float(random.randint(0,1000)/random.randint(1,50)), 2))
return list_rain
def give_order():
try:
str_order = str(input('請輸入:\tall, year, season, month:'))
if str_order not in list_order:
raise ValueError
else:
print(str_order)
return str_order
except ValueError:
print('請輸入英文')
return give_order()
except NameError:
print('英文拼錯')
return give_order()
except SyntaxError:
print(SyntaxError)
return give_order()
def avg_rain(commend):
total = 0
row = len(list_rain)
col = len(list_rain[0])
kon = len(list_rain[0][0])
if commend == 'all':
for i in range(row):
for j in range(col):
for k in range(kon):
total += list_rain[i][j][k]
rain_avg = total / (row * col * kon)
return round(rain_avg, 2)
elif commend == 'year':
try:
year_num = int(input('請輸入1~5:'))
if year_num > 5 or year_num < 1:
raise IndexError
else:
for j in range(col):
for k in range(kon):
total += list_rain[year_num-1][j][k]
rain_avg_year = (total / (col * kon))
return rain_avg_year
except ValueError:
print('請勿給數字以外的字,請輸入數字1~5:')
return avg_rain(commend)
except IndexError:
print('不再數字範圍內,請輸入數字1~5:')
return avg_rain(commend)
elif commend == 'season':
try:
season_num = int(input('請輸入1~4:'))
if season_num > 4 or season_num < 1:
raise IndexError
else:
for i in range(row):
for k in range(kon):
total += list_rain[i][season_num-1][k]
rain_avg_season = (total / row * kon)
return round(rain_avg_season, 2)
except ValueError:
print('請勿給數字以外的字,請輸入數字1~4:')
return avg_rain(commend)
except IndexError:
print('不再數字範圍內,請輸入數字1~4:')
return avg_rain(commend)
else:
try:
month_num = int(input('請輸入1~12:'))
month = (month_num-1) % 3
if month_num > 12 or month_num < 1:
raise IndexError
else:
if 3 <= month_num <= 5:
col = 0
elif 6 <= month_num <= 8:
col = 1
elif 9 <= month_num <= 11:
col = 2
else:
col = 3
for i in range(row):
total += list_rain[i][col][month]
rain_avg_month = (total / row)
print(rain_avg_month)
return round(rain_avg_month, 2)
except ValueError:
print('請勿給數字以外的字,請輸入數字1~12:')
return avg_rain(commend)
except IndexError:
print('不再數字範圍內,請輸入數字1~12:')
return avg_rain(commend)
def main():
list_rain = give_rain_list()
for i in range(5):
print(list_rain[i])
print()
print('=' * 50)
commend = give_order()
print('=' * 50)
x = avg_rain(commend)
print(x)
print('=' * 50)
main()
|
[
"jw840908@gmail.com"
] |
jw840908@gmail.com
|
53bf12582b148ff36491728a65ce0940d7cd0349
|
c3feb5a8569436f22192412f5e3234b940994622
|
/writing_raster.py
|
910920a5b5b7aaa73f435ff1a72b1119cf8dfd40
|
[] |
no_license
|
grathee/ISRIC-awc
|
68aa511810ab05e4a5847c929f7e043aa7900d13
|
90629100a8fa6c236f65ffa6596a02f503c2ff2a
|
refs/heads/master
| 2021-01-10T10:57:22.352441
| 2016-04-10T20:24:53
| 2016-04-10T20:24:53
| 51,150,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 10 16:01:08 2016
@author: user
"""
rf = gdal.Open("/media/user/data/AWC-IN_022016/sdata/BLD_sd1_M_1km_T386.tif")
cols = rf.RasterXSize
rows = rf.RasterYSize
geotransform = rf.GetGeoTransform()
originX= geotransform[0]
originY= geotransform[3]
pixelWidth= geotransform[1]
pixelHeight= geotransform[5]
driver = rf.GetDriver()
|
[
"geetika.rathee@wur.nl"
] |
geetika.rathee@wur.nl
|
ec14f58a983cc07d6cc84ca1a9767d97e9932fe9
|
480849d5c9de11ad7a57b8d491e7047f3b3a643e
|
/functions.py
|
7dccc1335ea73c9fa16ca9836aa6e4062b247fcf
|
[] |
no_license
|
Jrossi11/Streamlit-Backtesting
|
b404730d33018f005bca38995a7bf50fe097f94f
|
04028afd5a506e831a53a20b1a6f712635a9486c
|
refs/heads/main
| 2023-04-04T07:09:13.651200
| 2021-04-15T15:13:47
| 2021-04-15T15:13:47
| 344,124,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,706
|
py
|
import yfinance as yf
import numpy as np
import pandas as pd
from indicators import MACD, BBANDS
def create_df(ticker, days, std, a, b, signal, start, end, freq):
df = yf.download(ticker, start = start, end=end, interval = freq)
data = pd.DataFrame()
data['Close'] = df['Close']
data = BBANDS(df,days,std)
data['Macd'] = MACD(df, a, b, signal)[0]
data['Signal_line'] = MACD(df, 12, 26, 9)[1]
data['EWMA_short'] = df['Close'].ewm(span=20, adjust=False).mean()
data['EWMA_long'] = df['Close'].ewm(span=40, adjust=False).mean()
return data
def buy_sell_func(data, stop_loss=0.1, short_allowed=True):
"""
Esta funcion contiene las reglas que el algoritmo sigue para dar las señales
Parameters
----------
data : Matriz que contiene los precios de cierre y los parametros del indicador
take_profit : Float con el movimiento porcbuyentual para tomar ganancia
stop_loss : Float con el movimiento porcentual para cerrar operacion negativa
Returns
-------
listas: buy_sell[0] son las compras y buy_sell[1] son las ventas en el momento que se produjeron.
"""
long_positions = []
short_positions = []
last_entry = 0
long = 0
short = 0
for i in range(len(data)):
if long == 1: #Long position exit conditions
short_positions.append(np.nan)
if data['Close'][i] > data['Upper'][i]: #Exit with upper bband
long_positions.append(-data['Close'][i])
long = 0
last_entry = 0
elif data['Close'][i] < (last_entry*(1-stop_loss)): #Stop loss triger
long_positions.append(-data['Close'][i])
long = 0
last_entry = 0
else: #Holding the stock
long_positions.append(np.nan)
elif short == 1: #Short position exit conditions
long_positions.append(np.nan)
if data['Close'][i] < data['Lower'][i]: #Exit with lower bband
short_positions.append(-data['Close'][i])
short = 0
last_entry = 0
elif data['Close'][i] > (last_entry*(1-stop_loss)): #Stop loss triger
short_positions.append(-data['Close'][i])
short = 0
last_entry = 0
else: #Holding the stock
short_positions.append(np.nan)
elif short == 0 and long ==0: #Short position entry conditions
if data['Macd'][i] < data['Signal_line'][i] and data['Macd'][i-1] > data['Signal_line'][i-1] \
and data['EWMA_short'][i] < data['EWMA_long'][i] and short_allowed==True: #Short position entry conditions
long_positions.append(np.nan)
short_positions.append(data['Close'][i])
short = 1
last_entry = data['Close'][i]
elif data['Macd'][i] > data['Signal_line'][i] and data['Macd'][i-1] < data['Signal_line'][i-1] \
and data['EWMA_short'][i] > data['EWMA_long'][i]: #Long position entry conditions
long_positions.append(data['Close'][i])
short_positions.append(np.nan)
long = 1
last_entry = data['Close'][i]
else:
long_positions.append(np.nan)
short_positions.append(np.nan)
df = pd.DataFrame({'index':data.index,'Longs':long_positions,'Shorts':short_positions})
return [long_positions, short_positions, df]
"""PERFORMANCE ANALISYS"""
def performance(data, bs_data):
"""
Esta funcion calcula el "pl" que es la ganancia y perdida por cada transaccion,
el "pl_returns" que es la ganancia porcentual de cada operacion.
Tambien calcula el win ratio, la cantidad de operaciones exitosas sobre el total
la media y la varianza de los retornos
"""
pl = [] #Calculo los retornos diarios
long = 0
short = 0
prof = 0
total = 0
for i in range(len(data)):
if long == 0 and short == 0:
if bs_data[0][i] > 0:
long=1
index_open=i
pl.append(1)
elif bs_data[1][i] > 0:
short=1
index_open=i
pl.append(1)
else:
pl.append(1)
elif long==1:
if bs_data[0][i] < 0:
pl.append(data['Close'][i]/data['Close'][i-1])
long=0
total += 1
if data['Close'][i]>data['Close'][index_open]:
prof += 1
else:
pl.append(data['Close'][i]/data['Close'][i-1])
elif short==1:
if bs_data[1][i] < 0:
pl.append(data['Close'][i-1]/data['Close'][i])
short=0
total += 1
if data['Close'][i]<data['Close'][index_open]:
prof += 1
else:
pl.append(data['Close'][i-1]/data['Close'][i])
if total > 0:
win_ratio = prof/total
else:
win_ratio = 0
return [pl, win_ratio, prof, total]
def capital_func(data,pl_data,initial_cap, buy_sell):
"""
Parameters
----------
initial_cap : El input es el capital inicial con el cual operara el algo
Returns
-------
cap : Evolucion del capital a medida que el algo tradea
tot_return : Retorno desde que el algo comenzo a operar
"""
cap = np.cumprod(pl_data[0])*1000
stock_performance = data['Close']/data['Close'].shift(1)
stock_reference = np.cumprod(stock_performance)*1000
stock_reference[0] = initial_cap
stock_performance = stock_reference[-1]/stock_reference[0]-1
tot_return = cap[-1]/initial_cap-1
return cap, stock_reference, tot_return, stock_performance
"""
data=create_df("AAPL", 20,1,12,26,9,"2010-01-01","2020-01-01", "1d")
bs_data=buy_sell_func(data,0.1,False)
pl_data=performance(data,bs_data)
cap=capital_func(data,pl_data,1000, bs_data)
"""
|
[
"noreply@github.com"
] |
Jrossi11.noreply@github.com
|
aa7a76f3941412f0af24311100dbb6ddf5fd5935
|
632544cb106ca1456d528cd650418f66f1715e5b
|
/backend/app_1_21879/settings.py
|
2957bf57e0b60bcedc3768f223af76b86999ba84
|
[] |
no_license
|
crowdbotics-apps/app-1-21879
|
e38079331882dd023ea086e5f2699fa887f688e5
|
2ba321b7f4d5c48a7a787db49d424891a4c01803
|
refs/heads/master
| 2023-01-02T06:15:05.927470
| 2020-10-23T17:48:54
| 2020-10-23T17:48:54
| 306,707,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,083
|
py
|
"""
Django settings for app_1_21879 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app_1_21879.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app_1_21879.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
ccee0ae76b708de8d873628460ed5fd1800af471
|
33612e7ca82f22bb8c96515f448afefd680f9134
|
/dho-screencap.py
|
03474b750a113b35da8ba5e1c205518ea1ea2f9f
|
[] |
no_license
|
coderpete/dotfiles
|
959bca73c61924fe363b96d027305b60829e57f3
|
c09e75853c048a31cddfc38aaa2674dde352b7f8
|
refs/heads/master
| 2022-08-15T06:21:29.645361
| 2022-08-03T01:21:56
| 2022-08-03T01:21:56
| 4,587,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
#!/usr/bin/env python
'''
Integrates Mac OS X's screenshot utility with DreamObjects for easy sharing.
'''
from datetime import datetime
from uuid import uuid4
import os
import subprocess
import webbrowser
import urllib2
import json
import boto
import boto.s3.connection
# configuration
f = open('.dho_access', 'r')
# required
dho_access_key = f.readline().strip()
dho_secret_key = f.readline().strip()
dho_screenshots_bucket = f.readline().strip()
# optional
cname = f.readline().strip()
# other variables
now = datetime.now()
this_month = datetime.strftime(now, '%Y%m')
tstamp = datetime.strftime(datetime.now(), '%d%H%M%S')
filename = this_month + '/' + tstamp + '_' + str(uuid4()) + '.png'
# start interactive screen capture
print 'Capturing screenshot...'
if not os.path.exists('/tmp/' + this_month):
os.mkdir('/tmp/' + this_month)
subprocess.call(['screencapture', '-i', '/tmp/%s' % filename])
print 'Connecting to DreamObjects...'
connection = boto.connect_s3(
aws_access_key_id=dho_access_key,
aws_secret_access_key=dho_secret_key,
host='objects.dreamhost.com'
)
print 'Getting target bucket...'
bucket = connection.get_bucket(dho_screenshots_bucket)
key = bucket.new_key(filename)
print 'Uploading to DreamObjects...'
key.set_contents_from_file(open('/tmp/%s' % filename, 'rb'))
key.set_canned_acl('private')
signed_url = key.generate_url(
expires_in=60*60*3,
query_auth=True,
force_http=True
)
print 'Screenshot available at:'
print '\t', signed_url
print 'Copying url to clipboard...'
os.system('echo "%s" | pbcopy' % signed_url)
print 'Opening in browser...'
webbrowser.open_new_tab(signed_url)
|
[
"pete.chudykowski@dreamhost.com"
] |
pete.chudykowski@dreamhost.com
|
2e10e0235035397bd356c040ca5aa38dfa087ded
|
d7e3988bd90ffa6259d564e8426eed18766a5e03
|
/old/tarea3.py
|
6aec20f4c9fa2621c44eb679717a861f0a75232e
|
[] |
no_license
|
yerkortiz/py-code
|
7e405705bcd92819c555ba0ba43551fa3c852657
|
b120c4c613c8cbecf89a8473fde40bb1d69bc724
|
refs/heads/master
| 2023-01-08T22:17:05.807915
| 2020-11-14T18:58:26
| 2020-11-14T18:58:26
| 214,654,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
from gost import *
#gost.py es parte de la libreria, pero solo el archivo.
#como es un standar ruso, gran parte de lo que encontré en internet
#estaba en ese idioma así que el camino más corto fue ver directamente
#la implementación del algoritmo y ver como usarlo
#el resto de funciones de pygost son para codificar y decodificar entre arreglos de bytes y hexadecimal
from functools import partial
from pygost.utils import hexdec
from pygost.utils import strxor
from pygost.utils import xrange
from pygost.utils import hexenc
#pip instal pygost
#ejecutar con python3 tarea3.py
pt = hexdec("holaaaaa".encode("utf-8").hex())
key = hexdec("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".encode("utf-8").hex())
ct = ecb_encrypt(key, pt)
#dt = ecb_encrypt(key, ct)
print('mensaje en texto plano: ' + hexenc(pt))
print('mensaje encriptado: ' + hexenc(ct))
#print(hexenc(dt))
html ="""
<p>Mensaje secreto</p>
<div class='kuznyechik' id='"""+hexenc(ct)+"""'></div>
"""
file = open("index.html","w")
file.write(html)
file.close()
|
[
"yerko.ortizm@mail.udp.cl"
] |
yerko.ortizm@mail.udp.cl
|
ea7d3b60c24ca6ccdc9de4c84e580fdb27c80e0b
|
802997443ff625296e09eed55e349f319953e47a
|
/Versoes Anteriores/Gestao_v1/gestao/gestaoapp/models/projeto.py
|
0c74fe69f47d23d7d7e0e0ec089490b5d30b7192
|
[] |
no_license
|
elizabethsilvano/GestaoProjetos
|
7531277f4c51fb7802a70f24a406ad81bc467d96
|
3d21fff452cf42cd2d78a6591d0fab9ac9b46eb5
|
refs/heads/master
| 2023-04-01T18:54:41.932959
| 2015-11-17T17:43:00
| 2015-11-17T17:43:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
from django.db import models
from gestaoapp.models import Coordenador
from gestaoapp.models import Bolsista
from gestaoapp.models import Nucleo
class Projeto(models.Model):
nome_projeto = models.CharField(max_length=255)
coordenador = models.ForeignKey(Coordenador)
nucleo = models.ForeignKey(Nucleo)
bolsista = models.ForeignKey(Bolsista)
cliente = models.CharField(max_length=255)
data_inicio = models.DateField()
data_termino = models.DateField()
def __unicode__(self):
return self.nome_projeto
|
[
"paatrick_reis@hotmail.com"
] |
paatrick_reis@hotmail.com
|
305ec6ed0199cbc83888fec821fb398ad5eb7314
|
677744a2392da10141b0a8f19458dba730a35bff
|
/Algorithm/backjoon/bronze/최댓값.py
|
b62e8f5f601576834ee97b03b9c76756d2ccfd14
|
[] |
no_license
|
xxoellie/growth_recording
|
daf1cf2a89b62ca4e683bd6e7b5507b038bbd66d
|
f45e46a50a7941d31c3c9a754d40617da9541a31
|
refs/heads/master
| 2023-09-06T08:13:39.720428
| 2021-11-17T15:16:38
| 2021-11-17T15:16:38
| 403,450,642
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
num = []
for i in range(9):
num.append(int(input()))
a = max(num)
b= num.index(a)
print(a)
print(b+1)
|
[
"elliesohyeon1202@gmail.com"
] |
elliesohyeon1202@gmail.com
|
f42e432426681660f78bd781ade6c893b3eeee8f
|
b6cdc35b12a1ae8834d76c7731f79c9689f75e04
|
/polling_bot/excel_reader.py
|
d73b1a9173bcc5252bd8567185c7fd38be113e52
|
[
"Apache-2.0"
] |
permissive
|
balemessenger/poll_bot
|
40abd3235c1587d73a84d395d124ae7e56a2d140
|
672ed66aa376ac1580c48c119ca2743a16976326
|
refs/heads/master
| 2023-08-07T22:13:18.246312
| 2020-02-19T06:06:11
| 2020-02-19T06:06:11
| 241,539,543
| 0
| 0
|
Apache-2.0
| 2023-07-23T06:07:38
| 2020-02-19T05:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 960
|
py
|
from collections import defaultdict
import pandas
class ExcelReader:
def __init__(self, file_path):
self.file = file_path
self.data_list = defaultdict(list)
self.data = list
def read_excel_data(self, sheets=None):
if sheets and isinstance(sheets, list):
for sheet_number in sheets:
self.data = []
data_frame = pandas.read_excel(self.file, sheet_name=sheet_number, header=None)
# keys = data_frame.keys()
for value_list in data_frame.values:
inner_data = []
for value in value_list:
inner_data.append(value)
self.data.append(inner_data)
self.data_list[sheet_number].append(self.data)
def validate_fields(self, data: dict):
raise NotImplementedError()
#
@property
def get_data(self):
return self.data_list
|
[
"bayatimasoood@gmail.com"
] |
bayatimasoood@gmail.com
|
ffecc0dbc60ce26f507ac4edbfda23261709f8bf
|
41a0cd9b039e3c7752ff938077aa228bb9773575
|
/ExCode/Lab09_NormalMapOnTangentSpace/main.py
|
9a44ebfb7910ed3d3103a8bdda68d9485577d26a
|
[] |
no_license
|
dknife/201802_GPUProgramming
|
c23d1361df41fca267f99e83a442266f29989507
|
1a722082a74bb8b6088c47bdc88375a81b68d1b7
|
refs/heads/master
| 2021-07-17T07:44:11.498713
| 2019-01-22T19:42:36
| 2019-01-22T19:42:36
| 147,673,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,273
|
py
|
import wx # requires wxPython package
from wx import glcanvas
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import numpy as np
import random as rnd
import math
import Light
import Shader
import Texture
import Surface
class MyFrame(wx.Frame) :
def __init__(self):
self.size = (1280, 720)
wx.Frame.__init__(self, None, title = "wx frame", size = self.size,
style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
self.panel = MyPanel(self)
class MyPanel(wx.Panel) :
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.canvas = OpenGLCanvas(self)
self.shaderButton = wx.Button(self, wx.ID_ANY, "Shader On/Off",
pos=(1030, 20), size=(200,40), style = 0)
self.shaderLabel = wx.StaticText(self, -1, pos=(1030, 60), style=wx.ALIGN_CENTER)
self.shaderLabel.SetLabel("currently the shader is off")
self.Bind(wx.EVT_BUTTON, self.OnShaderButton, self.shaderButton)
self.lightLabel = wx.StaticText(self, -1, pos=(1030,150), style=wx.ALIGN_CENTER)
self.lightLabel.SetLabel("Light")
self.lightSlider = wx.Slider(self, -1, pos=(1030, 180), size = (200,50), style = wx.SL_HORIZONTAL|wx.SL_AUTOTICKS,
value=0, minValue=-20, maxValue=20)
self.objectRotation = wx.StaticText(self, -1, pos=(1030, 250), style=wx.ALIGN_CENTER)
self.objectRotation.SetLabel("Object Rotatation (Y)")
self.objectRotationSlider = wx.Slider(self, -1, pos=(1030, 280), size=(200, 50),
style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS,
value=0, minValue=-90, maxValue=90)
self.Bind(wx.EVT_SLIDER, self.OnLightSlider, self.lightSlider)
self.Bind(wx.EVT_SLIDER, self.OnRotationSlider, self.objectRotationSlider)
def OnLightSlider(self, event):
val = event.GetEventObject().GetValue()
self.canvas.lightX = val / float(10)
def OnRotationSlider(self, event):
val = event.GetEventObject().GetValue()
self.canvas.objectAngle = val
def OnShaderButton(self, event):
if self.canvas.bDrawWithShader == True :
self.canvas.bDrawWithShader = False
self.shaderLabel.SetLabel("currently the shader is off")
else :
self.canvas.bDrawWithShader = True
self.shaderLabel.SetLabel("currently the shader is on")
class OpenGLCanvas(glcanvas.GLCanvas):
def __init__(self, parent) :
self.initialized = False
self.bDrawWithShader = False
self.shader = None
self.size = (1024,720)
self.aspect_ratio = 1
self.lightX = 0.0
self.objectAngle = 0.0
glcanvas.GLCanvas.__init__(self, parent, -1, size = self.size)
self.context = glcanvas.GLContext(self)
self.SetCurrent(self.context)
self.Bind(wx.EVT_PAINT, self.OnDraw)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.light = Light.Light()
self.light.setLight()
self.light.setMaterial()
self.light.turnOn()
self.texture = Texture.Texture("normal.png")
attrib_list = ["Tangent", "Binormal"]
self.shader = Shader.Shader("textureMapping.vs", "textureMapping.fs", attrib_list)
self.surface = Surface.Surface(50,50)
self.surface.resetVerts()
self.surface.computeTangentSpace()
self.InitGL()
def InitGL(self):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
self.aspect_ratio = float(self.size[0]) / self.size[1]
gluPerspective(60, self.aspect_ratio, 0.1, 100.0)
glViewport(0,0,self.size[0], self.size[1])
glEnable(GL_DEPTH_TEST)
self.texture.startTexture()
def OnDraw(self, event):
# clear color and depth buffers
if not self.initialized :
self.InitGL()
self.initialized = True
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# position viewers
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(0,1,1, 0, 0, 0, 0,1,0)
self.light.setLightPoisition(self.lightX, 0.5, 0)
glDisable(GL_LIGHTING)
glPointSize(10)
glColor3f(1, 0, 0)
glBegin(GL_POINTS)
glVertex3f(self.lightX, 0.5, 0)
glEnd()
glEnable(GL_LIGHTING)
glRotatef(self.objectAngle, 0,1,0)
#self.objectAngle+=0.1;
if self.bDrawWithShader :
self.shader.begin()
loc = glGetUniformLocation(self.shader.program, "myTexture")
glUniform1i(loc, 0)
glVertexAttribPointer(10, 3, GL_FLOAT, GL_FALSE, 0, self.surface.tangent)
glEnableVertexAttribArray(10)
glVertexAttribPointer(11, 3, GL_FLOAT, GL_FALSE, 0, self.surface.binorm)
glEnableVertexAttribArray(11)
else : self.surface.drawTangentSpace()
self.surface.drawSurface()
self.shader.end()
self.SwapBuffers()
def OnIdle(self, event):
self.Refresh()
def main() :
app = wx.App()
frame = MyFrame()
frame.Show()
app.MainLoop()
if __name__ == "__main__" :
main()
|
[
"young.min.kang@gmail.com"
] |
young.min.kang@gmail.com
|
42266d9378f9aa963204c4446c5147e3eb0b353b
|
3cdda95ca9ad49915243ba5d3d2b9725880373e1
|
/215-sklearnMultipleLinearRegressionandAdjustedR-squared-FRegression.py
|
cb4e158497e067c65261a8e94dbb7adc424cffb8
|
[] |
no_license
|
evelynda1985/215-MultipleLinerRegressionF-regression
|
ab3910e9223ff10e78660c692087492d2bd2740d
|
b0227a6da9299d7db079677276f7e0b9cda8b0e4
|
refs/heads/main
| 2023-07-15T04:53:35.463960
| 2021-08-27T00:53:14
| 2021-08-27T00:53:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,888
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Adjusted R-squared - Exercise
#
# Using the code from the lecture, create a function which will calculate the adjusted R-squared for you, given the independent variable(s) (x) and the dependent variable (y).
#
# Check if you function is working properly.
#
# Please solve the exercise at the bottom of the notebook (in order to check if it is working you must run all previous cells).
# ## Import the relevant libraries
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.linear_model import LinearRegression
# ## Load the data
# In[2]:
data = pd.read_csv('1.02. Multiple linear regression.csv')
data.head()
# In[3]:
data.describe()
# ## Create the multiple linear regression
# ### Declare the dependent and independent variables
# In[4]:
x = data[['SAT','Rand 1,2,3']]
y = data['GPA']
# ### Regression itself
# In[5]:
reg = LinearRegression()
reg.fit(x,y)
# In[6]:
reg.coef_
# In[7]:
reg.intercept_
# ### Calculating the R-squared
# In[8]:
reg.score(x,y)
# ### Formula for Adjusted R^2
#
# $R^2_{adj.} = 1 - (1-R^2)*\frac{n-1}{n-p-1}$
# In[9]:
x.shape
# In[10]:
r2 = reg.score(x,y)
n = x.shape[0]
p = x.shape[1]
adjusted_r2 = 1-(1-r2)*(n-1)/(n-p-1)
adjusted_r2
# ### Adjusted R^2 function
# In[11]:
def adjusted_r2_funtion(x,y):
r2 = reg.score(x,y)
n = x.shape[0]
p = x.shape[1]
adjusted_r2 = 1 - (1-r2) * (n-1)/(n-p-1)
return adjusted_r2
# In[12]:
adjusted_r2_funtion(x,y)
# In[13]:
from sklearn.feature_selection import f_regression
# In[14]:
f_regression(x,y)
# In[15]:
# F-statistics array([56.04804786, 0.17558437]
# p-values array([7.19951844e-11, 6.76291372e-01]
# In[16]:
p_values = f_regression(x,y)[1]
p_values
# In[17]:
p_values.round(3)
# In[ ]:
|
[
"noreply@github.com"
] |
evelynda1985.noreply@github.com
|
42a1be2cf61229d6cd3c6f26fd4af8dd26623f2c
|
12b5903f2748e9f46ff79969926ebd4090d90988
|
/dbengine.py
|
16b1a1c0a4c748655425f13d5eac30a49a81ef84
|
[] |
no_license
|
Singer-id/seqgan
|
e20cda5cabbbfd63f89999dd376740be22a97901
|
b5f63d4a7ffbd8af9bb87e9c88f6b12f52b6580b
|
refs/heads/main
| 2023-01-22T09:44:43.322394
| 2020-12-09T15:45:31
| 2020-12-09T15:45:31
| 319,955,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
import records
import re
from babel.numbers import parse_decimal, NumberFormatError
schema_re = re.compile(r'\((.+)\)')
num_re = re.compile(r'[-+]?\d*\.\d+|\d+')
agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg']
cond_ops = ['=', '>', '<', 'OP']
class DBEngine:
def __init__(self, fdb):
#fdb = 'data/test.db'
self.db = records.Database('sqlite:///{}'.format(fdb))
def execute_query(self, table_id, query, *args, **kwargs):
return self.execute(table_id, query.sel_index, query.agg_index, query.conditions, *args, **kwargs)
def execute(self, table_id, select_index, aggregation_index, conditions, lower=True):
if not table_id.startswith('table'):
table_id = 'table_{}'.format(table_id.replace('-', '_'))
table_info = self.db.query('SELECT sql from sqlite_master WHERE tbl_name = :name', name=table_id).all()[0].sql.replace('\n','')
schema_str = schema_re.findall(table_info)[0]
schema = {}
for tup in schema_str.split(', '):
c, t = tup.split()
schema[c] = t
select = 'col{}'.format(select_index)
agg = agg_ops[aggregation_index]
if agg:
select = '{}({})'.format(agg, select)
where_clause = []
where_map = {}
for col_index, op, val in conditions:
if lower and (isinstance(val, str) or isinstance(val, str)):
val = val.lower()
if schema['col{}'.format(col_index)] == 'real' and not isinstance(val, (int, float)):
try:
val = float(parse_decimal(val))
except NumberFormatError as e:
val = float(num_re.findall(val)[0])
where_clause.append('col{} {} :col{}'.format(col_index, cond_ops[op], col_index))
where_map['col{}'.format(col_index)] = val
where_str = ''
if where_clause:
where_str = 'WHERE ' + ' AND '.join(where_clause)
query = 'SELECT {} AS result FROM {} {}'.format(select, table_id, where_str)
print (query)
out = self.db.query(query, **where_map)
return [o.result for o in out]
|
[
"noreply@github.com"
] |
Singer-id.noreply@github.com
|
78733a3c69333568ebea00a2b8534eb88cc94240
|
34b9b39442bde1a3c8fa670ef60bcc84d772a067
|
/Assignment 3- Deadline 10 Oct 2017/Assignment3_step1_DiNarzo.py
|
a8c729685f567ea64b8c86274dbdc04c671dd8cb
|
[] |
no_license
|
bnajafi/Scientific_Python_Assignments_POLIMI_EETBS
|
b398fc2754b843d63cd06d517235c16177a87dcf
|
8da926e995dcaf02a297c6bb2f3120c49d6d63da
|
refs/heads/master
| 2021-05-07T22:36:14.715936
| 2018-01-16T21:12:33
| 2018-01-16T21:12:33
| 107,265,075
| 38
| 86
| null | 2018-01-16T21:12:34
| 2017-10-17T12:24:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
# -*- coding: utf-8 -*-
#EXERCISE 1.3
Materials= {'Wood_bevel_lapped':0.14,'Wood_fiberboard':0.23,'Glass_fiber':2.45,'Wood_stud':0.63,'Gypsum':0.079,
'Outside_sourface':0.03,'Inside_surface':0.12}
Between_studs= ['Wood_bevel_lapped','Wood_fiberboard','Glass_fiber','Gypsum']
At_studs= ['Wood_bevel_lapped','Wood_fiberboard','Wood_stud','Gypsum']
Air= ['Outside_sourface','Inside_surface']
farea=[0.75,0.25]
#RESISTENCES
#Calculating the wood stud
Wall_wood= At_studs + Air
R_wood_tot=0
for anylayer in Wall_wood:
R_wood=Materials[anylayer]
R_wood_tot=R_wood_tot+R_wood
print 'The R value of ' + str(anylayer) +' is ' + str(R_wood) +' °C/W'
print ' '
print 'The total value of R assuming a wall with stud is ' +str(R_wood_tot) +' °C/W'
#Calculating for glass fiber
Wall_glass=Between_studs + Air
R_glass_tot=0
for anylayer1 in Wall_glass:
R_glass=Materials[anylayer1]
R_glass_tot= R_glass_tot+R_glass
print 'The R value of ' + str(anylayer1) +' is ' + str(R_glass) +' °C/W'
print ' '
print 'The total value of R assuming a wall with glass is ' +str(R_glass_tot) +' °C/W'
#HEAT TRANSFER COEFFICIENT
U_stud= farea[1]/R_wood_tot
U_glass= farea[0]/R_glass_tot
U_tot= U_glass+U_stud
print' '
print 'The overall heat transfer coefficient is ' +str(U_tot) +' W/°C'
R_tot=1/U_tot
print ' '
print 'The total resistence is ' +str(R_tot) +' °C/W'
|
[
"behzad najafi"
] |
behzad najafi
|
894c8fe2c30f30f16184a080402e256b71bec1db
|
cd4d4e21d077857fd8d9c999feced51aac5f6b0d
|
/env/bin/django-admin
|
1ce23704dc2af631516e89625efd9ed7d98b0799
|
[] |
no_license
|
fieldhawker/akagi
|
f9b88fe53b3e2bf1eb50185f497af8c0a278e9b2
|
2890061ec3b5c8be5b9d17a7bcf70a939eccc41a
|
refs/heads/master
| 2022-12-21T22:20:15.857871
| 2019-09-09T13:32:12
| 2019-09-09T13:32:12
| 207,040,596
| 0
| 0
| null | 2022-12-08T06:08:35
| 2019-09-08T00:17:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 303
|
#!/Users/takano/Documents/SixPack/akagi/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
""
] | ||
3f2a471cf157e3154c848bf20c4c2bc368f01729
|
1127436a238a56da21757f38d939a5aac9ecf790
|
/myenv/bin/pilprint.py
|
23fdf09841df6c153e811046bfda4f0deb00929d
|
[] |
no_license
|
comeondown/gas
|
b163e86b011c39c4d6243f291c137fc15f6690b2
|
b0182bdfbd2c09797d4dff8424b612c222acfc84
|
refs/heads/master
| 2021-01-21T04:41:46.116834
| 2016-06-16T02:16:18
| 2016-06-16T02:16:18
| 55,646,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
#!/var/www/gas/myenv/bin/python3
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
[
"root@vm20711.hv8.ru"
] |
root@vm20711.hv8.ru
|
e25b8faaa3e5edff82ea687079a64c3f7e2ce024
|
328bc5a846fb951e1127f13706ad09e75b93b023
|
/0x00-python_variable_annotations/2-floor.py
|
877281684c3a01003daa1b49e5a17f3c69dac206
|
[] |
no_license
|
MatriMariem/holbertonschool-web_back_end
|
37f39cc286d949e347baafee0697c8ad042dbb05
|
2ab609541ff8b45cdc923c24d629f160ddc6f3cf
|
refs/heads/master
| 2023-02-28T23:06:47.490221
| 2021-01-28T13:08:43
| 2021-01-28T13:08:43
| 305,419,798
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
#!/usr/bin/env python3
""" a type-annotated function floor """
import math
def floor(n: float) -> int:
"""
a type-annotated function floor that
takes a float n as argument
and returns the floor of the float.
"""
return math.floor(n)
|
[
"meriemmatri1994@gmail.com"
] |
meriemmatri1994@gmail.com
|
25bee7f047a571e837f50efb534fc2afe2c02d47
|
53f2f59c8a186b7028596679a1307f6c5b3bc853
|
/install-webfaction-cpanel-awstats.py
|
f172b85f4438093fc1817910e2d8aa68a7bd1cd3
|
[] |
no_license
|
turian/osqa-install-webfaction
|
f7c4189b681ac2ea15bcd4db3817d4aa5293503d
|
9a80576ebddd1955a5221736ab31556f110f1fe4
|
refs/heads/master
| 2020-06-03T06:47:34.439598
| 2010-07-03T19:43:58
| 2010-07-03T19:43:58
| 597,755
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
#!/usr/bin/env python
from globals import *
from moreglobals import *
import sys
from cpanel import try_remove, force_create
import xmlrpclib
server = xmlrpclib.ServerProxy('https://api.webfaction.com/')
session_id, account = server.login(USERNAME, PASSWORD)
#print >> sys.stderr, repr(session_id)
#print >> sys.stderr, repr(account)
#{'username': 'test5', 'home': '/home2', 'id': 237}
#for i in server.list_emails(session_id):
# print >> sys.stderr, i
r = force_create(server, session_id, AWSTATS_APPNAME, "app", "create_app", "delete_app", "list_apps", ['awstats68', False, WEBSITENAME])
if SERVERIP is None:
SERVERIP = server.list_websites(session_id)[0]["ip"]
print >> sys.stderr, "No SERVERIP given. Using %s" % SERVERIP
r = force_create(server, session_id, WEBSITENAME, "website", "create_website", "delete_website", "list_websites", [SERVERIP, False, [FULLDOMAINNAME], [APPNAME, URLPATH], [AWSTATS_APPNAME, AWSTATS_URLPATH]])
|
[
"turian@gmail.com"
] |
turian@gmail.com
|
acef5a2b5636f84c4e332012fbb9a52906076306
|
9554f2acac15c65a1092b8ff2c118edc27c8b754
|
/server.py
|
ccc9782e412cb18b9f138603d1128af130d19c7b
|
[] |
no_license
|
hunnain/flaskReacttodoapp
|
3af1feee9bdbbf72987d9fe4f2ed276794a163e5
|
d7fac40991e3bbc5892c0076e25cb62f8e54d210
|
refs/heads/master
| 2020-03-26T17:03:40.888646
| 2018-12-17T14:53:44
| 2018-12-17T14:53:44
| 145,139,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
import os
from flask import Flask, render_template, url_for, request, send_from_directory, session, jsonify
import json
import firebase_admin
from firebase_admin import db, credentials, auth
from flask import Session
app = Flask(__name__, static_folder="./dist", template_folder="./")
# SESSION_TYPE = 'redis'
app.config.from_object(__name__)
# Session(app)
cred = credentials.Certificate("firebaseauth.json")
# firebase_admin.initialize_app(cred)
firebase_admin.initialize_app(options={
'databaseURL': 'https://ultimate-todo-app.firebaseio.com'
})
ultimateTodo = db.reference('ultimateTodo')
# secre key
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
# session['userKey'] = 88666
@app.route('/')
def index():
return render_template("index.html")
@app.route('/registeruser', methods=['GET', 'POST'])
def registerUser():
data = request.get_json(silent=True)
userData = data["user"]
# print('This is a data',data)
print('data', userData['email'])
user = auth.create_user(
email=userData['email'],
email_verified=False,
password=userData['password'],
display_name=userData['userName'],
disabled=False
)
print('This is siggn up data', user)
userDb = ultimateTodo.child('users').push(userData)
return 'success'
@app.route('/loginuser', methods=['POST'])
def loginUser():
data = request.get_json(silent=True)
userData = data["user"]
email = userData['email']
password = userData['password']
ref = db.reference('ultimateTodo/users')
refData = ref.get()
userAuth = 'success'
for key, val in refData.items():
if(email == val['email'] and password == val['password']):
print('correct')
userAuth = 'success'
userVal = val
userKey = key
session['logged_in'] = True
session['userKey'] = userKey
session['userVal'] = userVal
app.secret_key = userKey
print('This my user val', userVal, key, session)
else:
print('false')
userAuth = 'notsuccess'
session['logged_in'] = False
# print(val['email'])
# print(ref.get())
return userAuth
@app.route('/logginUserData', methods=['GET'])
def loggedinUser():
data = session
dataUid = data['userKey']
datareq = data['userVal']
userVal = {
'email': datareq['email'],
'username': datareq['userName'],
'joiningdate': datareq['joiningdate'],
'uid': dataUid
}
return jsonify(userVal)
# Logout User
@app.route('/logoutUser', methods=['POST'])
def logoutUser():
data = session
logout = request.get_json(silent=True)
session.clear()
print('My logout', logout, 'My session', session)
return 'Sucesfully logout'
@app.route('/addtodos', methods=['GET', 'POST'])
def addTodos():
data = request.get_json(silent=True)
userData = data["user"]
if userData['uid'] == session['userKey']:
userVal = {
'todo': userData['todo'],
'description': userData['description']
}
# userVal = jsonify(userVal)
userDb = ultimateTodo.child('users').child(session['userKey']).child('todos').push(userVal)
else:
return False
# print('mu data',userData,'sessionkey',session['userKey'],'my user val',userVal)
@app.route('/fetchtodoapi/v1.0', methods=['GET','POST'])
def fecthTodo():
data = request.get_json(silent=True)
# userData = data["user"]
# email = userData['email']
# password = userData['password']
ref = db.reference('ultimateTodo/users')
refData = ref.get()
for key, val in refData.items():
jsonVal = jsonify(val)
print('fecthing datatatat', jsonVal)
else:
print('false')
# print(val['email'])
# print(ref.get())
return jsonVal
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
return render_template("index.html")
app.run(host='0.0.0.0', debug=True, port=5050)
|
[
"hunnainpashahgchgc@gmail.com"
] |
hunnainpashahgchgc@gmail.com
|
099ef008b3f080c1f48b86d42aebb4646b7a3341
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_040/ch42_2020_03_30_19_42_32_027965.py
|
abd6af2dd2a16f2dd4ee9fb879398800cc961684
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
x=0
lista=[]
palavra = 0
while palavra!="fim":
palavra = input("Digite uma palavra: ")
lista.append(palavra)
while x < len(lista):
if (lista[x])[0] == "a":
print (lista[x])
x+=1
else:
x+=1
|
[
"you@example.com"
] |
you@example.com
|
7eee97a56674f29c34f9eeec948a1a8cfacc1158
|
8f850b559b8058f3e31712e7809191576a774449
|
/src/levenshtein.py
|
84a492c6435977e83f3d7ec7cca7196aa20b9df9
|
[] |
no_license
|
crystal-k7/edit-distance
|
4c1f60354aede0fb10887f21eda35328bff94180
|
afc9cc7e48684af369c22ca201edddc5c44aa775
|
refs/heads/master
| 2022-09-01T07:04:40.522227
| 2020-05-25T05:02:30
| 2020-05-25T05:02:30
| 266,682,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,220
|
py
|
from time import time
# Levenshtein Distance(Edit Distance Algorithm)
def levenshtein(ref, input):
dist = list()
for i in range(len(ref) + 1):
temp = list()
for j in range(len(input) + 1):
temp.append(0)
dist.append(temp)
for i in range(len(ref) + 1):
dist[i][0] = i
for j in range(len(input) + 1):
dist[0][j] = j
for j in range(1, len(input) + 1):
for i in range(1, len(ref) + 1):
if ref[i-1] == input[j-1]:
dist[i][j] = dist[i-1][j-1]
else:
dist[i][j] = min(dist[i - 1][j - 1] + 1, min(dist[i][j - 1] + 1, dist[i - 1][j] + 1))
#for line in dist:
# print(line)
return dist, dist[len(ref)][len(input)]
def scoring(ref, input, dist):
N = len(ref)
i = len(ref)
j = len(input)
D = 0
I = 0
S = 0
# 일치 워드
search_word = list()
while not (i == 0 and j == 0):
s = min(dist[i - 1][j], dist[i - 1][j - 1], dist[i][j - 1])
if s == dist[i][j]:
i -= 1
j -= 1
search_word.append(ref[i])
else: # I <==> D 변경했음
if s == dist[i - 1][j]: # I: 추가 (왼쪽)
print("삭제:", ref[i - 1])
D += 1
i -= 1
elif s == dist[i - 1][j - 1]: # S: 변경 (왼쪽 위)
print("수정:", ref[i - 1], input[j - 1])
S += 1
i -= 1
j -= 1
elif s == dist[i][j - 1]: # D: 삭제 (위쪽)
print("삽입:", input[j - 1])
I += 1
j -= 1
search_word.append(" ")
H = N - S - D
corr = H / N * 100
acc = (H - I) / N * 100
print("일치하는 글자들")
print(ref)
print(input)
print("".join(reversed(search_word)))
print("-------------------------------------------------------------------------------")
print("WORD: corr={:.2f}%, acc={:.2f}% [H:{}, D:{}, S={}, I={}, N={}]".format(corr, acc, H, D, S, I, N))
print("===============================================================================")
return corr, acc, H, D, S, I, N
|
[
"soul7crystal@gmail.com"
] |
soul7crystal@gmail.com
|
5949ea4733b4a8596f5cd12e5fa04142cea360f1
|
d4999070ab353e7067677aec333d46eb20586606
|
/django_basic/settings.py
|
cc3fbc9987d880c887b7ee13ba0b1e189107194f
|
[] |
no_license
|
rubenqc/basic_django
|
610d101353afc0f041d3b03ee1374699f9a4642d
|
9bc78b0d00ff5a5b8558768efad451087cec44ac
|
refs/heads/main
| 2023-06-10T16:45:44.814831
| 2021-07-04T00:07:06
| 2021-07-04T00:07:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,257
|
py
|
"""
Django settings for django_basic project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-5ok##hp7w+@+)xl!^e$hh=#g5kg_jyd9@p_=lklvh%!fqfv(3m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_basic.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_basic.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"mmacolluncoc@uni.pe"
] |
mmacolluncoc@uni.pe
|
96eb1e8ba12a5a37a9f2fcc5ae4473e1479a4e81
|
22ab14f25c770afcffb74401ff618883485483e8
|
/市场行情爬取/scrapy-pyinstaller/gp/spiders/DailyFunds.py
|
e3403309ab13642a4646bb2bcb50077ec9ff3f2f
|
[] |
no_license
|
nativefans/note
|
c86f5864b07774e9d18332830d4f5d38ce9a5691
|
25e426de22fd72c0569fb1d329ce437b0a26c706
|
refs/heads/master
| 2022-10-28T11:00:30.076231
| 2020-06-21T13:22:43
| 2020-06-21T13:22:43
| 273,837,221
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,113
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import logging
import copy
from datetime import date
from ..items import FundsItem
from scrapy.utils.project import get_project_settings
settings = get_project_settings()
#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('DailyFundSpider')
class DailyfundsSpider(scrapy.Spider):
name = 'DailyFunds'
allowed_domains = [
'investing.com'
]
def __init__(self):
self.url = [
'https://cn.investing.com/funds/usa-funds?&issuer_filter=0'
]
self.y = date.today().year
self.today = date.today().strftime('%Y-%m-%d')
def start_requests(self):
for each in self.url:
logger.debug(f'现在开始爬取美国每日基金数据------')
yield scrapy.Request(url=each, callback=self.company_parse)
def company_parse(self, response):
content = response.xpath(
'//select[@class="selectBox float_lang_base_2 js-issuer-filter"]/option/text()'
).extract()
list = []
try:
for each in content:
list.append(each)
except Exception as e:
logger.warning(e)
for _company in list[1:]:
_url = f'https://cn.investing.com/funds/usa-funds?&issuer_filter={_company}'
logger.info(f'正爬取 {_company} 的基金数据------')
yield scrapy.Request(url=_url,
meta={'company': copy.deepcopy(_company), 'download_timeout': 30},
callback=self.funds_parse)
def funds_parse(self, response):
_url = 'https://cn.investing.com'
_company = response.meta['company']
_rate = response.xpath('//table[@id="etfs"]/tbody/tr/td[5]/text()').extract()
_date = response.xpath('//table[@id="etfs"]/tbody/tr/td[7]/text()').extract()
_code = response.xpath('//table[@id="etfs"]/tbody/tr/td[@class="left symbol"]/@title').extract()
_fund = response.xpath('//table[@id="etfs"]/tbody/tr/td[@class="bold left noWrap elp plusIconTd"]/span/@data-name').extract()
_href = response.xpath('//table[@id="etfs"]/tbody/tr/td[@class="bold left noWrap elp plusIconTd"]/a/@href').extract()
for i in range(len(_code)):
date = _date[i].split('/')
update_date = f'{self.y}-{date[1]}-{date[0]}'
if _rate != "0.00%" and update_date <= self.today:
url = _url + _href[i]
yield scrapy.Request(url= url,
meta={
'date':copy.deepcopy(update_date),
'code': copy.deepcopy(_code[i]),
'fundName': copy.deepcopy(_fund[i]),
'company': copy.deepcopy(_company),
'download_timeout': 30
},
callback=self.data_parse)
def data_parse(self, response):
item = FundsItem()
_date = response.meta['date']
item['date'] = response.meta['date']
item['code'] = response.meta['code']
item['fundName'] = response.meta['fundName']
_company = response.meta['company']
try:
# if:日期判断
closingPrice = response.xpath('//span[@id="last_last"]/text()').extract_first()
change = response.xpath('//div[@class="top bold inlineblock"]/span[2]/text()').extract_first()
growthRate = response.xpath('//div[@class="top bold inlineblock"]/span[4]/text()').extract_first().replace('%','')
MorningstarRating = str(len(response.xpath('//*[@id="quotes_summary_secondary_data"]/div/ul/li[1]/span[2]/i[@class="morningStarDark"]').extract()))
TotalAssets = response.xpath('//*[@id="quotes_summary_secondary_data"]/div/ul/li[2]/span[2]/text()').extract_first()
OneYearChange = response.xpath('//div[@class="clear overviewDataTable overviewDataTableWithTooltip"]/div[2]/span[2]/text()').extract_first().replace(' ','').replace('%','')
previousClose = response.xpath('//div[@class="clear overviewDataTable overviewDataTableWithTooltip"]/div[3]/span[2]/text()').extract_first()
RiskRating = str(len(response.xpath('//div[@class="clear overviewDataTable overviewDataTableWithTooltip"]/div[4]/span[2]/i[@class="morningStarDark"]').extract()))
TTMYield = response.xpath('//div[@class="clear overviewDataTable overviewDataTableWithTooltip"]/div[5]/span[2]/text()').extract_first().replace('%','')
ROE = response.xpath('//div[@class="clear overviewDataTable overviewDataTableWithTooltip"]/div[6]/span[2]/text()').extract_first().replace('%','')
turnover = response.xpath('//div[@class="clear overviewDataTable overviewDataTableWithTooltip"]/div[8]/span[2]/text()').extract_first().replace('%','')
ROA = response.xpath('//div[@class="clear overviewDataTable overviewDataTableWithTooltip"]/div[9]/span[2]/text()').extract_first().replace('%','')
totalMarketCapitalization = response.xpath('//div[@class="clear overviewDataTable overviewDataTableWithTooltip"]/div[14]/span[2]/text()').extract_first()
YTDFundReturn = response.xpath('//table[@class="genTbl openTbl crossRatesTbl"]/tbody/tr[2]/td[2]/text()').extract_first().replace('%','')
ThreeMonthFundReturn = response.xpath('//table[@class="genTbl openTbl crossRatesTbl"]/tbody/tr[2]/td[3]/text()').extract_first().replace('%','')
OneYearFundReturn = response.xpath('//table[@class="genTbl openTbl crossRatesTbl"]/tbody/tr[2]/td[4]/text()').extract_first().replace('%','')
ThreeYearFundReturn = response.xpath('//table[@class="genTbl openTbl crossRatesTbl"]/tbody/tr[2]/td[5]/text()').extract_first().replace('%','')
FiveYearFundReturn = response.xpath('//table[@class="genTbl openTbl crossRatesTbl"]/tbody/tr[2]/td[6]/text()').extract_first().replace('%','')
item['closingPrice'] = closingPrice
item['previousClose'] = previousClose
item['growthRate'] = growthRate
item['change'] = change
item['OneYearChange'] = OneYearChange
item['turnover'] = turnover
item['MorningstarRating'] = MorningstarRating
item['RiskRating'] = RiskRating
item['TTMYield'] = TTMYield
item['ROE'] = ROE
item['ROA'] = ROA
item['YTDFundReturn'] = YTDFundReturn
item['ThreeMonthFundReturn'] = ThreeMonthFundReturn
item['OneYearFundReturn'] = OneYearFundReturn
item['ThreeYearFundReturn'] = ThreeYearFundReturn
item['FiveYearFundReturn'] = FiveYearFundReturn
item['TotalAssets'] = TotalAssets
item['totalMarketCapitalization'] = totalMarketCapitalization
item['company'] = _company
yield item
except Exception as e:
logger.warning(e)
|
[
"1366124823@qq.com"
] |
1366124823@qq.com
|
f1c0d28721be3c4b319beb10fa3e9e4745ad45a6
|
1cfefe5b50ad7c780c03254cd02bb4319485cf4f
|
/face_adding/core/augimg/augment.py
|
3b49eea08ed80d39e4c93f1f19400e5aacc134de
|
[] |
no_license
|
phamngocquy/face_recognition_server
|
b08d75508654fc2a7589f55cb896f9b2585896e6
|
3263c1cd24494e1f2793e9e3cbe06cd731608de8
|
refs/heads/master
| 2021-09-21T23:44:12.761191
| 2018-09-03T10:36:58
| 2018-09-03T10:36:58
| 146,284,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
import numpy as np
import cv2
import os
from datetime import datetime
from face_adding.utils.config import Config
from face_adding.models import *
class ImgAugment(object):
@staticmethod
def make(path, name):
person = Person.objects.filter(name=name)
if len(person) <= 0:
person = Person(name=name)
person.save()
else:
person = person.last()
img_raw = Image(path=path, person_id=person.id)
img_raw.save()
try:
img = cv2.imread(path)
images = np.array(
[img for _ in range(12)],
dtype=np.uint8
)
seq = iaa.Sequential([
iaa.Affine(
rotate=(0.0, 30),
translate_px=iap.RandomSign(iap.Poisson(3)) # set seed for randomSign
)
])
images_aug = seq.augment_images(images)
store_path = os.path.join(Config.storePath, person.name.replace(' ', ''))
for index, img in enumerate(images_aug):
img_path = "{}/{}.jpg".format(store_path,
person.name.replace(' ', '') + str(datetime.now().microsecond))
cv2.imwrite(img_path, img)
img = Image(path=img_path, person_id=person.id)
img.save()
except IOError:
print("Path not exists!")
|
[
"phamngocquy97@gmail.com"
] |
phamngocquy97@gmail.com
|
836eabd9670091509ba654c4b4d8203fe6124063
|
c56a670ce30216c753e054603c5eef0804ca6866
|
/ros2/kmr_communication/kmr_communication/nodes/camera_node.py
|
7b4609bf834b817b8733bf6ee2efb2c7272b7606
|
[] |
no_license
|
TPK4960-RoboticsAndAutomation-Master/ROS2-ENTITY
|
72b98e7bc4ced15e5746dceb9ff6cac8e9744fde
|
a67f734ecc55654375edaad52debe90bcc64526f
|
refs/heads/main
| 2023-04-20T15:51:25.719852
| 2021-05-04T14:13:00
| 2021-05-04T14:13:00
| 346,657,896
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,697
|
py
|
#!/usr/bin/env python3
import sys
from typing import Callable
import rclpy
import argparse
from std_msgs.msg import String, Float64
from rclpy.node import Node
from rclpy.utilities import remove_ros_args
import subprocess
def cl_red(msge): return '\033[31m' + msge + '\033[0m'
def cl_green(msge): return '\033[32m' + msge + '\033[0m'
class CameraNode(Node):
def __init__(self, robot):
super().__init__('camera_node')
self.name = 'camera_node'
self.robot = robot
self.status = 0
self.declare_parameter('id')
self.id = self.get_parameter('id').value
self.declare_parameter('udp/ip')
self.ip = self.get_parameter('udp/ip').value
self.proc = None
# Subscribers
sub_camera = self.create_subscription(String, 'handle_camera_' + str(self.id), self.handle_camera, 10)
sub_status_check = self.create_subscription(String, 'status_check', self.status_callback, 10)
# Publishers
self.camera_status_publisher = self.create_publisher(String, 'camera_status', 10)
self.publish_status()
def status_callback(self, data):
self.publish_status()
def handle_camera(self, data):
if data.data.lower() == "start" and self.status == 0:
print(cl_green("Starting camera"))
self.proc = subprocess.Popen(["/bin/bash", "kmr_communication/kmr_communication/script/startcamera.sh", self.ip])
self.status = 1
elif data.data.lower() == "stop":
try:
self.status = 0
self.proc.terminate()
self.proc = None
print(cl_green("Stopping camera"))
except AttributeError:
print(cl_red("Camera was never started, therefore never stopped"))
self.publish_status()
def publish_status(self):
msg = String()
msg.data = self.id + ":" + self.robot + ":camera:" + str(self.status) + ":" + str(self.ip) #ip = ip:port
self.camera_status_publisher.publish(msg)
def tear_down(self):
try:
self.destroy_node()
rclpy.shutdown()
print(cl_green("Successfully tore down camera node"))
except:
print(cl_red('Error: ') + "rclpy shutdown failed")
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-ro', '--robot')
args = parser.parse_args(remove_ros_args(args=argv))
while True:
rclpy.init(args=argv)
camera_node = CameraNode(args.robot)
rclpy.spin(camera_node)
if __name__ == '__main__':
main()
|
[
"andreas@arnholm.org"
] |
andreas@arnholm.org
|
03b2f74c8f32bd75544078a14aaba0e73a6f8709
|
e66b0e2461eaabd677e3c60726002533bf7e0d89
|
/serverExempel/funktioner.py
|
a495252ab4721a0c21cc8043b01aa53fd1e10a24
|
[] |
no_license
|
simoneje/projektspel
|
72940f5155bf682b39da3043886aacc6ed809989
|
227ee803459c4d7bdbec60ac1a1f2a60ba115f84
|
refs/heads/master
| 2021-04-14T07:12:19.479431
| 2020-03-27T07:00:41
| 2020-03-27T07:00:41
| 249,215,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,907
|
py
|
import numpy as np
import os
import sqlite3
from sqlite3 import Error
from flask import Flask, escape, request, jsonify, render_template
import requests
import time
import sys
RAD = 6
KOLUMN = 7
#Skapar en 2d array utav nollor
def boardCreate(Rad, Kolumn):
board = np.((Rad,Kolumn))
return board
#bestämmer vart objektet/pjäsen släpps
def dropObject(board, row, col, obj):
board[row][col] = obj
#Kollar om kolumnen är tillgänglig för att
def validLoc(board, col):
return board[RAD-1][col] == 0
def getOpenRow(board, col, rad):
for r in range(rad):
if board[r][col] == 0:
return r
def flipBoard(board):
print(np.flip(board, 0))
def dbConnection(db_file):
conn = None
try:
conn = sqlite3.connect(db_file)
except Error:
print(Error)
return conn
def cleanDbTable(dbtable, db_file):
conn = dbConnection(db_file)
sql = f'DELETE FROM {dbtable}'
cur = conn.cursor()
cur.execute(sql)
conn.commit()
def fillBoard(Rad, Kolumn):
try:
p1Move = requests.get('http://127.0.0.1:5000/playermoves1')
p2Move = requests.get('http://127.0.0.1:5000/playermoves2')
except requests.exceptions.ConnectionError:
print('Error connecting to server')
board = boardCreate(RAD, KOLUMN)
p1Movelist = p1Move.json()
p2Movelist = p2Move.json()
if len(p1Movelist) > 0:
while len(p1Movelist) or len(p2Movelist) > 0:
inCol = p1Movelist.pop(0)
if validLoc(board, inCol):
row = getOpenRow(board, inCol, RAD)
dropObject(board, row, inCol, 1)
if victory(board, 1):
print('Game Over!')
flipBoard(board)
time.sleep(2)
print('But BOTH are WINNERS :D')
time.sleep(6)
cleanDbTable('player1moves', 'data.db')
cleanDbTable('player2moves', 'data.db')
cleanDbTable('game', 'data.db')
cleanDbTable('turn', 'data.db')
cleanDbTable('move', 'data.db')
sys.exit()
if len(p2Movelist) > 0:
inCol = p2Movelist.pop(0)
if validLoc(board, inCol):
row = getOpenRow(board, inCol, RAD)
dropObject(board, row, inCol, 2)
if victory(board, 2):
print('Game Over!')
flipBoard(board)
time.sleep(2)
print('But BOTH are WINNERS :D')
time.sleep(6)
cleanDbTable('player1moves', 'data.db')
cleanDbTable('player2moves', 'data.db')
cleanDbTable('game', 'data.db')
cleanDbTable('turn', 'data.db')
cleanDbTable('move', 'data.db')
sys.exit()
else:
pass
return flipBoard(board)
else:
board = boardCreate(RAD, KOLUMN)
return board
def victory(board, piece):
#Kollar horizentalt för vinst
for k in range(KOLUMN-3): #tar bort tre eftersom det går ej att vinna från vissa postioner
for r in range(RAD):
if board[r][k] == piece and board[r][k+1] == piece and board[r][k+2] == piece and board[r][k+3] == piece:
return True
#Kollar vertikalt för vinst
for k in range(KOLUMN):
for r in range(RAD-3): # vi kan inte starta på top raden
if board[r][k] == piece and board[r+1][k] == piece and board[r+2][k] == piece and board[r+3][k] == piece:
return True
#Kollar diagonellt för vinst positiv, måste ha minus 3 på bägge eftersom man inte kan vinna från top 3.
#Denna funktion kollar dem positiva värderna för diagonellt
for k in range(KOLUMN-3):
for r in range(RAD-3):
if board[r][k] == piece and board[r+1][k+1] == piece and board[r+2][k+2] == piece and board[r+3][k+3] == piece:
return True
#Kollar diagonellt för vinst negativt, måste ha
for k in range(KOLUMN-3):
for r in range(3, RAD): #har 3 för tredje indexet på spelplanen som behövs för att kunna få fyra i rad.
if board[r][k] == piece and board[r-1][k+1] == piece and board[r-2][k+2] == piece and board[r-3][k+3] == piece:
return True
# cleanDbTable('player1moves', 'data.db')
# cleanDbTable('player2moves', 'data.db')
# cleanDbTable('game', 'data.db')
# cleanDbTable('turn', 'data.db')
# cleanDbTable('move', 'data.db')
|
[
"noreply@github.com"
] |
simoneje.noreply@github.com
|
904d92dd1f12f043fe448e3d4decd8e93773cea5
|
c297e5e766c65d89d2b62682fccd902bdd0412f1
|
/encoder/fcn8_incepV3_u_net.py
|
6bf1899a31b340f83e269622e03addb1454bd5e9
|
[] |
no_license
|
zmyu/My_kittiseg
|
ea23b8b1f520403ec7d6a1961bfba5132e7182ca
|
4a15c738cb8285cf96a7a3b7f1ee1ae0eead13ed
|
refs/heads/master
| 2021-09-08T07:10:15.084960
| 2018-03-08T07:20:30
| 2018-03-08T07:20:30
| 113,545,809
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
"""
Utilize inceptionV3 as encoder.
------------------------
The MIT License (MIT)
Copyright (c) 2017 Marvin Teichmann
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception import inception_v3_u_net as inception
import os
import tensorflow.contrib.slim as slim
def inference(hypes, images, train=True):
""".
Args:
images: Images placeholder, from inputs().
train: whether the network is used for train of inference
Returns:
softmax_linear: Output tensor with the computed logits.
"""
with slim.arg_scope(inception.inception_v3_arg_scope()):
_, logit, _ = inception.inception_v3_fcn(images,is_training=train,dropout_keep_prob=hypes['solver']['dropout'])
logits = {}
logits['images'] = images
#TODO this is what we want
logits['fcn_logits'] = logit
return logits
|
[
"zmyu@vw-mobvoi.com"
] |
zmyu@vw-mobvoi.com
|
b92e139730515dc62aed3f13e56bb4ee72213938
|
69ec16d994040399b8167b66e18f456c9419ec16
|
/PyQt5Ex/pyqt2.py
|
b8b6e52f0f366db3e4ab13079a7cd4358ed5bab2
|
[] |
no_license
|
syshinkr/Python-Study
|
e8201701b1df74b31cc8eb04aaedad9061b46cfc
|
5eee7928049e7532a2c31e63a46b1036e5b9fa6d
|
refs/heads/master
| 2020-06-12T06:48:00.723277
| 2019-06-28T06:36:46
| 2019-06-28T06:36:46
| 194,219,844
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
class Window(QtWidgets.QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.setGeometry(50, 50, 300, 300)
self.setWindowTitle('PyQt5')
self.setWindowIcon(QtGui.QIcon('image/bird.png'))
self.show()
app = QtWidgets.QApplication(sys.argv)
GUI = Window()
sys.exit(app.exec_())
|
[
"syshinkr228@gmail.com"
] |
syshinkr228@gmail.com
|
14f8948b312431d771703a511b627c4b06b8c8ea
|
b39778e9a161fab5b42577042b2e66a1e201ee08
|
/v1/accounts/urls.py
|
3fede521463b56e2c9d809cc28c6c1c3f64a32df
|
[
"MIT"
] |
permissive
|
Kenan7/Bank
|
e1f95d05d44f0749c7c90f32b619d4cd2df449ce
|
45e2a558dd725dcc9c9995e03dbc0d02221b3710
|
refs/heads/master
| 2022-11-17T06:27:30.291586
| 2020-07-14T08:01:39
| 2020-07-14T08:01:39
| 279,518,998
| 0
| 0
|
MIT
| 2020-07-14T07:50:39
| 2020-07-14T07:50:38
| null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.urls import path
from .views.account import AccountView
urlpatterns = [
# Accounts
path('accounts', AccountView.as_view()),
]
|
[
"buckyroberts@gmail.com"
] |
buckyroberts@gmail.com
|
c0c175405a61370f1bddbb8daa7d3b347ad91026
|
e86d020f8ade86b86df6ad8590b4458a9d415491
|
/projects/test-crrr/base_auth/utils/org.py
|
36eb99ea5977b0b4d024845975eb2040cc04547e
|
[] |
no_license
|
g842995907/guops-know
|
e4c3b2d47e345db80c27d3ba821a13e6bf7191c3
|
0df4609f3986c8c9ec68188d6304d033e24b24c2
|
refs/heads/master
| 2022-12-05T11:39:48.172661
| 2019-09-05T12:35:32
| 2019-09-05T12:35:32
| 202,976,887
| 1
| 4
| null | 2022-11-22T02:57:53
| 2019-08-18T08:10:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,811
|
py
|
import logging
from django.db.models import Q
from base_auth import app_settings
from base_auth.models import User
logger = logging.getLogger(__name__)
def org_level(org):
level = 1
while org.parent:
level += 1
org = org.parent
return level
def get_org_level(user):
if user.is_superuser:
return 0
if user.is_admin:
return 0
if not user.organization:
_illegal_user(user)
return org_level(user.organization)
def can_add_org(operate_user, parent):
if parent:
t_org_level = org_level(parent)
if t_org_level >= app_settings.ORG_DEPTH:
return False
if operate_user.is_superuser:
return True
if not parent:
return False
if operate_user.group != User.Group.ADMIN:
return False
o_org_level = get_org_level(operate_user)
t_org_level = org_level(parent)
o_org = operate_user.organization
t_org = parent
if o_org_level > t_org_level:
return False
else:
while t_org:
if t_org == o_org:
return True
t_org = t_org.parent
return False
def can_operate_org(operate_user, org):
if operate_user.is_superuser:
return True
if operate_user.group != User.Group.ADMIN:
return False
o_org_level = get_org_level(operate_user)
t_org_level = org_level(org)
o_org = operate_user.organization
t_org = org
if o_org_level >= t_org_level:
return False
else:
while t_org.parent:
if t_org.parent == o_org:
return True
t_org = t_org.parent
return False
def can_add_user(operate_user, org, group):
if operate_user.is_superuser:
return True
if not org or not group:
return False
if operate_user.group != User.Group.ADMIN:
return False
o_org_level = get_org_level(operate_user)
t_org_level = org_level(org)
o_org = operate_user.organization
t_org = org
if o_org_level == t_org_level:
return o_org == t_org and group > User.Group.ADMIN
elif o_org_level < t_org_level:
t_org = t_org.parent
while t_org:
if t_org == o_org:
return True
t_org = t_org.parent
return False
else:
return False
def can_operate_user(operate_user, target_user):
if operate_user == target_user:
return True
if operate_user.is_superuser:
return True
if target_user.is_superuser:
return False
if operate_user.group != User.Group.ADMIN:
return False
o_org_level = get_org_level(operate_user)
t_org_level = get_org_level(target_user)
o_org = operate_user.organization
t_org = target_user.organization
if o_org_level == t_org_level:
return o_org == t_org
elif o_org_level < t_org_level:
while t_org.parent:
if t_org.parent == o_org:
return True
t_org = t_org.parent
return False
else:
return False
def _illegal_user(user):
msg = 'illegal user[%s]!' % user.pk
logger.error(msg)
raise Exception(msg)
def get_filter_org_params(user, field=None):
user_org_level = get_org_level(user)
if user_org_level == 0:
return Q()
if field:
base_key = '{}__organization'.format(field)
else:
base_key = 'organization'
params = Q(**{base_key: user.organization})
for i in range(app_settings.ORG_DEPTH - user_org_level):
base_key = '{}{}'.format(base_key, '__parent')
params = params | Q(**{base_key: user.organization})
return params
def filter_org_queryset(user, queryset, field=None):
return queryset.filter(get_filter_org_params(user, field))
|
[
"842995907@qq.com"
] |
842995907@qq.com
|
de358e94f05cb216d81c1af497a11e985cb81554
|
b3aa0ec7bdb6084bff3bcd2652aa123b08ccb7b5
|
/real_deal/api/apps.py
|
656acc8e49f7810bd593c95e59ba3fbc31edf8d3
|
[] |
no_license
|
silverlogic/real-deal-back
|
9e31796f1783dabe1c7b38964fed9732a1ea6842
|
bd45b544a6deb4fcf94b40c12e4fd8014b94fa3b
|
refs/heads/master
| 2021-01-25T11:55:28.701052
| 2017-06-11T10:38:52
| 2017-06-11T10:38:52
| 93,952,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
from django.apps import AppConfig
class ApiConfig(AppConfig):
name = 'real_deal.api'
verbose_name = 'API'
|
[
"ryanpineo@gmail.com"
] |
ryanpineo@gmail.com
|
1a29da616b6de738a22a6267911bf60a23306322
|
dc616d885cc37e03357de4b03e2857af49bbc68e
|
/numpy/13.py
|
02cb8f7ca5c7f3644b7247a5cb9b8fe80f97b3f6
|
[] |
no_license
|
sergey061102/AltanML
|
fb446b5ef53dd67ace53823132d02ba9f2b3eb38
|
6511cabe1b9b22b5f0215505aac7f4395fd9dfaf
|
refs/heads/master
| 2020-04-29T07:38:18.058319
| 2019-03-20T10:59:55
| 2019-03-20T10:59:55
| 175,960,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
import numpy as np
Z = np.random.random((10, 10))
Zmin, Zmax = Z.min(), Z.max()
print(Zmin, Zmax)
|
[
"example@example.com"
] |
example@example.com
|
ca757733ef37956f24f3ada5f7ca77951a2acdd8
|
6c26a9bd075d3d54a307d7c1e5a0bc67b50df8c2
|
/python_intermediate/python3/06_try.py
|
90f7c184d4e8c0658a13b89e26d206ffdfe88ef1
|
[] |
no_license
|
marialobillo/dataquest
|
86efc49c0339c07e6263d428b5ecd2f80d395ecb
|
49e8b653adf23a12fb9eb6a972d85bc1797dba0a
|
refs/heads/master
| 2021-08-28T08:01:36.301087
| 2017-12-11T16:02:18
| 2017-12-11T16:02:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
try:
float('hello')
except Exception:
print('Error converting to float.')
|
[
"maria.lobillo.santos@gmail.com"
] |
maria.lobillo.santos@gmail.com
|
1d93a85e106b3cb7328abc26ce64275a1eef2658
|
0d3ff1880e0305eec78a2e56be201bb814645a3d
|
/Day 01/Work/fizzbuzz.py
|
99d47825e055f2a443261950c68fbef0570e936a
|
[] |
no_license
|
vijay-lab/FSDP2019
|
1b64ee5dc2c904ecf2f2a64845feaa486bb8e7d7
|
3cba03dc859a3108710cf575d1a400929709b2a8
|
refs/heads/master
| 2020-05-24T08:53:27.677911
| 2019-07-26T13:24:24
| 2019-07-26T13:24:24
| 187,193,086
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 7 18:14:52 2019
@author: TAPAN
"""
count = 0
while (count< 100):
count = count + 1
if (count%3 == 0 and count%5 == 0 ):
print("FizzBuzz")
continue
elif(count%3 == 0):
print("Fizz")
continue
elif(count%5 == 0):
print("Buzz")
continue
print(count)
|
[
"tapanvijay@outlook.com"
] |
tapanvijay@outlook.com
|
3dc82b9b6ad073e7c515052a34193857fd44cbea
|
ea629f716a83da061b85fbb4c951e0709a731acb
|
/scripts/interf.py
|
4a1b8d3c9b87305626c844701a94d02473bf4082
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
mkarim2017/insarzd
|
5b0c04527574b67d236ef874c8e60b0bceed8c79
|
e7d05f836e7ca044166e38bad549629ed00d71f1
|
refs/heads/master
| 2020-12-22T23:56:09.455132
| 2020-03-30T23:59:47
| 2020-03-30T23:59:47
| 236,969,323
| 0
| 0
|
NOASSERTION
| 2020-01-29T11:43:15
| 2020-01-29T11:43:14
| null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
#!/usr/bin/env python3
import os
import sys
import shutil
import argparse
import isce
import isceobj
from crlpac import getWidth
from crlpac import getLength
from crlpac import runCmd
from crlpac import create_xml
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser( description='interferometry')
parser.add_argument('-m', '--master', dest='master', type=str, required=True,
help = 'master SLC')
parser.add_argument('-s', '--slave', dest='slave', type=str, required=True,
help = 'resampled slave SLC')
parser.add_argument('-i', '--intf', dest='intf', type=str, required=True,
help = '(output) interferogram')
parser.add_argument('-a', '--amp', dest='amp', type=str, required=True,
help = '(output) amplitudes of master and slave SLCs')
if len(sys.argv) <= 1:
print('')
parser.print_help()
sys.exit(1)
else:
return parser.parse_args()
if __name__ == '__main__':
inps = cmdLineParse()
#get information
masterWidth = getWidth(inps.master + '.xml')
masterLength = getLength(inps.master + '.xml')
#run interf
cmd = "$INSAR_ZERODOP_BIN/interf {} {} {} {} {}".format(inps.master, inps.slave, inps.intf, inps.amp, masterWidth)
#print("{}".format(cmd))
runCmd(cmd)
#get xml file for interferogram
create_xml(inps.intf, masterWidth, masterLength, 'int')
#get xml file for amplitude image
create_xml(inps.amp, masterWidth, masterLength, 'amp')
#./interf.py -m 20130927.slc -s 20141211.slc -i 20130927-20141211.int -a 20130927-20141211.amp
|
[
"cunrenl@caltech.edu"
] |
cunrenl@caltech.edu
|
c2f193958b22b647e6c01429eb9cd83866b0e351
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_070/ch147_2020_04_08_11_05_33_482121.py
|
a1ed04de6fd783e69fc92a8aad6b949ebb31ce09
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
def conta_ocorrencias(lista):
dicio = {}
for i in lista:
if i not in dicio:
dicio[i] = 1
else:
dicio[i] +=1
return dicio
def mais_frequente(lista):
x = conta_ocorrencias(lista)
mais = 0
palavra = 0
for i,n in x.items():
if n > mais:
palavra = i
return palavra
|
[
"you@example.com"
] |
you@example.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.