# Auto-generated card-ability stubs (dataset-export table header removed).
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Lifespinner")
def lifespinner(card, abilities):
    """Register the ability stub for the card Lifespinner."""
    def lifespinner():
        """Placeholder: this ability has not been implemented yet."""
        return AbilityNotImplemented
    return lifespinner,
@card("Teardrop Kami")
def teardrop_kami(card, abilities):
    """Register the ability stub for the card Teardrop Kami."""
    def teardrop_kami():
        """Placeholder: this ability has not been implemented yet."""
        return AbilityNotImplemented
    return teardrop_kami,
@card("Ire of Kaminari")
def ire_of_kaminari(card, abilities):
def ire_of_kaminari():
return AbilityNotImplemented
return ire_of_kaminari,
@card("Heartless Hidetsugu")
def heartless_hidetsugu(card, abilities):
def heartless_hidetsugu():
return AbilityNotImplemented
return heartless_hidetsugu,
@card("Quillmane Baku")
def quillmane_baku(card, abilities):
    """Register ability stubs for Quillmane Baku.

    Fix: the original defined both abilities under the same name, so the
    second definition shadowed the first and the returned tuple held two
    references to one function. Distinct names yield one stub per ability.
    """
    def quillmane_baku_ability_1():
        return AbilityNotImplemented

    def quillmane_baku_ability_2():
        return AbilityNotImplemented

    return quillmane_baku_ability_1, quillmane_baku_ability_2
@card("That Which Was Taken")
def that_which_was_taken(card, abilities):
    """Register ability stubs for That Which Was Taken.

    Fix: identically-named inner defs shadowed each other, so the tuple
    contained the last function twice; each ability now gets its own stub.
    """
    def that_which_was_taken_ability_1():
        return AbilityNotImplemented

    def that_which_was_taken_ability_2():
        return AbilityNotImplemented

    return that_which_was_taken_ability_1, that_which_was_taken_ability_2
@card("Higure, the Still Wind")
def higure_the_still_wind(card, abilities):
    """Register ability stubs for Higure, the Still Wind.

    Fix: all three abilities were defined under one name, so each definition
    shadowed the previous and the tuple held three references to the last
    function. Distinct names restore one stub per ability.
    """
    def higure_the_still_wind_ability_1():
        return AbilityNotImplemented

    def higure_the_still_wind_ability_2():
        return AbilityNotImplemented

    def higure_the_still_wind_ability_3():
        return AbilityNotImplemented

    return (
        higure_the_still_wind_ability_1,
        higure_the_still_wind_ability_2,
        higure_the_still_wind_ability_3,
    )
@card("Ornate Kanzashi")
def ornate_kanzashi(card, abilities):
def ornate_kanzashi():
return AbilityNotImplemented
return ornate_kanzashi,
@card("Soratami Mindsweeper")
def soratami_mindsweeper(card, abilities):
    """Register ability stubs for Soratami Mindsweeper.

    Fix: the second identically-named inner def shadowed the first; each
    ability now has a distinct stub function.
    """
    def soratami_mindsweeper_ability_1():
        return AbilityNotImplemented

    def soratami_mindsweeper_ability_2():
        return AbilityNotImplemented

    return soratami_mindsweeper_ability_1, soratami_mindsweeper_ability_2
@card("Kyoki, Sanity's Eclipse")
def kyoki_sanitys_eclipse(card, abilities):
def kyoki_sanitys_eclipse():
return AbilityNotImplemented
return kyoki_sanitys_eclipse,
@card("Sakiko, Mother of Summer")
def sakiko_mother_of_summer(card, abilities):
def sakiko_mother_of_summer():
return AbilityNotImplemented
return sakiko_mother_of_summer,
@card("Kaijin of the Vanishing Touch")
def kaijin_of_the_vanishing_touch(card, abilities):
def kaijin_of_the_vanishing_touch():
return AbilityNotImplemented
def kaijin_of_the_vanishing_touch():
return AbilityNotImplemented
return kaijin_of_the_vanishing_touch, kaijin_of_the_vanishing_touch,
@card("Mirror Gallery")
def mirror_gallery(card, abilities):
def mirror_gallery():
return AbilityNotImplemented
return mirror_gallery,
@card("Kumano's Blessing")
def kumanos_blessing(card, abilities):
    """Register ability stubs for Kumano's Blessing.

    Fix: three identically-named inner defs shadowed one another, so the
    tuple held the last function three times; now one stub per ability.
    """
    def kumanos_blessing_ability_1():
        return AbilityNotImplemented

    def kumanos_blessing_ability_2():
        return AbilityNotImplemented

    def kumanos_blessing_ability_3():
        return AbilityNotImplemented

    return (
        kumanos_blessing_ability_1,
        kumanos_blessing_ability_2,
        kumanos_blessing_ability_3,
    )
@card("Hired Muscle")
def hired_muscle(card, abilities):
    """Register ability stubs for Hired Muscle.

    Fix: three identically-named inner defs shadowed one another; each
    ability now gets its own stub function.
    """
    def hired_muscle_ability_1():
        return AbilityNotImplemented

    def hired_muscle_ability_2():
        return AbilityNotImplemented

    def hired_muscle_ability_3():
        return AbilityNotImplemented

    return hired_muscle_ability_1, hired_muscle_ability_2, hired_muscle_ability_3
@card("Tomorrow, Azami's Familiar")
def tomorrow_azamis_familiar(card, abilities):
def tomorrow_azamis_familiar():
return AbilityNotImplemented
return tomorrow_azamis_familiar,
@card("Jaraku the Interloper")
def jaraku_the_interloper(card, abilities):
def jaraku_the_interloper():
return AbilityNotImplemented
def jaraku_the_interloper():
return AbilityNotImplemented
return jaraku_the_interloper, jaraku_the_interloper,
@card("Empty-Shrine Kannushi")
def emptyshrine_kannushi(card, abilities):
def emptyshrine_kannushi():
return AbilityNotImplemented
return emptyshrine_kannushi,
@card("Shirei, Shizo's Caretaker")
def shirei_shizos_caretaker(card, abilities):
def shirei_shizos_caretaker():
return AbilityNotImplemented
return shirei_shizos_caretaker,
@card("Final Judgment")
def final_judgment(card, abilities):
def final_judgment():
return AbilityNotImplemented
return final_judgment,
@card("Call for Blood")
def call_for_blood(card, abilities):
def call_for_blood():
return AbilityNotImplemented
def call_for_blood():
return AbilityNotImplemented
return call_for_blood, call_for_blood,
@card("Roar of Jukai")
def roar_of_jukai(card, abilities):
def roar_of_jukai():
return AbilityNotImplemented
def roar_of_jukai():
return AbilityNotImplemented
return roar_of_jukai, roar_of_jukai,
@card("Minamo Sightbender")
def minamo_sightbender(card, abilities):
def minamo_sightbender():
return AbilityNotImplemented
return minamo_sightbender,
@card("Tendo Ice Bridge")
def tendo_ice_bridge(card, abilities):
    """Register ability stubs for Tendo Ice Bridge.

    Fix: three identically-named inner defs shadowed one another; each
    ability now gets its own stub function.
    """
    def tendo_ice_bridge_ability_1():
        return AbilityNotImplemented

    def tendo_ice_bridge_ability_2():
        return AbilityNotImplemented

    def tendo_ice_bridge_ability_3():
        return AbilityNotImplemented

    return (
        tendo_ice_bridge_ability_1,
        tendo_ice_bridge_ability_2,
        tendo_ice_bridge_ability_3,
    )
@card("Stream of Consciousness")
def stream_of_consciousness(card, abilities):
def stream_of_consciousness():
return AbilityNotImplemented
return stream_of_consciousness,
@card("Ogre Marauder")
def ogre_marauder(card, abilities):
def ogre_marauder():
return AbilityNotImplemented
return ogre_marauder,
@card("Veil of Secrecy")
def veil_of_secrecy(card, abilities):
def veil_of_secrecy():
return AbilityNotImplemented
def veil_of_secrecy():
return AbilityNotImplemented
return veil_of_secrecy, veil_of_secrecy,
@card("Sway of the Stars")
def sway_of_the_stars(card, abilities):
def sway_of_the_stars():
return AbilityNotImplemented
return sway_of_the_stars,
@card("Psychic Spear")
def psychic_spear(card, abilities):
def psychic_spear():
return AbilityNotImplemented
return psychic_spear,
@card("Petalmane Baku")
def petalmane_baku(card, abilities):
def petalmane_baku():
return AbilityNotImplemented
def petalmane_baku():
return AbilityNotImplemented
return petalmane_baku, petalmane_baku,
@card("Genju of the Spires")
def genju_of_the_spires(card, abilities):
def genju_of_the_spires():
return AbilityNotImplemented
def genju_of_the_spires():
return AbilityNotImplemented
def genju_of_the_spires():
return AbilityNotImplemented
return genju_of_the_spires, genju_of_the_spires, genju_of_the_spires,
@card("Mannichi, the Fevered Dream")
def mannichi_the_fevered_dream(card, abilities):
def mannichi_the_fevered_dream():
return AbilityNotImplemented
return mannichi_the_fevered_dream,
@card("Faithful Squire")
def faithful_squire(card, abilities):
def faithful_squire():
return AbilityNotImplemented
def faithful_squire():
return AbilityNotImplemented
def faithful_squire():
return AbilityNotImplemented
return faithful_squire, faithful_squire, faithful_squire,
@card("Oyobi, Who Split the Heavens")
def oyobi_who_split_the_heavens(card, abilities):
def oyobi_who_split_the_heavens():
return AbilityNotImplemented
def oyobi_who_split_the_heavens():
return AbilityNotImplemented
return oyobi_who_split_the_heavens, oyobi_who_split_the_heavens,
@card("Body of Jukai")
def body_of_jukai(card, abilities):
def body_of_jukai():
return AbilityNotImplemented
def body_of_jukai():
return AbilityNotImplemented
return body_of_jukai, body_of_jukai,
@card("Budoka Pupil")
def budoka_pupil(card, abilities):
def budoka_pupil():
return AbilityNotImplemented
def budoka_pupil():
return AbilityNotImplemented
def budoka_pupil():
return AbilityNotImplemented
return budoka_pupil, budoka_pupil, budoka_pupil,
@card("Ward of Piety")
def ward_of_piety(card, abilities):
def ward_of_piety():
return AbilityNotImplemented
def ward_of_piety():
return AbilityNotImplemented
return ward_of_piety, ward_of_piety,
@card("Clash of Realities")
def clash_of_realities(card, abilities):
def clash_of_realities():
return AbilityNotImplemented
def clash_of_realities():
return AbilityNotImplemented
return clash_of_realities, clash_of_realities,
@card("Crack the Earth")
def crack_the_earth(card, abilities):
def crack_the_earth():
return AbilityNotImplemented
return crack_the_earth,
@card("Patron of the Kitsune")
def patron_of_the_kitsune(card, abilities):
def patron_of_the_kitsune():
return AbilityNotImplemented
def patron_of_the_kitsune():
return AbilityNotImplemented
return patron_of_the_kitsune, patron_of_the_kitsune,
@card("Bile Urchin")
def bile_urchin(card, abilities):
def bile_urchin():
return AbilityNotImplemented
return bile_urchin,
@card("Genju of the Fens")
def genju_of_the_fens(card, abilities):
def genju_of_the_fens():
return AbilityNotImplemented
def genju_of_the_fens():
return AbilityNotImplemented
def genju_of_the_fens():
return AbilityNotImplemented
return genju_of_the_fens, genju_of_the_fens, genju_of_the_fens,
@card("Shuko")
def shuko(card, abilities):
def shuko():
return AbilityNotImplemented
def shuko():
return AbilityNotImplemented
return shuko, shuko,
@card("Shuriken")
def shuriken(card, abilities):
def shuriken():
return AbilityNotImplemented
def shuriken():
return AbilityNotImplemented
return shuriken, shuriken,
@card("Child of Thorns")
def child_of_thorns(card, abilities):
def child_of_thorns():
return AbilityNotImplemented
return child_of_thorns,
@card("Goryo's Vengeance")
def goryos_vengeance(card, abilities):
def goryos_vengeance():
return AbilityNotImplemented
def goryos_vengeance():
return AbilityNotImplemented
return goryos_vengeance, goryos_vengeance,
@card("Ink-Eyes, Servant of Oni")
def inkeyes_servant_of_oni(card, abilities):
def inkeyes_servant_of_oni():
return AbilityNotImplemented
def inkeyes_servant_of_oni():
return AbilityNotImplemented
def inkeyes_servant_of_oni():
return AbilityNotImplemented
return inkeyes_servant_of_oni, inkeyes_servant_of_oni, inkeyes_servant_of_oni,
@card("Kodama of the Center Tree")
def kodama_of_the_center_tree(card, abilities):
def kodama_of_the_center_tree():
return AbilityNotImplemented
def kodama_of_the_center_tree():
return AbilityNotImplemented
return kodama_of_the_center_tree, kodama_of_the_center_tree,
@card("Hokori, Dust Drinker")
def hokori_dust_drinker(card, abilities):
def hokori_dust_drinker():
return AbilityNotImplemented
def hokori_dust_drinker():
return AbilityNotImplemented
return hokori_dust_drinker, hokori_dust_drinker,
@card("Ribbons of the Reikai")
def ribbons_of_the_reikai(card, abilities):
def ribbons_of_the_reikai():
return AbilityNotImplemented
return ribbons_of_the_reikai,
@card("Day of Destiny")
def day_of_destiny(card, abilities):
def day_of_destiny():
return AbilityNotImplemented
return day_of_destiny,
@card("Heed the Mists")
def heed_the_mists(card, abilities):
def heed_the_mists():
return AbilityNotImplemented
return heed_the_mists,
@card("Iwamori of the Open Fist")
def iwamori_of_the_open_fist(card, abilities):
def iwamori_of_the_open_fist():
return AbilityNotImplemented
def iwamori_of_the_open_fist():
return AbilityNotImplemented
return iwamori_of_the_open_fist, iwamori_of_the_open_fist,
@card("Heart of Light")
def heart_of_light(card, abilities):
def heart_of_light():
return AbilityNotImplemented
def heart_of_light():
return AbilityNotImplemented
return heart_of_light, heart_of_light,
@card("Frostling")
def frostling(card, abilities):
def frostling():
return AbilityNotImplemented
return frostling,
@card("Toshiro Umezawa")
def toshiro_umezawa(card, abilities):
def toshiro_umezawa():
return AbilityNotImplemented
def toshiro_umezawa():
return AbilityNotImplemented
return toshiro_umezawa, toshiro_umezawa,
@card("Pus Kami")
def pus_kami(card, abilities):
def pus_kami():
return AbilityNotImplemented
def pus_kami():
return AbilityNotImplemented
return pus_kami, pus_kami,
@card("Uproot")
def uproot(card, abilities):
def uproot():
return AbilityNotImplemented
return uproot,
@card("Shizuko, Caller of Autumn")
def shizuko_caller_of_autumn(card, abilities):
def shizuko_caller_of_autumn():
return AbilityNotImplemented
return shizuko_caller_of_autumn,
@card("Fumiko the Lowblood")
def fumiko_the_lowblood(card, abilities):
def fumiko_the_lowblood():
return AbilityNotImplemented
def fumiko_the_lowblood():
return AbilityNotImplemented
return fumiko_the_lowblood, fumiko_the_lowblood,
@card("Blademane Baku")
def blademane_baku(card, abilities):
def blademane_baku():
return AbilityNotImplemented
def blademane_baku():
return AbilityNotImplemented
return blademane_baku, blademane_baku,
@card("Mending Hands")
def mending_hands(card, abilities):
def mending_hands():
return AbilityNotImplemented
return mending_hands,
@card("Patron of the Akki")
def patron_of_the_akki(card, abilities):
def patron_of_the_akki():
return AbilityNotImplemented
def patron_of_the_akki():
return AbilityNotImplemented
return patron_of_the_akki, patron_of_the_akki,
@card("Goblin Cohort")
def goblin_cohort(card, abilities):
def goblin_cohort():
return AbilityNotImplemented
return goblin_cohort,
@card("Patron of the Nezumi")
def patron_of_the_nezumi(card, abilities):
def patron_of_the_nezumi():
return AbilityNotImplemented
def patron_of_the_nezumi():
return AbilityNotImplemented
return patron_of_the_nezumi, patron_of_the_nezumi,
@card("Genju of the Fields")
def genju_of_the_fields(card, abilities):
def genju_of_the_fields():
return AbilityNotImplemented
def genju_of_the_fields():
return AbilityNotImplemented
def genju_of_the_fields():
return AbilityNotImplemented
return genju_of_the_fields, genju_of_the_fields, genju_of_the_fields,
@card("Crawling Filth")
def crawling_filth(card, abilities):
def crawling_filth():
return AbilityNotImplemented
def crawling_filth():
return AbilityNotImplemented
return crawling_filth, crawling_filth,
@card("Kitsune Palliator")
def kitsune_palliator(card, abilities):
def kitsune_palliator():
return AbilityNotImplemented
return kitsune_palliator,
@card("Blessing of Leeches")
def blessing_of_leeches(card, abilities):
    """Register ability stubs for Blessing of Leeches.

    Fix: four identically-named inner defs shadowed one another, so the
    tuple held the last function four times; now one stub per ability.
    """
    def blessing_of_leeches_ability_1():
        return AbilityNotImplemented

    def blessing_of_leeches_ability_2():
        return AbilityNotImplemented

    def blessing_of_leeches_ability_3():
        return AbilityNotImplemented

    def blessing_of_leeches_ability_4():
        return AbilityNotImplemented

    return (
        blessing_of_leeches_ability_1,
        blessing_of_leeches_ability_2,
        blessing_of_leeches_ability_3,
        blessing_of_leeches_ability_4,
    )
@card("Chisei, Heart of Oceans")
def chisei_heart_of_oceans(card, abilities):
def chisei_heart_of_oceans():
return AbilityNotImplemented
def chisei_heart_of_oceans():
return AbilityNotImplemented
return chisei_heart_of_oceans, chisei_heart_of_oceans,
@card("Mark of Sakiko")
def mark_of_sakiko(card, abilities):
def mark_of_sakiko():
return AbilityNotImplemented
def mark_of_sakiko():
return AbilityNotImplemented
return mark_of_sakiko, mark_of_sakiko,
@card("Kami of the Honored Dead")
def kami_of_the_honored_dead(card, abilities):
def kami_of_the_honored_dead():
return AbilityNotImplemented
def kami_of_the_honored_dead():
return AbilityNotImplemented
def kami_of_the_honored_dead():
return AbilityNotImplemented
return kami_of_the_honored_dead, kami_of_the_honored_dead, kami_of_the_honored_dead,
@card("Forked-Branch Garami")
def forkedbranch_garami(card, abilities):
def forkedbranch_garami():
return AbilityNotImplemented
return forkedbranch_garami,
@card("Isao, Enlightened Bushi")
def isao_enlightened_bushi(card, abilities):
def isao_enlightened_bushi():
return AbilityNotImplemented
def isao_enlightened_bushi():
return AbilityNotImplemented
def isao_enlightened_bushi():
return AbilityNotImplemented
return isao_enlightened_bushi, isao_enlightened_bushi, isao_enlightened_bushi,
@card("In the Web of War")
def in_the_web_of_war(card, abilities):
def in_the_web_of_war():
return AbilityNotImplemented
return in_the_web_of_war,
@card("Mark of the Oni")
def mark_of_the_oni(card, abilities):
def mark_of_the_oni():
return AbilityNotImplemented
def mark_of_the_oni():
return AbilityNotImplemented
def mark_of_the_oni():
return AbilityNotImplemented
return mark_of_the_oni, mark_of_the_oni, mark_of_the_oni,
@card("Mistblade Shinobi")
def mistblade_shinobi(card, abilities):
def mistblade_shinobi():
return AbilityNotImplemented
def mistblade_shinobi():
return AbilityNotImplemented
return mistblade_shinobi, mistblade_shinobi,
@card("Throat Slitter")
def throat_slitter(card, abilities):
def throat_slitter():
return AbilityNotImplemented
def throat_slitter():
return AbilityNotImplemented
return throat_slitter, throat_slitter,
@card("Indebted Samurai")
def indebted_samurai(card, abilities):
def indebted_samurai():
return AbilityNotImplemented
def indebted_samurai():
return AbilityNotImplemented
return indebted_samurai, indebted_samurai,
@card("Terashi's Verdict")
def terashis_verdict(card, abilities):
def terashis_verdict():
return AbilityNotImplemented
return terashis_verdict,
@card("Horobi's Whisper")
def horobis_whisper(card, abilities):
def horobis_whisper():
return AbilityNotImplemented
def horobis_whisper():
return AbilityNotImplemented
return horobis_whisper, horobis_whisper,
@card("Opal-Eye, Konda's Yojimbo")
def opaleye_kondas_yojimbo(card, abilities):
    """Register ability stubs for Opal-Eye, Konda's Yojimbo.

    Fix: four identically-named inner defs shadowed one another; each
    ability now gets its own stub function.
    """
    def opaleye_kondas_yojimbo_ability_1():
        return AbilityNotImplemented

    def opaleye_kondas_yojimbo_ability_2():
        return AbilityNotImplemented

    def opaleye_kondas_yojimbo_ability_3():
        return AbilityNotImplemented

    def opaleye_kondas_yojimbo_ability_4():
        return AbilityNotImplemented

    return (
        opaleye_kondas_yojimbo_ability_1,
        opaleye_kondas_yojimbo_ability_2,
        opaleye_kondas_yojimbo_ability_3,
        opaleye_kondas_yojimbo_ability_4,
    )
@card("Scarmaker")
def scarmaker(card, abilities):
def scarmaker():
return AbilityNotImplemented
def scarmaker():
return AbilityNotImplemented
return scarmaker, scarmaker,
@card("Neko-Te")
def nekote(card, abilities):
def nekote():
return AbilityNotImplemented
def nekote():
return AbilityNotImplemented
def nekote():
return AbilityNotImplemented
return nekote, nekote, nekote,
@card("Genju of the Falls")
def genju_of_the_falls(card, abilities):
def genju_of_the_falls():
return AbilityNotImplemented
def genju_of_the_falls():
return AbilityNotImplemented
def genju_of_the_falls():
return AbilityNotImplemented
return genju_of_the_falls, genju_of_the_falls, genju_of_the_falls,
@card("Reduce to Dreams")
def reduce_to_dreams(card, abilities):
def reduce_to_dreams():
return AbilityNotImplemented
return reduce_to_dreams,
@card("Genju of the Cedars")
def genju_of_the_cedars(card, abilities):
def genju_of_the_cedars():
return AbilityNotImplemented
def genju_of_the_cedars():
return AbilityNotImplemented
def genju_of_the_cedars():
return AbilityNotImplemented
return genju_of_the_cedars, genju_of_the_cedars, genju_of_the_cedars,
@card("Toils of Night and Day")
def toils_of_night_and_day(card, abilities):
def toils_of_night_and_day():
return AbilityNotImplemented
return toils_of_night_and_day,
@card("Hundred-Talon Strike")
def hundredtalon_strike(card, abilities):
def hundredtalon_strike():
return AbilityNotImplemented
def hundredtalon_strike():
return AbilityNotImplemented
return hundredtalon_strike, hundredtalon_strike,
@card("Callow Jushi")
def callow_jushi(card, abilities):
def callow_jushi():
return AbilityNotImplemented
def callow_jushi():
return AbilityNotImplemented
def callow_jushi():
return AbilityNotImplemented
return callow_jushi, callow_jushi, callow_jushi,
@card("Yukora, the Prisoner")
def yukora_the_prisoner(card, abilities):
def yukora_the_prisoner():
return AbilityNotImplemented
return yukora_the_prisoner,
@card("Skullsnatcher")
def skullsnatcher(card, abilities):
def skullsnatcher():
return AbilityNotImplemented
def skullsnatcher():
return AbilityNotImplemented
return skullsnatcher, skullsnatcher,
@card("Umezawa's Jitte")
def umezawas_jitte(card, abilities):
def umezawas_jitte():
return AbilityNotImplemented
def umezawas_jitte():
return AbilityNotImplemented
def umezawas_jitte():
return AbilityNotImplemented
return umezawas_jitte, umezawas_jitte, umezawas_jitte,
@card("Baku Altar")
def baku_altar(card, abilities):
def baku_altar():
return AbilityNotImplemented
def baku_altar():
return AbilityNotImplemented
return baku_altar, baku_altar,
@card("Shinka Gatekeeper")
def shinka_gatekeeper(card, abilities):
def shinka_gatekeeper():
return AbilityNotImplemented
return shinka_gatekeeper,
@card("Ogre Recluse")
def ogre_recluse(card, abilities):
def ogre_recluse():
return AbilityNotImplemented
return ogre_recluse,
@card("Matsu-Tribe Sniper")
def matsutribe_sniper(card, abilities):
def matsutribe_sniper():
return AbilityNotImplemented
def matsutribe_sniper():
return AbilityNotImplemented
return matsutribe_sniper, matsutribe_sniper,
@card("Disrupting Shoal")
def disrupting_shoal(card, abilities):
def disrupting_shoal():
return AbilityNotImplemented
def disrupting_shoal():
return AbilityNotImplemented
return disrupting_shoal, disrupting_shoal,
@card("Minamo's Meddling")
def minamos_meddling(card, abilities):
def minamos_meddling():
return AbilityNotImplemented
return minamos_meddling,
@card("Akki Blizzard-Herder")
def akki_blizzardherder(card, abilities):
def akki_blizzardherder():
return AbilityNotImplemented
return akki_blizzardherder,
@card("Ishi-Ishi, Akki Crackshot")
def ishiishi_akki_crackshot(card, abilities):
def ishiishi_akki_crackshot():
return AbilityNotImplemented
return ishiishi_akki_crackshot,
@card("Nezumi Shadow-Watcher")
def nezumi_shadowwatcher(card, abilities):
def nezumi_shadowwatcher():
return AbilityNotImplemented
return nezumi_shadowwatcher,
@card("Azamuki, Treachery Incarnate")
def azamuki_treachery_incarnate(card, abilities):
def azamuki_treachery_incarnate():
return AbilityNotImplemented
def azamuki_treachery_incarnate():
return AbilityNotImplemented
return azamuki_treachery_incarnate, azamuki_treachery_incarnate,
@card("Ninja of the Deep Hours")
def ninja_of_the_deep_hours(card, abilities):
def ninja_of_the_deep_hours():
return AbilityNotImplemented
def ninja_of_the_deep_hours():
return AbilityNotImplemented
return ninja_of_the_deep_hours, ninja_of_the_deep_hours,
@card("Slumbering Tora")
def slumbering_tora(card, abilities):
def slumbering_tora():
return AbilityNotImplemented
return slumbering_tora,
@card("Vital Surge")
def vital_surge(card, abilities):
def vital_surge():
return AbilityNotImplemented
def vital_surge():
return AbilityNotImplemented
return vital_surge, vital_surge,
@card("Traproot Kami")
def traproot_kami(card, abilities):
def traproot_kami():
return AbilityNotImplemented
def traproot_kami():
return AbilityNotImplemented
return traproot_kami, traproot_kami,
@card("Split-Tail Miko")
def splittail_miko(card, abilities):
def splittail_miko():
return AbilityNotImplemented
return splittail_miko,
@card("Sakura-Tribe Springcaller")
def sakuratribe_springcaller(card, abilities):
def sakuratribe_springcaller():
return AbilityNotImplemented
return sakuratribe_springcaller,
@card("Kami of Tattered Shoji")
def kami_of_tattered_shoji(card, abilities):
def kami_of_tattered_shoji():
return AbilityNotImplemented
return kami_of_tattered_shoji,
@card("Stir the Grave")
def stir_the_grave(card, abilities):
def stir_the_grave():
return AbilityNotImplemented
return stir_the_grave,
@card("Patron of the Orochi")
def patron_of_the_orochi(card, abilities):
def patron_of_the_orochi():
return AbilityNotImplemented
def patron_of_the_orochi():
return AbilityNotImplemented
return patron_of_the_orochi, patron_of_the_orochi,
@card("Hero's Demise")
def heros_demise(card, abilities):
def heros_demise():
return AbilityNotImplemented
return heros_demise,
@card("Flames of the Blood Hand")
def flames_of_the_blood_hand(card, abilities):
def flames_of_the_blood_hand():
return AbilityNotImplemented
return flames_of_the_blood_hand,
@card("Unchecked Growth")
def unchecked_growth(card, abilities):
def unchecked_growth():
return AbilityNotImplemented
return unchecked_growth,
@card("First Volley")
def first_volley(card, abilities):
def first_volley():
return AbilityNotImplemented
return first_volley,
@card("Lifegift")
def lifegift(card, abilities):
def lifegift():
return AbilityNotImplemented
return lifegift,
@card("Gods' Eye, Gate to the Reikai")
def gods_eye_gate_to_the_reikai(card, abilities):
def gods_eye_gate_to_the_reikai():
return AbilityNotImplemented
def gods_eye_gate_to_the_reikai():
return AbilityNotImplemented
return gods_eye_gate_to_the_reikai, gods_eye_gate_to_the_reikai,
@card("Scourge of Numai")
def scourge_of_numai(card, abilities):
def scourge_of_numai():
return AbilityNotImplemented
return scourge_of_numai,
@card("Takenuma Bleeder")
def takenuma_bleeder(card, abilities):
def takenuma_bleeder():
return AbilityNotImplemented
return takenuma_bleeder,
@card("Sickening Shoal")
def sickening_shoal(card, abilities):
def sickening_shoal():
return AbilityNotImplemented
def sickening_shoal():
return AbilityNotImplemented
return sickening_shoal, sickening_shoal,
@card("Aura Barbs")
def aura_barbs(card, abilities):
def aura_barbs():
return AbilityNotImplemented
return aura_barbs,
@card("Tallowisp")
def tallowisp(card, abilities):
def tallowisp():
return AbilityNotImplemented
return tallowisp,
@card("Skullmane Baku")
def skullmane_baku(card, abilities):
def skullmane_baku():
return AbilityNotImplemented
def skullmane_baku():
return AbilityNotImplemented
return skullmane_baku, skullmane_baku,
@card("Shimmering Glasskite")
def shimmering_glasskite(card, abilities):
def shimmering_glasskite():
return AbilityNotImplemented
def shimmering_glasskite():
return AbilityNotImplemented
return shimmering_glasskite, shimmering_glasskite,
@card("Twist Allegiance")
def twist_allegiance(card, abilities):
def twist_allegiance():
return AbilityNotImplemented
return twist_allegiance,
@card("Okiba-Gang Shinobi")
def okibagang_shinobi(card, abilities):
def okibagang_shinobi():
return AbilityNotImplemented
def okibagang_shinobi():
return AbilityNotImplemented
return okibagang_shinobi, okibagang_shinobi,
@card("Scaled Hulk")
def scaled_hulk(card, abilities):
def scaled_hulk():
return AbilityNotImplemented
return scaled_hulk,
@card("Akki Raider")
def akki_raider(card, abilities):
def akki_raider():
return AbilityNotImplemented
return akki_raider,
@card("Ronin Warclub")
def ronin_warclub(card, abilities):
def ronin_warclub():
return AbilityNotImplemented
def ronin_warclub():
return AbilityNotImplemented
def ronin_warclub():
return AbilityNotImplemented
return ronin_warclub, ronin_warclub, ronin_warclub,
@card("Ashen Monstrosity")
def ashen_monstrosity(card, abilities):
def ashen_monstrosity():
return AbilityNotImplemented
def ashen_monstrosity():
return AbilityNotImplemented
return ashen_monstrosity, ashen_monstrosity,
@card("Jetting Glasskite")
def jetting_glasskite(card, abilities):
def jetting_glasskite():
return AbilityNotImplemented
def jetting_glasskite():
return AbilityNotImplemented
return jetting_glasskite, jetting_glasskite,
@card("Three Tragedies")
def three_tragedies(card, abilities):
def three_tragedies():
return AbilityNotImplemented
return three_tragedies,
@card("Kami of False Hope")
def kami_of_false_hope(card, abilities):
def kami_of_false_hope():
return AbilityNotImplemented
return kami_of_false_hope,
@card("Overblaze")
def overblaze(card, abilities):
def overblaze():
return AbilityNotImplemented
def overblaze():
return AbilityNotImplemented
return overblaze, overblaze,
@card("Moonlit Strider")
def moonlit_strider(card, abilities):
def moonlit_strider():
return AbilityNotImplemented
def moonlit_strider():
return AbilityNotImplemented
return moonlit_strider, moonlit_strider,
@card("Ronin Cliffrider")
def ronin_cliffrider(card, abilities):
def ronin_cliffrider():
return AbilityNotImplemented
def ronin_cliffrider():
return AbilityNotImplemented
return ronin_cliffrider, ronin_cliffrider,
@card("Waxmane Baku")
def waxmane_baku(card, abilities):
def waxmane_baku():
return AbilityNotImplemented
def waxmane_baku():
return AbilityNotImplemented
return waxmane_baku, waxmane_baku,
@card("Kaiso, Memory of Loyalty")
def kaiso_memory_of_loyalty(card, abilities):
def kaiso_memory_of_loyalty():
return AbilityNotImplemented
def kaiso_memory_of_loyalty():
return AbilityNotImplemented
def kaiso_memory_of_loyalty():
return AbilityNotImplemented
return kaiso_memory_of_loyalty, kaiso_memory_of_loyalty, kaiso_memory_of_loyalty,
@card("Orb of Dreams")
def orb_of_dreams(card, abilities):
def orb_of_dreams():
return AbilityNotImplemented
return orb_of_dreams,
@card("Sosuke's Summons")
def sosukes_summons(card, abilities):
def sosukes_summons():
return AbilityNotImplemented
def sosukes_summons():
return AbilityNotImplemented
return sosukes_summons, sosukes_summons,
@card("Blazing Shoal")
def blazing_shoal(card, abilities):
def blazing_shoal():
return AbilityNotImplemented
def blazing_shoal():
return AbilityNotImplemented
return blazing_shoal, blazing_shoal,
@card("Torrent of Stone")
def torrent_of_stone(card, abilities):
def torrent_of_stone():
return AbilityNotImplemented
def torrent_of_stone():
return AbilityNotImplemented
return torrent_of_stone, torrent_of_stone,
@card("Kentaro, the Smiling Cat")
def kentaro_the_smiling_cat(card, abilities):
def kentaro_the_smiling_cat():
return AbilityNotImplemented
def kentaro_the_smiling_cat():
return AbilityNotImplemented
return kentaro_the_smiling_cat, kentaro_the_smiling_cat,
@card("Terashi's Grasp")
def terashis_grasp(card, abilities):
def terashis_grasp():
return AbilityNotImplemented
return terashis_grasp,
@card("Shining Shoal")
def shining_shoal(card, abilities):
def shining_shoal():
return AbilityNotImplemented
def shining_shoal():
return AbilityNotImplemented
return shining_shoal, shining_shoal,
@card("Nourishing Shoal")
def nourishing_shoal(card, abilities):
def nourishing_shoal():
return AbilityNotImplemented
def nourishing_shoal():
return AbilityNotImplemented
return nourishing_shoal, nourishing_shoal,
@card("Genju of the Realm")
def genju_of_the_realm(card, abilities):
def genju_of_the_realm():
return AbilityNotImplemented
def genju_of_the_realm():
return AbilityNotImplemented
def genju_of_the_realm():
return AbilityNotImplemented
return genju_of_the_realm, genju_of_the_realm, genju_of_the_realm,
@card("Ichiga, Who Topples Oaks")
def ichiga_who_topples_oaks(card, abilities):
def ichiga_who_topples_oaks():
return AbilityNotImplemented
def ichiga_who_topples_oaks():
return AbilityNotImplemented
def ichiga_who_topples_oaks():
return AbilityNotImplemented
return ichiga_who_topples_oaks, ichiga_who_topples_oaks, ichiga_who_topples_oaks,
@card("Cunning Bandit")
def cunning_bandit(card, abilities):
def cunning_bandit():
return AbilityNotImplemented
def cunning_bandit():
return AbilityNotImplemented
def cunning_bandit():
return AbilityNotImplemented
return cunning_bandit, cunning_bandit, cunning_bandit,
@card("Yomiji, Who Bars the Way")
def yomiji_who_bars_the_way(card, abilities):
def yomiji_who_bars_the_way():
return AbilityNotImplemented
return yomiji_who_bars_the_way,
@card("Silverstorm Samurai")
def silverstorm_samurai(card, abilities):
def silverstorm_samurai():
return AbilityNotImplemented
def silverstorm_samurai():
return AbilityNotImplemented
return silverstorm_samurai, silverstorm_samurai,
@card("Enshrined Memories")
def enshrined_memories(card, abilities):
def enshrined_memories():
return AbilityNotImplemented
return enshrined_memories,
@card("Kira, Great Glass-Spinner")
def kira_great_glassspinner(card, abilities):
def kira_great_glassspinner():
return AbilityNotImplemented
def kira_great_glassspinner():
return AbilityNotImplemented
return kira_great_glassspinner, kira_great_glassspinner,
@card("Blinding Powder")
def blinding_powder(card, abilities):
def blinding_powder():
return AbilityNotImplemented
def blinding_powder():
return AbilityNotImplemented
return blinding_powder, blinding_powder,
@card("Harbinger of Spring")
def harbinger_of_spring(card, abilities):
def harbinger_of_spring():
return AbilityNotImplemented
def harbinger_of_spring():
return AbilityNotImplemented
return harbinger_of_spring, harbinger_of_spring,
@card("Loam Dweller")
def loam_dweller(card, abilities):
def loam_dweller():
return AbilityNotImplemented
return loam_dweller,
@card("Floodbringer")
def floodbringer(card, abilities):
def floodbringer():
return AbilityNotImplemented
def floodbringer():
return AbilityNotImplemented
return floodbringer, floodbringer,
@card("Walker of Secret Ways")
def walker_of_secret_ways(card, abilities):
    """Walker of Secret Ways: three unimplemented ability stubs."""
    def walker_of_secret_ways():
        return AbilityNotImplemented
    return (walker_of_secret_ways,) * 3
@card("Takeno's Cavalry")
def takenos_cavalry(card, abilities):
    """Takeno's Cavalry: two unimplemented ability stubs."""
    def takenos_cavalry():
        return AbilityNotImplemented
    return (takenos_cavalry,) * 2
@card("Patron of the Moon")
def patron_of_the_moon(card, abilities):
    """Patron of the Moon: three unimplemented ability stubs."""
    def patron_of_the_moon():
        return AbilityNotImplemented
    return (patron_of_the_moon,) * 3
@card("Threads of Disloyalty")
def threads_of_disloyalty(card, abilities):
    """Threads of Disloyalty: two unimplemented ability stubs."""
    def threads_of_disloyalty():
        return AbilityNotImplemented
    return (threads_of_disloyalty,) * 2
import subprocess
import requests
import spacy
import sys
import json
import os
import glob
import geocoder
from fuzzywuzzy import process, fuzz
from sklearn.cluster import AgglomerativeClustering
import numpy as np
# spaCy entity labels that we treat as geographic locations.
ACCEPTED_TAGS = ["GPE", "LOC"]
# External geoparser entry point and the geonames hierarchy endpoint.
CMD_TEMPLATE = "./runGeoParse.sh"
GEONAME_URL = "http://api.geonames.org/hierarchyJSON?geonameId={}&username=ngds_adept&style=full"
# Tuning knobs for candidate filtering and clustering.
FUZZY_SIMILARITY_THRESHOLD = 0.85
NUM_CLUSTERS_PERCENT = 0.2
LOCATION_SIZE_THRESHOLD = 0.75
# Verbose logging is switched on with DEBUG=true in the environment.
DEBUG = os.environ.get("DEBUG", "").lower() == "true"
"""
TEMPORARY NOTES:
Currently: given the entities in a document:
1. will fuzzy string match the geoparse results and filter out the strings
that aren't close to the original term.
2. requests hierarchy of each remaining location (results from geoparse)
3. clusters the locations based on their continent and filters all but the
largest continents (hit based, not size)
4. clusters the locations based on their country and filters all but the
largest countries (hit based, not size)
5. clusters remaining results based on physical location
6. for each (remaining) entity that was found in the document, choose
geoparse result that belongs to the largest cluster, then remove
        all other occurrences before checking for the next result
TODO:
1. for the US, bring filtering down to the state level
2. do something different with multiword queries -- match each individual word?
3. get some benchmark results
4. write up for Andrew
"""
class NER:
    """Tag location entities in text documents, geoparse them, and pick the
    most plausible geonames candidate for each entity via fuzzy filtering,
    region-outlier removal and coordinate clustering."""

    def debug(self, msg):
        """Print *msg* when the module-level DEBUG flag is set."""
        if DEBUG:
            print(f"[DEBUG] {msg}\n")

    def __init__(self, files_location: str, output_path: str = "./output/"):
        # output_path now has a default so NER(path) alone works; the
        # __main__ block used to call NER("test/") and crash with TypeError.
        self.nlp = spacy.load("en_core_web_trf")
        self.files_location = files_location
        self.output_path = output_path
        self.load_docs()
        document_entities = self.tag_entities()
        self.run_geonorm(document_entities)

    def load_docs(self):
        """Read all *.txt / *text files in files_location into self.txt_docs."""
        print("Loading docs...")
        self.txt_docs = {}
        for file in os.listdir(self.files_location):
            if file.endswith(".txt") or file.endswith("text"):
                # os.path.join works whether or not files_location ends with a
                # separator.  BUG FIX: '\n'.join(f.readlines()) doubled every
                # line break because readlines() keeps the trailing newlines.
                with open(os.path.join(self.files_location, file), "r", encoding="utf8") as f:
                    self.txt_docs[file] = f.read()
                self.debug("file contents: " + str(self.txt_docs[file]))
        print("Docs loaded.")

    def tag_entities(self):
        """Collect the location entities of each document.

        returns: a dictionary mapping the document name to a list of strings
        (the entities), e.g. {doc1: [ent11, ent12, ...], doc2: [...], ...}
        """
        documents = {}
        for docname in self.txt_docs.keys():
            print(f"Tagging {docname}...")
            spacy_doc = self.nlp(self.txt_docs[docname])
            # Keep only entities whose label is a geographic tag.
            entities = [ent.text for ent in spacy_doc.ents
                        if ent.label_ in ACCEPTED_TAGS]
            documents[docname] = entities
            self.debug(f"\tFound {len(entities)} entities in the document")
        print("Done tagging documents")
        return documents

    def run_geonorm(self, documents):
        """Geoparse every document's entities, filter the candidates and write
        the intermediate and final results under output_path."""
        self.debug("Running geoparse")
        for docname in documents.keys():
            if len(documents[docname]) == 0:
                continue
            basename = os.path.splitext(os.path.basename(docname))[0]
            # Creates the cmd so it is ./runGeoParse.sh "ent1" "ent2" ...
            cmd = [CMD_TEMPLATE]
            for ent in documents[docname]:
                cmd.append(f"\"{ent}\"")
            # NOTE(review): shell=True with quoted-but-unescaped entity text;
            # an entity containing a double quote would break (or inject
            # into) the shell command line — consider shell=False with an
            # argument list.
            pipe = subprocess.run(" ".join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            with open(f"{self.output_path}/{basename}_geoparse_output.txt", 'w+', encoding='utf8') as f:
                f.write(pipe.stdout.decode('utf-8'))
            # Clean up the geoparser's temporary files.
            for tf in glob.glob("/tmp/geoparse*/*"):
                os.remove(tf)
            reranked = self.rerank_results(pipe.stdout.decode('utf-8'))
            self.debug("Saving reranked_results - (location, geoname_id)")
            with open(f"{self.output_path}/{basename}_reranked_results.txt", "w+", encoding="utf8") as f:
                f.write(str(reranked))
            document_results = []
            ent_idx = 0
            for location in reranked.keys():
                geoparse_results = reranked[location]
                for result in geoparse_results:
                    self.debug(f"Getting hierarchy of {result[0]} which was returned for {location} - {result[1]}")
                    # TODO: speed up runtime by running this in multiple
                    # threads, this is mainly just lots of external IO waiting
                    result_dict = self.get_geoname_hierarchy(result[1])
                    if result_dict is not None:
                        result_dict["NAME"] = result[0]
                        result_dict["ENTITY"] = location
                        result_dict["GROUP"] = ent_idx
                        # result_dict has ID,CONT,PCLI,LAT,LNG,NAME,GROUP
                        document_results.append(result_dict)
                ent_idx += 1
            self.debug("Saving result_dict - [{{ID:~,CONT:~...}},...]")
            with open(f"{self.output_path}/{basename}_result_dict.txt", "w+", encoding="utf8") as f:
                f.write(str(document_results))
            # Filter by continent then by country (hit based, not size).
            document_results = self.remove_region_outliers(document_results, "CONT")
            document_results = self.remove_region_outliers(document_results, "PCLI")
            X = [[float(result["LAT"]), float(result["LNG"])]
                 for result in document_results]
            clusters = self.cluster_locations(np.array(X))
            if clusters is None:
                print("Couldn't find clusters in this document! Skipping.")
                continue
            self.decide_final_results(document_results, clusters, ent_idx, basename)

    def decide_final_results(self, document_results, clusters, n_groups, basename):
        """For each entity group keep only the candidate that belongs to the
        largest coordinate cluster, and write the final list to disk.

        n_groups: the number of entities found in document that are remaining
        """
        final_document_results = []
        self.debug("Filtering final clusters")
        cluster_sizes = self.get_cluster_sizes(clusters)
        self.debug(f"cluster_sizes: {str(cluster_sizes)}")
        # Attach each result's cluster label.
        for i in range(len(document_results)):
            document_results[i]["CLUSTER"] = clusters.labels_[i]
        for group in range(n_groups):
            self.debug(f"filtering group {group}...")
            max_cluster_size = -1
            max_cluster_result = None
            for result in document_results:
                if result["GROUP"] != group:
                    continue
                self.debug(f"checking result {str(result)}")
                # BUG FIX: result["CLUSTER"] already *is* the cluster label;
                # the old code indexed clusters.labels_ with it a second time.
                cluster = result["CLUSTER"]
                if cluster_sizes[cluster] > max_cluster_size:
                    max_cluster_size = cluster_sizes[cluster]
                    max_cluster_result = result
            if max_cluster_result is not None:
                self.debug(f'max result for group {group} was from cluster {max_cluster_result["CLUSTER"]}')
                # BUG FIX: keep only the winning candidate for this group; the
                # old filter() re-added every other group's results once per
                # group, duplicating them in the output.
                final_document_results.append(max_cluster_result)
        self.debug("finished finalizing results...")
        self.debug(f"{str(document_results)}")
        with open(f"{self.output_path}/{basename}_final_results.txt", "w+", encoding="utf8") as f:
            f.write(str(final_document_results))

    def get_cluster_sizes(self, clusters):
        """Return a list holding the number of members of each cluster label."""
        cluster_sizes = [0] * clusters.n_clusters
        for label in clusters.labels_:
            cluster_sizes[label] += 1
        return cluster_sizes

    def remove_region_outliers(self, doc_results, level):
        """Given a list of results in the format returned by
        'get_geoname_hierarchy', find the region where the most entities
        reside and filter out entities not in that region.  *level* is the
        dict key indicating the regional level; works with "CONT" and "PCLI".
        """
        self.debug(f"Removing {level} outliers")
        cont_counts = {}
        max_val = 1
        for result in doc_results:
            cont_counts[result[level]] = cont_counts.get(result[level], 0) + 1
            if cont_counts[result[level]] > max_val:
                max_val = cont_counts[result[level]]
        self.debug(f"counts: {cont_counts}")
        filtered_results = []
        for result in doc_results:
            if cont_counts[result[level]] >= max_val * LOCATION_SIZE_THRESHOLD:
                filtered_results.append(result)
                self.debug("added to result")
        return filtered_results

    def get_geoname_hierarchy(self, id):
        """Given the geoname id of a location, construct a dictionary with the
        ID, CONT, PCLI, LAT and LNG of the location from geonames and return
        it.  If any of those fields aren't available, return None."""
        result = {"ID": id, "CONT": None, "PCLI": None, "LAT": -1, "LNG": -1}
        r = requests.get(GEONAME_URL.format(id))
        if r.status_code != 200:
            print(f"Invalid status code returned! {r.status_code}")
            # Was exit() preceded by a pointless local assignment.
            sys.exit(1)
        payload = json.loads(r.text)  # parse once instead of twice
        if "geonames" not in payload:
            return None
        data = payload["geonames"]
        if len(data) == 0:
            return None
        # The last hierarchy entry is the location itself.
        result["LAT"] = data[-1]["lat"]
        result["LNG"] = data[-1]["lng"]
        admin = ""
        for name in data:
            if "fcode" in name.keys() and name["fcode"] == "CONT":
                result["CONT"] = name["name"]
            elif "fcode" in name.keys() and name["fcode"] == "PCLI":
                result["PCLI"] = name["name"]
            elif "fcode" in name.keys() and name["fcode"] == "ADM1":
                admin = name["name"]
        # For the US, bring filtering down to the state level.
        if result["PCLI"] == "United States":
            result["PCLI"] = admin
        if result["CONT"] is None or result["PCLI"] is None:
            self.debug("Got NONE result")
            return None
        return result

    def cluster_locations(self, X):
        """Agglomeratively cluster an (N, 2) array of lat/lng points.
        Returns the fitted model, or None when clustering fails."""
        clusters = None
        self.debug(f"Fitting points: {X}")
        # At least one cluster, otherwise AgglomerativeClustering always
        # raises for small documents.
        n_clusters = max(1, int(X.shape[0] * NUM_CLUSTERS_PERCENT))
        ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward')
        try:
            clusters = ward.fit(X)
        except Exception:  # was a bare except:
            self.debug("Couldn't cluster locations!")
        return clusters

    def rerank_results(self, output):
        """Map each parsed location to its fuzzy-filtered candidate list:
        "location": [(loc1, geonorm_id), ...] — only the closest matches."""
        locations = output.split("\n\n")
        reranked = {}
        for loc in locations:
            reranked.update(self.rerank_location(loc.strip()))
        return reranked

    def rerank_location(self, output):
        """Parse one geoparse stanza (query line followed by 'name: id' lines)
        and keep only candidates fuzzily similar to the query."""
        lines = output.split("\n")
        selected_locs = [(loc.split(":")[0].strip(), loc.split(":")[1].strip()) for loc in lines[1:]]
        results = []
        for loc in selected_locs:
            similarity = fuzz.ratio(lines[0].strip(), loc[0])
            # BUG FIX: fuzz.ratio is on a 0-100 scale while the threshold
            # constant is a fraction; comparing against 0.85 let essentially
            # every candidate through, so scale the threshold.
            if similarity >= FUZZY_SIMILARITY_THRESHOLD * 100:
                results.append((loc[0], loc[1]))
        # NOTE(review): this sorts by the geoname id *string* — confirm it
        # should not sort by similarity instead.
        results.sort(key=lambda x: x[1], reverse=True)
        result_dict = {lines[0].strip(): results}
        return result_dict
if __name__ == "__main__":
    # Usage: ner.py <input-dir> [<output-dir>]
    if len(sys.argv) <= 1:
        print("Too few arguments given!")
        # BUG FIX: NER's constructor requires an output path as well; the old
        # call NER("test/") raised a TypeError before any work was done.
        ner = NER("test/", "./output/")
    else:
        path = sys.argv[1]
        outpath = sys.argv[2] if len(sys.argv) == 3 else "./output/"
        ner = NER(path, outpath)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE: this is a cookiecutter template file; the {{cookiecutter.*}}
# placeholders are substituted at project generation time, so the file is not
# valid Python until rendered.
from unittest import TestCase
from auror_azkaban_jobtype_{{cookiecutter.project_name.lower()}}.v2.params import {{cookiecutter.project_name}}
class {{cookiecutter.project_name}}ParamsTest(TestCase):
    def test_example(self):
        # Placeholder so the generated project starts with a passing suite.
        self.assertEqual("true", "true")
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.role import Role
from cohesity_management_sdk.exceptions.request_error_error_exception import RequestErrorErrorException
class RolesController(BaseController):
    """A Controller to access Endpoints in the cohesity_management_sdk API.

    Wraps the /public/roles endpoints: list (GET), create (POST),
    update (PUT) and delete (DELETE).
    """
    def __init__(self, client=None, call_back=None):
        super(RolesController, self).__init__(client, call_back)
        # Per-module logger; every endpoint method logs entry and errors here.
        self.logger = logging.getLogger(__name__)
    def delete_roles(self,
                     body=None):
        """Does a DELETE request to /public/roles.

        Returns Success if all the specified Roles are deleted.

        Args:
            body (RoleDeleteParameters, optional): Request to delete one or
                more Roles.

        Returns:
            void: Response from the API. No Content

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('delete_roles called.')
            # Prepare query URL
            self.logger.info('Preparing query URL for delete_roles.')
            _url_path = '/public/roles'
            _query_builder = Configuration.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare headers
            self.logger.info('Preparing headers for delete_roles.')
            _headers = {
                'content-type': 'application/json; charset=utf-8'
            }
            # Prepare and execute request
            self.logger.info('Preparing and executing request for delete_roles.')
            _request = self.http_client.delete(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
            AuthManager.apply(_request)
            _context = self.execute_request(_request, name = 'delete_roles')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info('Validating response for delete_roles.')
            # NOTE(review): status code 0 appears to be this SDK's marker for
            # a transport-level failure — confirm against base_controller.
            if _context.response.status_code == 0:
                raise RequestErrorErrorException('Error', _context)
            self.validate_response(_context)
        except Exception as e:
            # Log with traceback, then let the caller handle the failure.
            self.logger.error(e, exc_info = True)
            raise
    def get_roles(self,
                  name=None,
                  tenant_ids=None,
                  all_under_hierarchy=None):
        """Does a GET request to /public/roles.

        If the 'name' parameter is not specified, all roles defined on the
        Cohesity Cluster are returned. In addition, information about each
        role
        is returned such as the name, description, assigned privileges, etc.
        If an exact role name (such as COHESITY_VIEWER) is specified in the
        'name' parameter, only information about that single role is
        returned.

        Args:
            name (string, optional): Specifies the name of the role.
            tenant_ids (list of string, optional): TenantIds contains ids of
                the tenants for which objects are to be returned.
            all_under_hierarchy (bool, optional): AllUnderHierarchy specifies
                if objects of all the tenants under the hierarchy of the
                logged in user's organization should be returned.

        Returns:
            list of Role: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('get_roles called.')
            # Prepare query URL
            self.logger.info('Preparing query URL for get_roles.')
            _url_path = '/public/roles'
            _query_builder = Configuration.get_base_uri()
            _query_builder += _url_path
            _query_parameters = {
                'name': name,
                'tenantIds': tenant_ids,
                'allUnderHierarchy': all_under_hierarchy
            }
            _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
                _query_parameters, Configuration.array_serialization)
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare headers
            self.logger.info('Preparing headers for get_roles.')
            _headers = {
                'accept': 'application/json'
            }
            # Prepare and execute request
            self.logger.info('Preparing and executing request for get_roles.')
            _request = self.http_client.get(_query_url, headers=_headers)
            AuthManager.apply(_request)
            _context = self.execute_request(_request, name = 'get_roles')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info('Validating response for get_roles.')
            if _context.response.status_code == 0:
                raise RequestErrorErrorException('Error', _context)
            self.validate_response(_context)
            # Return appropriate type
            return APIHelper.json_deserialize(_context.response.raw_body, Role.from_dictionary)
        except Exception as e:
            self.logger.error(e, exc_info = True)
            raise
    def create_role(self,
                    body=None):
        """Does a POST request to /public/roles.

        Returns the new custom role that was created.
        A custom role is a user-defined role that is created using the REST
        API,
        the Cohesity Cluster or the CLI.

        Args:
            body (RoleCreateParameters, optional): Request to create a new
                custom Role.

        Returns:
            Role: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('create_role called.')
            # Prepare query URL
            self.logger.info('Preparing query URL for create_role.')
            _url_path = '/public/roles'
            _query_builder = Configuration.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare headers
            self.logger.info('Preparing headers for create_role.')
            _headers = {
                'accept': 'application/json',
                'content-type': 'application/json; charset=utf-8'
            }
            # Prepare and execute request
            self.logger.info('Preparing and executing request for create_role.')
            _request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
            AuthManager.apply(_request)
            _context = self.execute_request(_request, name = 'create_role')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info('Validating response for create_role.')
            if _context.response.status_code == 0:
                raise RequestErrorErrorException('Error', _context)
            self.validate_response(_context)
            # Return appropriate type
            return APIHelper.json_deserialize(_context.response.raw_body, Role.from_dictionary)
        except Exception as e:
            self.logger.error(e, exc_info = True)
            raise
    def update_role(self,
                    name,
                    body=None):
        """Does a PUT request to /public/roles/{name}.

        For example, you could update the privileges assigned to a Role.
        Returns the updated role.

        Args:
            name (string): Specifies the name of the role to update.
            body (RoleUpdateParameters, optional): Request to update a custom
                role.

        Returns:
            Role: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('update_role called.')
            # Validate required parameters
            self.logger.info('Validating required parameters for update_role.')
            self.validate_parameters(name=name)
            # Prepare query URL
            self.logger.info('Preparing query URL for update_role.')
            _url_path = '/public/roles/{name}'
            _url_path = APIHelper.append_url_with_template_parameters(_url_path, {
                'name': name
            })
            _query_builder = Configuration.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare headers
            self.logger.info('Preparing headers for update_role.')
            _headers = {
                'accept': 'application/json',
                'content-type': 'application/json; charset=utf-8'
            }
            # Prepare and execute request
            self.logger.info('Preparing and executing request for update_role.')
            _request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
            AuthManager.apply(_request)
            _context = self.execute_request(_request, name = 'update_role')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info('Validating response for update_role.')
            if _context.response.status_code == 0:
                raise RequestErrorErrorException('Error', _context)
            self.validate_response(_context)
            # Return appropriate type
            return APIHelper.json_deserialize(_context.response.raw_body, Role.from_dictionary)
        except Exception as e:
            self.logger.error(e, exc_info = True)
            raise
|
"API Datasets resource."
import http.client
import flask
import flask_cors
from datagraphics.datasets import (get_datasets_public,
get_datasets_all,
get_datasets_owner,
get_datasets_editor)
import datagraphics.user
from datagraphics import constants
from datagraphics import utils
from datagraphics.api import schema_definitions
blueprint = flask.Blueprint("api_datasets", __name__)
@blueprint.route("/public")
@flask_cors.cross_origin(methods=["GET"])
def public():
    "Get all public datasets."
    datasets = [
        {"href": flask.url_for("api_dataset.serve",
                               iuid=dataset["_id"],
                               _external=True),
         "title": dataset["title"],
         "owner": dataset["owner"],
         "modified": dataset["modified"]}
        for dataset in get_datasets_public(full=True)
    ]
    return utils.jsonify({"datasets": datasets},
                         schema=flask.url_for("api_schema.datasets",
                                              _external=True))
@blueprint.route("/user/<username>")
def user(username):
    "Get the datasets owned by the given user."
    if not datagraphics.user.am_admin_or_self(username=username):
        flask.abort(http.client.FORBIDDEN)
    datasets = [
        {"href": flask.url_for("api_dataset.serve",
                               iuid=iuid,
                               _external=True),
         "title": title,
         "modified": modified}
        for iuid, title, modified in get_datasets_owner(username)
    ]
    return utils.jsonify({"datasets": datasets},
                         schema=flask.url_for("api_schema.datasets",
                                              _external=True))
@blueprint.route("/user/<username>/editor")
def editor(username):
    "Get the datasets which the given user is editor of."
    if not datagraphics.user.am_admin_or_self(username=username):
        flask.abort(http.client.FORBIDDEN)
    datasets = [
        {"href": flask.url_for("api_dataset.serve",
                               iuid=iuid,
                               _external=True),
         "title": title,
         "modified": modified}
        for iuid, title, modified in get_datasets_editor(username)
    ]
    return utils.jsonify({"datasets": datasets},
                         schema=flask.url_for("api_schema.datasets",
                                              _external=True))
@blueprint.route("/all")
def all():
    "Get all datasets."
    # NOTE: the name shadows the builtin all(); kept because Flask derives
    # the endpoint name ('api_datasets.all') from the function name.
    if not flask.g.am_admin:
        flask.abort(http.client.FORBIDDEN)
    datasets = [
        {"href": flask.url_for("api_dataset.serve",
                               iuid=iuid,
                               _external=True),
         "title": title,
         "owner": owner,
         "modified": modified}
        for iuid, title, owner, modified in get_datasets_all()
    ]
    return utils.jsonify({"datasets": datasets},
                         schema=flask.url_for("api_schema.datasets",
                                              _external=True))
# JSON Schema describing the responses of the dataset endpoints above;
# served by the api_schema blueprint.
schema = {
    "$schema": constants.JSON_SCHEMA_URL,
    "title": "JSON Schema for API Datasets resource.",
    "type": "object",
    "properties": {
        "$id": {"type": "string", "format": "uri"},
        "timestamp": {"type": "string", "format": "date-time"},
        "datasets": {
            "type": "array",
            "items": schema_definitions.link
        }
    },
    "required": ["$id", "timestamp", "datasets"],
    "additionalProperties": False
}
|
# -*- coding: utf-8 -*-
# Imports
import numpy as np
import vtk
def read_vti(fname):
    """Read a VTK XML image-data file and return (velocity, pressure, grid).

    Returns the velocity components transposed to (component, x, y, z), the
    pressure field transposed the same way, and an np.mgrid over the integer
    bounds of the dataset.
    """
    # NOTE(review): vtkXMLPImageDataReader is the *parallel* (.pvti) reader;
    # confirm the inputs are .pvti files, otherwise vtkXMLImageDataReader
    # would be the expected class.
    reader = vtk.vtkXMLPImageDataReader()
    reader.SetFileName(fname)
    reader.Update()
    data = reader.GetOutput()
    pointData = data.GetPointData()
    # VTK reports (nx, ny, nz); reversed so the flat point array (x-fastest
    # ordering) reshapes into (nz, ny, nx).
    sh = data.GetDimensions()[::-1]
    ndims = len(sh)
    # get vector field
    v = np.array(pointData.GetVectors("Velocity")).reshape(sh + (ndims,))
    vec = []
    for d in range(ndims):
        a = v[..., d]
        vec.append(a)
    # get scalar field
    sca = np.array(pointData.GetScalars('Pressure')).reshape(sh + (1,))
    # Generate grid
    # nPoints = data.GetNumberOfPoints()
    (xmin, xmax, ymin, ymax, zmin, zmax) = data.GetBounds()
    # assumes unit spacing with integer bounds — TODO confirm
    grid3D = np.mgrid[xmin:xmax + 1, ymin:ymax + 1, zmin:zmax + 1]
    return np.transpose(np.array(vec), (0,3,2,1)), np.transpose(sca, (0,3,2,1)), grid3D
def read_vtr(fname):
    """Read a VTK XML rectilinear-grid file and return (velocity, pressure,
    coordinate arrays).

    Returns the velocity components transposed to (component, x, y, z), the
    pressure field as a 3-D array, and the x/y/z coordinate arrays.
    """
    # NOTE(review): vtkXMLPRectilinearGridReader is the *parallel* (.pvtr)
    # reader — confirm the input files match.
    reader = vtk.vtkXMLPRectilinearGridReader()
    reader.SetFileName(fname)
    reader.Update()
    data = reader.GetOutput()
    pointData = data.GetPointData()
    # Reversed dimensions so the flat point array reshapes as (nz, ny, nx).
    sh = data.GetDimensions()[::-1]
    ndims = len(sh)
    # get vector field
    v = np.array(pointData.GetVectors("Velocity")).reshape(sh + (ndims,))
    vec = []
    for d in range(ndims):
        a = v[..., d]
        vec.append(a)
    vec = np.array(vec)
    # get scalar field
    sca = np.array(pointData.GetScalars('Pressure')).reshape(sh + (1,))
    # get grid
    x = np.array(data.GetXCoordinates())
    y = np.array(data.GetYCoordinates())
    z = np.array(data.GetZCoordinates())
    # NOTE(review): the scalar transpose here, (3,2,1,0)[0], differs from
    # read_vti's (0,3,2,1) — confirm which convention downstream code expects.
    return np.transpose(vec, (0,3,2,1)), np.transpose(sca, (3,2,1,0))[0,:,:,:], np.array([x, y, z], dtype=object)
|
# PGD code, generic
import time
import pandas as pd
import numpy as np
from numpy import (array, dot, arccos, clip)
from numpy.linalg import norm
# Display precision for printed DataFrames (both lines set the same option).
pd.options.display.precision = 2
pd.set_option('display.precision', 2)
# Fixed seed so the random initializations below are repeatable.
np.random.seed(42)
# Input data: bus and line sheets of the 10-PQ-bus test case.
df_b = pd.read_excel('data_10PQ.xlsx', sheet_name="buses")
df_l = pd.read_excel('data_10PQ.xlsx', sheet_name="lines")
# BEGIN INITIALIZATION OF DATA
# Classify buses as slack / PQ / PV and build the index maps used to place
# entries in the block admittance matrix.
n_b = 0
n_pq = 0
n_pv = 0
pq = []
pv = []
pq0 = [] # store pq buses indices relative to its own
pv0 = [] # store pv buses indices relative to its own
d_pq = {} # dict of pq
d_pv = {} # dict of pv
for i in range(len(df_b)):
    if df_b.iloc[i, 4] == "slack": # index 0 is reserved for the slack bus
        pass
    elif df_b.iloc[i, 4] == "PQ":
        pq0.append(n_pq)
        d_pq[df_b.iloc[i, 0]] = n_pq
        n_b += 1
        n_pq += 1
        pq.append(df_b.iloc[i, 0] - 1)
    elif df_b.iloc[i, 4] == "PV":
        pv0.append(n_pv)
        d_pv[df_b.iloc[i, 0]] = n_pv
        n_b += 1
        n_pv += 1
        pv.append(df_b.iloc[i, 0] - 1)
n_l = len(df_l) # number of lines
V0 = df_b.iloc[0, 3] # the slack is always positioned in the first row
# Equivalent current injections from the slack bus into PQ/PV buses.
I0_pq = np.zeros(n_pq, dtype=complex)
I0_pv = np.zeros(n_pv, dtype=complex)
Y = np.zeros((n_b, n_b), dtype=complex) # I will build it with block matrices
Y11 = np.zeros((n_pq, n_pq), dtype=complex) # pq pq
Y12 = np.zeros((n_pq, n_pv), dtype=complex) # pq pv
Y21 = np.zeros((n_pv, n_pq), dtype=complex) # pv pq
Y22 = np.zeros((n_pv, n_pv), dtype=complex) # pv pv
# Assemble the admittance blocks line by line (series + shunt elements with
# a complex tap t; lines touching bus 0 contribute to I0 instead of Y).
for i in range(n_l):
    Ys = 1 / (df_l.iloc[i, 2] + 1j * df_l.iloc[i, 3]) # series element
    Ysh = df_l.iloc[i, 4] + 1j * df_l.iloc[i, 5] # shunt element
    t = df_l.iloc[i, 6] * np.cos(df_l.iloc[i, 7]) + 1j * df_l.iloc[i, 6] * np.sin(df_l.iloc[i, 7]) # tap as a complex number
    a = df_l.iloc[i, 0]
    b = df_l.iloc[i, 1]
    if a == 0:
        if b - 1 in pq:
            I0_pq[d_pq[b]] += V0 * Ys / t
            Y11[d_pq[b], d_pq[b]] += Ys + Ysh
        if b - 1 in pv:
            I0_pv[d_pv[b]] += V0 * Ys / t
            Y22[d_pv[b], d_pv[b]] += Ys + Ysh
    elif b == 0:
        if a - 1 in pq:
            I0_pq[d_pq[a]] += V0 * Ys / np.conj(t)
            Y11[d_pq[a], d_pq[a]] += (Ys + Ysh) / (t * np.conj(t))
        if a - 1 in pv:
            I0_pv[d_pv[a]] += V0 * Ys / np.conj(t)
            Y22[d_pv[a], d_pv[a]] += (Ys + Ysh) / (t * np.conj(t))
    else:
        if a - 1 in pq and b - 1 in pq:
            Y11[d_pq[a], d_pq[a]] += (Ys + Ysh) / (t * np.conj(t))
            Y11[d_pq[b], d_pq[b]] += Ys + Ysh
            Y11[d_pq[a], d_pq[b]] += - Ys / np.conj(t)
            Y11[d_pq[b], d_pq[a]] += - Ys / t
        if a - 1 in pq and b - 1 in pv:
            Y11[d_pq[a], d_pq[a]] += (Ys + Ysh) / (t * np.conj(t))
            Y22[d_pv[b], d_pv[b]] += Ys + Ysh
            Y12[d_pq[a], d_pv[b]] += - Ys / np.conj(t)
            Y21[d_pv[b], d_pq[a]] += - Ys / t
        if a - 1 in pv and b - 1 in pq:
            Y22[d_pv[a], d_pv[a]] += (Ys + Ysh) / (t * np.conj(t))
            Y11[d_pq[b], d_pq[b]] += Ys + Ysh
            Y21[d_pv[a], d_pq[b]] += - Ys / np.conj(t)
            Y12[d_pq[b], d_pv[a]] += - Ys / t
        if a - 1 in pv and b - 1 in pv:
            Y22[d_pv[a], d_pv[a]] += (Ys + Ysh) / (t * np.conj(t))
            Y22[d_pv[b], d_pv[b]] += Ys + Ysh
            Y22[d_pv[a], d_pv[b]] += - Ys / np.conj(t)
            Y22[d_pv[b], d_pv[a]] += - Ys / t
for i in range(len(df_b)): # add shunts connected directly to the bus
    a = df_b.iloc[i, 0]
    if a - 1 in pq:
        # print(d_pq[a])
        Y11[d_pq[a], d_pq[a]] += df_b.iloc[i, 5] + 1j * df_b.iloc[i, 6]
    elif a - 1 in pv:
        # print(d_pv[a])
        Y22[d_pv[a], d_pv[a]] += df_b.iloc[i, 5] + 1j * df_b.iloc[i, 6]
Y = np.block([[Y11, Y12], [Y21, Y22]])
Yinv = np.linalg.inv(Y)
Ydf = pd.DataFrame(Y)
# Scheduled magnitudes per bus type: |V| and P for PV buses, P and Q for PQ.
V_mod = np.zeros(n_pv, dtype=float)
P_pq = np.zeros(n_pq, dtype=float)
P_pv = np.zeros(n_pv, dtype=float)
Q_pq = np.zeros(n_pq, dtype=float)
for i in range(len(df_b)):
    if df_b.iloc[i, 4] == "PV":
        V_mod[d_pv[df_b.iloc[i, 0]]] = df_b.iloc[i, 3]
        P_pv[d_pv[df_b.iloc[i, 0]]] = df_b.iloc[i, 1]
    elif df_b.iloc[i, 4] == "PQ":
        Q_pq[d_pq[df_b.iloc[i, 0]]] = df_b.iloc[i, 2]
        P_pq[d_pq[df_b.iloc[i, 0]]] = df_b.iloc[i, 1]
# END INITIALIZATION OF DATA
# DECOMPOSITION OF APPARENT POWERS
# Each magnitude is decomposed into lists of K (bus amplitude), P (position)
# and Q (scaling) factors; conjugates are stored for the S* = V* I equation.
SSk = []
SSp = []
SSq = []
n_buses = np.shape(Y)[0] # number of buses
n_scale = 1001 # number of discretized points, arbitrary
Qmax = 1.00 # maximum reactive power of the capacitor
Qmin = 0 # minimum reactive power of the capacitor
SKk0 = P_pq + Q_pq * 1j # original load
# NOTE(review): SKk0 has length n_pq while SPp0 has length n_buses — this is
# consistent only when every non-slack bus is PQ; confirm for other cases.
SPp0 = np.ones(n_buses) # positions of standard the loads
SQq0 = np.ones(n_scale) # always multiply by a factor of 1, the original loads do not change
SSk.append(np.conj(SKk0))
SSp.append(np.conj(SPp0))
SSq.append(np.conj(SQq0))
print(SSk)
# go over all positions where we could have a capacitor (from bus 2 to 102)
"""
for ii in range(2, n_buses):
    SKk1 = np.zeros(n_buses, dtype=complex) # power amplitude, in this case, it will be reactive
    SKk1[ii] = Qmax * 1j # put a 1 because only one capacitor at a time. This is the maximum capacitor power
    SPp1 = np.zeros(n_buses) # possible positions of the capacitor
    SPp1[ii] = 1
    SQq1 = np.arange(Qmin, Qmax, Qmax / n_scale) # scale the powers in a discrete form
    SSk.append(np.conj(SKk1))
    SSp.append(np.conj(SPp1))
    SSq.append(np.conj(SQq1))
"""
# keep it simple, consider by now less cases
# ----------
# SKk1 = np.zeros(n_buses, dtype=complex)
# SKk1[0] = Qmax * 1j
SKk1 = 1 * np.random.rand(n_buses) # generator of random active power
SPp1 = np.zeros(n_buses)
# for ii in range(2, n_buses):
for ii in range(n_buses): # only a few buses
    SPp1[ii] = ii / n_buses # for instance
SQq1 = np.arange(Qmin, Qmax, Qmax / n_scale)
SSk.append(np.conj(SKk1))
SSp.append(np.conj(SPp1))
SSq.append(np.conj(SQq1))
print(SSk)
# ----------
# DECOMPOSITION OF VOLTAGES
# Voltages start as a single flat-profile PGD term (all ones).
Kkv = np.ones(n_buses, dtype=complex) # amplitude vector
Ppv = np.ones(n_buses) # position vector
Qqv = np.ones(n_scale) # scaling vector
VVk = []
VVp = []
VVq = []
VVk.append(np.conj(Kkv))
VVp.append(np.conj(Ppv))
VVq.append(np.conj(Qqv))
# DECOMPOSITION OF CURRENTS
# Current factors start empty; the PGD iterations below fill them.
IIk = []
IIp = []
IIq = []
# CREATION OF C (auxiliary variables).
# THIS FUNCTION HAS TO BE CALLED EVERY TIME WE COMPUTE A NEW RESIDUE OF I, AND ALWAYS FOLLOWS THIS
def fun_C(SSk, SSp, SSq, VVk, VVp, VVq, IIk, IIp, IIq):
    """Build the decomposed auxiliary variable C = S* - V* I*.

    Has to be called every time a new residue of I is computed.  Each
    argument is a list of per-dimension PGD factors (K: bus amplitude,
    P: position, Q: scaling).  The inputs are copied, not mutated.

    :param SSk: K factors of the decomposed apparent powers
    :param SSp: P factors of the decomposed apparent powers
    :param SSq: Q factors of the decomposed apparent powers
    :param VVk: K factors of the decomposed voltages
    :param VVp: P factors of the decomposed voltages
    :param VVq: Q factors of the decomposed voltages
    :param IIk: K factors of the decomposed currents
    :param IIp: P factors of the decomposed currents
    :param IIq: Q factors of the decomposed currents
    :return: (CCk, CCp, CCq, Nc, Nv, n) where Nc = len(SSk) + len(VVk)*len(IIk)
        is the number of C terms, Nv the number of voltage terms and n the
        number of current terms.
    """
    Ns = len(SSk)
    Nv = len(VVk)
    n = len(IIk)
    Nc = Ns + Nv * n
    # Start from copies of the S* factors so the caller's lists are untouched.
    CCk = SSk.copy()
    CCp = SSp.copy()
    CCq = SSq.copy()
    # Append the cross products -V*I for every (voltage, current) term pair.
    for ii in range(Nv):
        for jj in range(n):
            CCk.append(- VVk[ii] * IIk[jj])
            CCp.append(- VVp[ii] * IIp[jj])
            CCq.append(- VVq[ii] * IIq[jj])
    return CCk, CCp, CCq, Nc, Nv, n
# DEFINITION OF NUMBER OF ITERATIONS, CAN CHANGE ARBITRARILY
n_gg = 5 # outer
n_mm = 8 # intermediate
n_kk = 10 # inner
for gg in range(n_gg): # outer loop
    # add the blank initialization of C:
    # CCk = []
    # CCp = []
    # CCq = []
    # IIk = []
    # IIp = []
    # IIq = []
    for mm in range(n_mm): # intermediate loop
        # define the new C
        CCk, CCp, CCq, Nc, Nv, n = fun_C(SSk, SSp, SSq, VVk, VVp, VVq, IIk, IIp, IIq)
        # initialize the residues we have to find
        IIk1 = (np.random.rand(n_buses) - np.random.rand(n_buses)) * 1 # could also try to set IIk1 = VVk1
        IIp1 = (np.random.rand(n_buses) - np.random.rand(n_buses)) * 1
        IIq1 = (np.random.rand(n_scale) - np.random.rand(n_scale)) * 1
        # Alternating-directions fixed point: each factor is solved in turn
        # while freezing the other two.
        for kk in range(n_kk): # inner loop
            # compute IIk1 (residues on Ik)
            prodRK = 0
            RHSk = np.zeros(n_buses, dtype=complex)
            for ii in range(Nc):
                prodRK = np.dot(IIp1, CCp[ii]) * np.dot(IIq1, CCq[ii])
                RHSk += prodRK * CCk[ii]
            prodLK = 0
            LHSk = np.zeros(n_buses, dtype=complex)
            for ii in range(Nv):
                prodLK = np.dot(IIp1, VVp[ii] * IIp1) * np.dot(IIq1, VVq[ii] * IIq1)
                LHSk += prodLK * VVk[ii]
            # elementwise division: the LHS acts as a diagonal operator
            IIk1 = RHSk / LHSk
            # compute IIp1 (residues on Ip)
            prodRP = 0
            RHSp = np.zeros(n_buses, dtype=complex)
            for ii in range(Nc):
                prodRP = np.dot(IIk1, CCk[ii]) * np.dot(IIq1, CCq[ii])
                RHSp += prodRP * CCp[ii]
            prodLP = 0
            LHSp = np.zeros(n_buses, dtype=complex)
            for ii in range(Nv):
                prodLP = np.dot(IIk1, VVk[ii] * IIk1) * np.dot(IIq1, VVq[ii] * IIq1)
                LHSp += prodLP * VVp[ii]
            IIp1 = RHSp / LHSp
            # compute IIq1 (residues on Iq)
            prodRQ = 0
            RHSq = np.zeros(n_scale, dtype=complex)
            for ii in range(Nc):
                prodRQ = np.dot(IIk1, CCk[ii]) * np.dot(IIp1, CCp[ii])
                RHSq += prodRQ * CCq[ii]
            prodLQ = 0
            LHSq = np.zeros(n_scale, dtype=complex)
            for ii in range(Nv):
                prodLQ = np.dot(IIk1, VVk[ii] * IIk1) * np.dot(IIp1, VVp[ii] * IIp1)
                LHSq += prodLQ * VVq[ii]
            IIq1 = RHSq / LHSq
            # debug output for the very first intermediate iteration only
            if gg == 0 and mm == 0 and kk >= 0:
                print(IIk1[:10])
                # print(IIp1[:10])
                # print(IIq1[:10])
        # store the converged residue as a new current term
        IIk.append(IIk1)
        IIp.append(IIp1)
        IIq.append(IIq1)
    # Rebuild the voltage decomposition from the current terms: V = Yinv I,
    # plus one extra term for the slack-bus injection I0_pq.
    VVk = []
    VVp = []
    VVq = []
    PP1 = np.ones(n_buses)
    QQ1 = np.ones(n_scale)
    for ii in range(n_mm):
        # VVk.append(np.conj(np.dot(Yinv, IIk[ii] + I0_pq)))
        VVk.append(np.conj(np.dot(Yinv, IIk[ii])))
        VVp.append(IIp[ii])
        VVq.append(IIq[ii])
    # VVp = np.copy(IIp)
    # VVq = np.copy(IIq)
    # print(VVk[0][:10])
    # print(VVk[1][:10])
    # try to add I0 this way:
    VVk.append(np.conj(np.dot(Yinv, I0_pq)))
    VVp.append(PP1)
    VVq.append(QQ1)
# CHART OF VOLTAGES
# Recombine the PGD factors into a full (position, bus, scale) tensor.
# full_map = np.multiply.outer(VVk[0], np.multiply.outer(VVp[0], VVq[0])) # initial tridimensional representation
V_map = np.multiply.outer(np.multiply.outer(VVp[0], VVk[0]), VVq[0]) # the tridimensional representation I am looking for
for i in range(1, len(VVk)):
    V_map += np.multiply.outer(np.multiply.outer(VVp[i], VVk[i]), VVq[i]) # the tridimensional representation I am looking for
# writer = pd.ExcelWriter('Map_V.xlsx')
# for i in range(n_buses):
#     V_map_df = pd.DataFrame(V_map[:][i][:])
#     V_map_df.to_excel(writer, sheet_name=str(i))
# writer.save()
# CHART OF CURRENTS
I_map = np.multiply.outer(np.multiply.outer(IIp[0], IIk[0]), IIq[0])
for i in range(1, len(IIk)):
    I_map += np.multiply.outer(np.multiply.outer(IIp[i], IIk[i]), IIq[i])
# writer = pd.ExcelWriter('Map_I.xlsx')
# for i in range(n_buses):
#     I_map_df = pd.DataFrame(I_map[:][i][:])
#     I_map_df.to_excel(writer, sheet_name=str(i))
# writer.save()
# CHART OF POWERS
S_map = np.multiply.outer(np.multiply.outer(SSp[0], SSk[0]), SSq[0])
for i in range(1, len(SSk)):
    S_map += np.multiply.outer(np.multiply.outer(SSp[i], SSk[i]), SSq[i])
# writer = pd.ExcelWriter('Map_S.xlsx')
# for i in range(n_buses):
#     S_map_df = pd.DataFrame(S_map[:][i][:])
#     S_map_df.to_excel(writer, sheet_name=str(i))
# writer.save()
print(np.shape(SSk))
print(n_buses)
#Execute configuration commands and save configuration
from netmiko import ConnectHandler

# Connection parameters for the target Cisco IOS device.
device = {
    'device_type': 'cisco_ios',
    'host': '192.168.100.20',
    'username': 'admin',
    'password': 'cisco',
    'port': 22,  # optional, defaults to 22
}

#Establish an SSH connection to the device
net_connect = ConnectHandler(**device)

config_commands = [
    'int lo0',
    'ip add 172.16.10.1 255.255.255.0',
]
output = net_connect.send_config_set(config_commands)

# BUG FIX: the original rebound `net_connect` to the string returned by
# send_command('wr'), discarding the live connection object. Keep the save
# output in its own variable so the session can be closed cleanly.
save_output = net_connect.send_command('wr')
print(output)
print(save_output)

# Close the SSH session explicitly instead of leaking it.
net_connect.disconnect()
# Tai Sakuma <tai.sakuma@gmail.com>
from ..progressbar import ProgressReport
##__________________________________________________________________||
class EventLoopProgressReportWriter(object):
    """Builds ProgressReport objects describing an event loop's progress."""

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)

    def write(self, taskid, config, event):
        """Return a ProgressReport for *taskid*.

        `event.iEvent` is 0-based, so the number of completed events is
        `iEvent + 1`; `event.nEvents` is the total expected.
        """
        report = ProgressReport(
            name=config.name,
            done=event.iEvent + 1,
            total=event.nEvents,
            taskid=taskid,
        )
        return report
##__________________________________________________________________||
|
from analyze.calc_run import *

# burst mode
d = -5  # delay
base = 'G:/Prive/MIJN-Documenten/TU/62-Stage/20180131-compare/'

# run 1 and 2 are only spectra, no waveforms.
# (run directory, measurement length) pairs — the original repeated the
# same calc_run() call four times; a data-driven loop removes the
# duplication while preserving call order and arguments exactly.
RUNS = [
    ('run1', SHORT_MEAS_LEN),
    ('run2', LONG_MEAS_LEN),
    ('run1-2', SHORT_MEAS_LEN),
    ('run2-2', LONG_MEAS_LEN),
]

for run_name, meas_len in RUNS:
    calc_run(base + run_name,
             REACTOR_GLASS_SHORT_QUAD,
             meas=meas_len,
             scope_dir=None,
             )
import faker.config
from django.conf import settings
from elasticsearch.helpers.test import get_test_client
from elasticsearch_dsl.connections import add_connection
from pytest import fixture
from rest_framework.test import APIClient
from connections.tests.factories import ApartmentMinimalFactory
# Pin Faker to the Finnish locale so generated test data is consistent.
faker.config.DEFAULT_LOCALE = "fi_FI"
@fixture
def api_client():
    """Fresh DRF APIClient for exercising HTTP endpoints in tests."""
    client = APIClient()
    return client
def setup_elasticsearch():
    """Register a test ES client as "default" and (re)create the apartment index.

    Any pre-existing apartment index is dropped first so each test module
    starts from a clean index. Returns the connected test client.
    """
    client = get_test_client()
    add_connection("default", client)
    index_name = settings.APARTMENT_INDEX_NAME
    if client.indices.exists(index=index_name):
        client.indices.delete(index=index_name)
    client.indices.create(index=index_name)
    return client
def teardown_elasticsearch(test_client):
    """Drop the apartment index on *test_client* if it still exists."""
    index_name = settings.APARTMENT_INDEX_NAME
    if test_client.indices.exists(index=index_name):
        test_client.indices.delete(index=index_name)
@fixture(scope="module")
def elasticsearch():
    """Module-scoped ES test client with a clean apartment index.

    The index is created before the first test in the module and removed
    after the last one.
    """
    client = setup_elasticsearch()
    yield client
    teardown_elasticsearch(client)
@fixture(scope="module")
def elastic_apartments(elasticsearch):
    """Ten minimal for-sale apartments indexed into the test Elasticsearch."""
    yield ApartmentMinimalFactory.create_for_sale_batch(10)
@fixture
def elastic_single_project_with_apartments(elasticsearch):
    """Eleven FOR_SALE apartments that all share one project's uuid.

    The first apartment fixes the project; ten more are created with the
    same `project_uuid`. All documents are deleted on teardown.
    """
    first = ApartmentMinimalFactory(
        apartment_state_of_sale="FOR_SALE",
        _language="fi",
    )
    apartments = [first] + [
        ApartmentMinimalFactory(
            apartment_state_of_sale="FOR_SALE",
            _language="fi",
            project_uuid=first.project_uuid,
        )
        for _ in range(10)
    ]
    yield apartments
    for apartment in apartments:
        apartment.delete(refresh=True)
|
from .time_utils import *
from .printing import *
|
# -*- coding: utf-8 -*-
class DataInterface:
    """Network interface descriptor with a simple reserve/release flag.

    Tracks a MAC address, an IP address and a port, plus an availability
    flag that `reserve()` clears and `release()` restores.
    """

    def __init__(self, mac_addr, ip_addr, port):
        # New interfaces start out available.
        self.available = True
        self.mac_addr = mac_addr
        self.ip_addr = ip_addr
        self.port = port

    def get_ip_addr(self):
        """Return the interface's IP address."""
        return self.ip_addr

    def get_mac_addr(self):
        """Return the interface's MAC address."""
        return self.mac_addr

    def get_port(self):
        """Return the interface's port."""
        return self.port

    def is_available(self):
        """Return True while the interface has not been reserved."""
        return self.available

    def reserve(self):
        """Mark the interface as taken."""
        self.available = False

    def release(self):
        """Mark the interface as free again."""
        self.available = True
|
import os
import yaml
import segmentation_models_pytorch as smp
import torch
import argparse
import torch.nn as nn
def prepare_model(opt):
    """Convert a training checkpoint into a standalone serialized model.

    Loads the experiment YAML at ``opt.hyp``, rebuilds the LinkNet
    architecture it describes, restores the weights saved at ``savepath``
    (which were saved from a DataParallel wrapper) and writes the bare,
    unwrapped model object to ``final_model_path``.

    :param opt: parsed CLI namespace with a ``hyp`` attribute (YAML path)
    """
    with open(opt.hyp) as f:
        experiment_dict = yaml.load(f, Loader=yaml.FullLoader)
    model = smp.Linknet(encoder_name=experiment_dict["model"]["name"], encoder_depth=5, encoder_weights='imagenet',
                        decoder_use_batchnorm=True, in_channels=3, classes=experiment_dict["model"]["classes"],
                        aux_params=None)
    # FIX: removed the unused `device = torch.device("cuda:0")` local — it was
    # never referenced, and suggested a device placement that never happened.
    # Wrap in DataParallel so the "module."-prefixed checkpoint keys match,
    # then unwrap before saving the plain module.
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(experiment_dict["savepath"])["model_state_dict"])
    model = model.module
    torch.save(model, experiment_dict["final_model_path"])
if __name__ == '__main__':
    # Parse CLI arguments and hand them to the model-preparation step.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--hyp', type=str, default='configs/baseline_config.yaml', help='hyperparameters path')
    cli_opts = arg_parser.parse_args()
    prepare_model(cli_opts)
# -*- coding: utf-8 -*-
#
# split line with TNM codes (for clinical texts)
#
import sys, os
import re
# Matches a single clinical TNM code with optional prefix (c/p/yc/r):
# T stages (Tis, T1mi, T1a-c, T2a/b, T3, T4), N0-N3, and M0/M1a-c.
tnm_re = re.compile("((?:c|p|yc|r)?(?:T(?:is|1(?:mi|[abc])?|2[ab]?|3|4)|N[0-3]|M(?:0|1[abc]?)))")


def split_by_tnm(txt):
    """Split *txt* into TNM code tokens and the text between them.

    Because the pattern is a capturing group, `split` keeps the matched
    codes in the result; empty fragments are dropped.

    :param txt: input line (clinical text)
    :return: list of non-empty fragments (codes and inter-code text)
    """
    # FIX: call the compiled pattern's own .split() instead of
    # re.split(tnm_re, ...), and drop empties with a comprehension rather
    # than filter(lambda x: x, ...). Returning a list (instead of a lazy
    # filter object) is backward-compatible for all iterating callers.
    return [tok for tok in tnm_re.split(txt) if tok]


def main():
    """Read lines from stdin; print each fragment on its own line."""
    for line in sys.stdin:
        line = line.rstrip()
        print("\n".join(split_by_tnm(line)))


if __name__ == "__main__":
    main()
|
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
from algolib.graph import Undirected
from algolib.graph import Directed
from algolib.graph import DFS
from algolib.graph import BFS
from algolib.graph import bipartite
from algolib.graph import top_sort
from algolib.graph import cut_edges, cut_vertices
from algolib.graph import strong_components
from algolib.graph import prim
from algolib.graph import kruskal
from algolib.graph import dijkstra, dijkstra_path
from algolib.graph import floyd
from algolib.graph import edmonds_karp
|
import os
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from kayddrl.agents.base import BaseAgent
from kayddrl.comp.ounoise import OUNoise
from kayddrl.configs.default import Configs, default
from kayddrl.envs.base import BaseEnv
from kayddrl.memory import ReplayBuffer
from kayddrl.utils.logging import logger
from kayddrl.utils.utils import describe
class DDPGAgent(BaseAgent):
    """ActorCritic interacting with environment.

    Implements DDPG: a deterministic actor with OU exploration noise, a
    critic trained on one-step TD targets from target networks, and
    Polyak (soft) target updates.

    Attributes:
        memory (ReplayBuffer): replay memory
        noise (OUNoise): random noise for exploration
        hyper_params (dict): hyper-parameters
        actor (nn.Module): actor model to select actions
        actor_target (nn.Module): target actor model to select actions
        critic (nn.Module): critic model to predict state values
        critic_target (nn.Module): target critic model to predict state values
        actor_optimizer (Optimizer): optimizer for training actor
        critic_optimizer (Optimizer): optimizer for training critic
        curr_state (np.ndarray): temporary storage of the current state
        total_step (int): total step numbers
        episode_step (int): step number of the current episode
        i_episode (int): current episode number
    """

    def __init__(self, env: BaseEnv, models: tuple, optims: tuple, noise: OUNoise, configs: Configs = default()):
        # NOTE(review): `default()` is evaluated once at definition time, so
        # every agent constructed without an explicit `configs` shares the
        # same Configs instance — confirm this mutable-default is intended.
        super(DDPGAgent, self).__init__(env, configs)
        # models unpacks as (actor, actor_target, critic, critic_target).
        self.actor, self.actor_target, self.critic, self.critic_target = models
        self.actor_optimizer, self.critic_optimizer = optims
        self.curr_state = np.zeros((1,))  # placeholder until first state seen
        self.noise = noise
        self.total_step = 0
        self.episode_step = 0
        self.i_episode = 0
        # Resume from checkpoint when configured and present on disk.
        if configs.glob.load_from is not None and os.path.exists(configs.glob.load_from):
            self.load_params(configs.glob.load_from)
        self._initialize()
        logger.info(describe(self))

    def _initialize(self):
        """Initialize non-common things."""
        if not self._configs.glob.test:
            # replay memory (training only; test runs store no transitions)
            self.memory = ReplayBuffer(self._configs)

    def select_action(self, state: np.ndarray) -> np.ndarray:
        """Select an action from the input space."""
        # Remember the raw state so step() can store the full transition.
        self.curr_state = state
        state = self._preprocess_state(state)
        # if initial random action should be conducted (warm-up phase)
        if (
            self.total_step < self._hparams.initial_random_action
            and not self._configs.glob.test
        ):
            return self._env.action_space.sample()
        selected_action = self.actor(state).detach().cpu().numpy()
        if not self._configs.glob.test:
            # Exploration: perturb with OU noise, keep action in [-1, 1].
            noise = self.noise.sample()
            selected_action = np.clip(selected_action + noise, -1.0, 1.0)
        return selected_action

    def _preprocess_state(self, state: np.ndarray) -> torch.Tensor:
        """Preprocess state so that actor selects an action."""
        state = torch.FloatTensor(state).to(self._configs.glob.device)
        return state

    def step(self, action: np.ndarray) -> Tuple[torch.Tensor, ...]:
        """Take an action and return the response of the env."""
        next_state, reward, done, _ = self._env.step(action)
        if not self._configs.glob.test:
            # if the last state is not a terminal state, store done as false
            # (time-limit truncation should not be treated as terminal).
            done_bool = (
                False if self.episode_step == self._configs.glob.max_episode_steps else done
            )
            transition = (self.curr_state, action, reward, next_state, done_bool)
            self.memory.update(*transition)
        return next_state, reward, done

    def update_model(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """Train the model after each episode."""
        experiences = self.memory.sample()
        states, actions, rewards, next_states, dones = experiences
        # TD target:
        #   G_t = r + gamma * v(s_{t+1})  if state != Terminal
        #       = r                       otherwise
        masks = 1 - dones
        next_actions = self.actor_target(next_states)
        next_values = self.critic_target(torch.cat((next_states, next_actions), dim=-1))
        curr_returns = rewards + self._hparams.gamma * next_values * masks
        curr_returns = curr_returns.to(self._configs.glob.device)
        # train critic: regress Q(s, a) towards the TD target.
        gradient_clip_cr = self._hparams.gradient_clip_cr
        values = self.critic(torch.cat((states, actions), dim=-1))
        critic_loss = F.mse_loss(values, curr_returns)
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        nn.utils.clip_grad_norm_(self.critic.parameters(), gradient_clip_cr)
        self.critic_optimizer.step()
        # train actor: maximize Q of the actor's own actions (note that
        # `actions` is rebound here from sampled actions to policy actions).
        gradient_clip_ac = self._hparams.gradient_clip_ac
        actions = self.actor(states)
        actor_loss = -self.critic(torch.cat((states, actions), dim=-1)).mean()
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        nn.utils.clip_grad_norm_(self.actor.parameters(), gradient_clip_ac)
        self.actor_optimizer.step()
        # update target networks (Polyak averaging)
        tau = self._hparams.tau
        self.soft_update(self.actor, self.actor_target, tau)
        self.soft_update(self.critic, self.critic_target, tau)
        return actor_loss.item(), critic_loss.item()

    def soft_update(self, local: nn.Module, target: nn.Module, tau: float):
        """Soft-update: target = tau*local + (1-tau)*target."""
        for t_param, l_param in zip(target.parameters(), local.parameters()):
            t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data)

    def load_params(self, path: str):
        """Load model and optimizer parameters."""
        if not os.path.exists(path):
            # NOTE(review): logger.fatal/info are called with multiple
            # positional args here — confirm the project logger supports
            # this calling convention (stdlib logging would not).
            logger.fatal("the input path does not exist. ->", path)
            return
        params = torch.load(path)
        self.actor.load_state_dict(params["actor_state_dict"])
        self.actor_target.load_state_dict(params["actor_target_state_dict"])
        self.critic.load_state_dict(params["critic_state_dict"])
        self.critic_target.load_state_dict(params["critic_target_state_dict"])
        self.actor_optimizer.load_state_dict(params["actor_optim_state_dict"])
        self.critic_optimizer.load_state_dict(params["critic_optim_state_dict"])
        logger.info("loaded the model and optimizer from", path)

    def save_params(self, n_episode: int):
        """Save model and optimizer parameters."""
        # Keys must mirror those read back in load_params().
        params = {
            "actor_state_dict": self.actor.state_dict(),
            "actor_target_state_dict": self.actor_target.state_dict(),
            "critic_state_dict": self.critic.state_dict(),
            "critic_target_state_dict": self.critic_target.state_dict(),
            "actor_optim_state_dict": self.actor_optimizer.state_dict(),
            "critic_optim_state_dict": self.critic_optimizer.state_dict(),
        }
        BaseAgent.save_params(self, params, n_episode)

    def write_log(self, i: int, loss: np.ndarray, score: int, avg_score):
        """Write log about loss and score"""
        total_loss = loss.sum()
        logger.info(
            "episode %d:\t episode step: %d | total step: %d | total score: %d |\t"
            "total loss: %f | actor_loss: %.3f | critic_loss: %.3f\n"
            % (
                i,
                self.episode_step,
                self.total_step,
                score,
                total_loss,
                loss[0],
                loss[1],
            )  # actor loss  # critic loss
        )
        if self._configs.glob.log:
            logger.log_scalar("scores/score", score, i)
            logger.log_scalar("scores/avg_score", avg_score, i)
            logger.log_scalar("losses/total_loss", total_loss, i)
            logger.log_scalar("losses/actor_loss", loss[0], i)
            logger.log_scalar("losses/critic_loss", loss[1], i)

    def train(self):
        """Train the agent."""
        logger.warn("Start training")
        for self.i_episode in range(1, self._configs.glob.num_episodes + 1):
            state = self._env.reset()
            done = False
            score = 0
            # NOTE(review): total_score is re-created every episode and only
            # ever gets one element appended below, so np.mean(total_score)
            # equals the current score — confirm whether a running average
            # across episodes was intended.
            total_score = []
            self.episode_step = 0
            losses = list()
            while not done:
                self._env.render()
                action = self.select_action(state)
                next_state, reward, done = self.step(action)
                self.total_step += 1
                self.episode_step += 1
                self._configs.glob.global_step += 1
                # Learn only once the buffer can fill a batch; optionally
                # perform several gradient steps per environment step.
                if len(self.memory) >= self._configs.memory.batch_size:
                    for _ in range(self._hparams.multiple_learn):
                        loss = self.update_model()
                        losses.append(loss)  # for logging
                state = next_state
                score += reward
            # logging
            if losses:
                total_score.append(score)
                avg_loss = np.vstack(losses).mean(axis=0)
                self.write_log(self.i_episode, avg_loss, score, np.mean(total_score))
                losses.clear()
            if self.i_episode % self._configs.glob.save_period == 0:
                self.save_params(self.i_episode)
                self.interim_test()
        # termination
        self._env.close()
        self.save_params(self.i_episode)
        self.interim_test()
# ddpg_agent = DDPGAgent(None, (1,2,3,4), (1,2), None)
|
import requests
from blablacarapi.debug import Debug
from blablacarapi.api_exceptions import BlaBlaCarRequestApiException
__author__ = 'ivan.arar@gmail.com'
def bind_request(**request_data):
    """
    Builds a callable bound to a single API endpoint

    :model: model class used to process the API response
    :api_path: endpoint path appended to the client base path
    :method: HTTP method for the endpoint, defaults to 'GET'
    :query_parameters: mapping of accepted aliases to API parameter names
    :return: `call(client, *path_params, **query_params)` function
    """

    class ApiRequest(object):
        """A single request against one BlaBlaCar API endpoint"""

        model = request_data.get('model')
        api_path = request_data.get('api_path')
        method = request_data.get('method', 'GET')
        query_parameters = request_data.get('query_parameters')

        def __init__(self, client, debug, *path_params, **query_params):
            client.request = self
            self.debug = debug
            self.client = client
            self.parameters = {'query': {}, 'path': []}
            self._set_parameters(*path_params, **query_params)

        def _set_parameters(self, *path_params, **query_params):
            """
            Prepares the list of query parameters

            :path_params: list of path parameters
            :query_params: dict of query parameters
            :return: None
            """
            for key, value in query_params.items():
                # Omitted (None) parameters are not sent at all.
                if value is None:
                    continue
                if key in self.query_parameters.values():
                    # Caller used the API's own parameter name directly.
                    self.parameters['query'][key] = str(value).encode('utf-8')
                elif key in self.query_parameters.keys():
                    # Translate caller-side alias to the API parameter name.
                    self.parameters['query'][self.query_parameters[key]] = str(value).encode('utf-8')
            for value in path_params:
                self.parameters['path'].append(value.encode('utf-8'))
            # Client-wide parameters that accompany every request.
            self.parameters['query']['_format'] = self.client.format
            self.parameters['query']['key'] = self.client.api_key
            self.parameters['query']['locale'] = self.client.locale
            self.parameters['query']['cur'] = self.client.currency

        def _prepare_request(self):
            """
            Prepares url and query parameters for the request

            :return: Tuple with two elements, url and query parameters
            """
            url_parts = {
                'protocol': self.client.protocol,
                'base_url': self.client.base_url,
                'base_path': self.client.base_path,
                'api_path': self.api_path,
            }
            url = '{protocol}://{base_url}{base_path}{api_path}'.format(**url_parts)
            # Reuse url_parts as the list of path segments, base URL first.
            url_parts = self.parameters['path']
            url_parts.insert(0, url)
            # Path segments were stored as bytes; decode them when joining.
            # FIX: isinstance() instead of the non-idiomatic type(...) == str.
            url = '/'.join([part if isinstance(part, str) else part.decode('utf-8') for part in url_parts])
            self.debug.ok('url', url)
            self.debug.ok('query_parameters', self.parameters['query'])
            return url, self.parameters['query']

        def _do_request(self, url, params):
            """
            Makes the request to BlaBlaCar Api servers

            :url: Url for the request
            :params: Query parameters
            :return: Tuple with two elements, status code and content
            """
            if self.method == 'GET':
                response = requests.get(url, params=params)
                self.debug.ok('response_object', response)
                return response.status_code, response.json()
            else:
                # For future POST, PUT, DELETE requests
                # NOTE(review): non-GET currently returns None, which makes
                # _call() fail on tuple unpacking — confirm this is intended
                # until the other methods are implemented.
                pass

        def _proccess_response(self, status_code, response):
            """
            Process response using models

            :status_code: Response status code
            :response: Content
            :return: Model with the data from the response
            """
            if status_code != 200:
                self.debug.error('status_code', status_code)
                self.debug.error('response', response)
                # The API reports failures either as {'message': ...} or as
                # {'error': {'message': ...}}.
                if 'message' in response:
                    raise BlaBlaCarRequestApiException(response['message'])
                if 'error' in response:
                    raise BlaBlaCarRequestApiException(response['error'].get('message', 'Unknown error occurred!'))
                else:
                    raise BlaBlaCarRequestApiException('Unknown error occurred!')
            else:
                self.debug.ok('status_code', status_code)
                self.debug.ok('response', response)
                return self.model.proccess(response)

        def _call(self):
            """
            Makes the API call

            :return: Return value from self._proccess_response()
            """
            url, params = self._prepare_request()
            status_code, response = self._do_request(url, params)
            return self._proccess_response(status_code, response)

    def call(client, *path_params, **query_params):
        """
        Binded method for API calls

        :path_params: list of path parameters
        :query_params: dict of query parameters
        :return: Return value from ApiRequest._call()
        """
        with Debug(client=client) as debug:
            request = ApiRequest(client, debug, *path_params, **query_params)
            return request._call()

    return call
|
import mysql.connector
class UseDataBase:
    """Context manager yielding a MySQL cursor.

    On clean exit the transaction is committed; if the managed block
    raised, the transaction is rolled back instead (the original committed
    unconditionally, which could persist partial writes from a failed
    block). The cursor and connection are always closed.
    """

    def __init__(self, config: dict) -> None:
        # Settings passed straight through to mysql.connector.connect().
        self.configuration = config

    def __enter__(self) -> 'cursor':
        try:
            self.conn = mysql.connector.connect(**self.configuration)
            self.cursor = self.conn.cursor()
            return self.cursor
        except mysql.connector.errors.InterfaceError as err:
            # Surface connection problems as a builtin ConnectionError,
            # chaining the original cause for debugging.
            raise ConnectionError(err) from err

    def __exit__(self, exc_type, exc_value, exc_trace) -> None:
        # Commit only when the with-block finished cleanly; roll back on
        # error so a failed block leaves no partial changes behind.
        try:
            if exc_type is None:
                self.conn.commit()
            else:
                self.conn.rollback()
        finally:
            self.cursor.close()
            self.conn.close()
|
#!/usr/bin/env python3
"""
This script allows you to manually control the simulator
using the keyboard arrows.
"""
import sys
import argparse
import pyglet
import math
from pyglet.window import key
from pyglet import clock
import numpy as np
import gym
import gym_miniworld
import csv
# Command-line options for the manual-control session.
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default='MiniWorld-Hallway-v0')
parser.add_argument('--domain-rand', action='store_true', help='enable domain randomization')
parser.add_argument('--no-time-limit', action='store_true', help='ignore time step limits')
parser.add_argument('--top_view', action='store_true', help='show the top view instead of the agent view')
parser.add_argument('--path-dir', default=None)  # optional CSV of positions/directions to overlay
args = parser.parse_args()

env = gym.make(args.env_name)

if args.no_time_limit:
    env.max_episode_steps = math.inf
if args.domain_rand:
    env.domain_rand = True

view_mode = 'top' if args.top_view else 'agent'

env.reset()

path = None
pos_data = []
dir_data = []
if args.path_dir:
    # Each CSV row is expected to hold 4 floats: x, y, z position followed
    # by a direction value — TODO confirm against the file producer.
    with open(args.path_dir, newline='') as f:
        reader = csv.reader(f)
        data = list(reader)
        pos_data = []
        dir_data = []
        for d in data:
            int_data = [float(i) for i in d]
            pos_data.append(int_data[:3])
            dir_data.append(int_data[3])
        # Subsample every 5th point to thin out the rendered path.
        pos_data = pos_data[::5]
        dir_data = dir_data[::5]

# Create the display window
env.render('pyglet', view=view_mode, path=pos_data, dir=dir_data)
def step(action):
    """Apply *action* to the environment, log progress, and redraw."""
    # step_count is 0-based; print it 1-based with the action's enum name.
    print('step {}/{}: {}'.format(env.step_count+1, env.max_episode_steps, env.actions(action).name))
    obs, reward, done, info = env.step(action)
    if reward > 0:
        print('reward={:.2f}'.format(reward))
    if done:
        print('done!')
        env.reset()
    # NOTE(review): unlike the module-level render call, this one omits the
    # path/dir overlay arguments — confirm whether the overlay should
    # persist during stepping.
    env.render('pyglet', view=view_mode)
@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
    """
    This handler processes keyboard commands that
    control the simulation
    """
    if symbol in (key.BACKSPACE, key.SLASH):
        print('RESET')
        env.reset()
        env.render('pyglet', view=view_mode)
        return

    if symbol == key.ESCAPE:
        env.close()
        sys.exit(0)

    # Map each accepted key (or key group) to the simulator action it fires.
    bindings = (
        ((key.UP,), env.actions.move_forward),
        ((key.DOWN,), env.actions.move_back),
        ((key.LEFT,), env.actions.turn_left),
        ((key.RIGHT,), env.actions.turn_right),
        ((key.PAGEUP, key.P), env.actions.pickup),
        ((key.PAGEDOWN, key.D), env.actions.drop),
        ((key.ENTER,), env.actions.done),
        ((key.W,), env.actions.move_upward),
        ((key.S,), env.actions.move_downward),
    )
    for key_group, action in bindings:
        if symbol in key_group:
            step(action)
            return
@env.unwrapped.window.event
def on_key_release(symbol, modifiers):
    # Key releases are deliberately ignored; actions fire on press only.
    pass
@env.unwrapped.window.event
def on_draw():
    # Redraw the scene, including the optional path overlay loaded from CSV.
    env.render('pyglet', view=view_mode, path=pos_data, dir=dir_data)
@env.unwrapped.window.event
def on_close():
    # Stop the pyglet event loop when the window is closed.
    pyglet.app.exit()
# Enter main event loop
# Blocks until the window closes (on_close calls pyglet.app.exit()).
pyglet.app.run()

env.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions for working with i3cols versions of I3Particle (I3PARTICLE_T) and
I3MCTree (FLAT_PARTICLE_T).
See dataclasses/private/dataclasses/physics/I3MCTreePhysicsLibrary.cxx
and dataclasses/private/dataclasses/physics/I3Particle.cxx
"""
from __future__ import absolute_import, division, print_function
__author__ = "Justin L. Lanfranchi for the IceCube Collaboration"
__all__ = [
"get_null_particle",
"get_best_filter",
"true_filter",
"is_cascade",
"is_neutrino",
"is_nucleus",
"is_track",
"is_muon",
"more_energetic",
"get_most_energetic",
"get_most_energetic_neutrino",
"get_most_energetic_muon",
"get_most_energetic_track",
]
import copy
import numba
import numpy as np
from i3cols import dtypes, enums
@numba.njit(cache=True, error_model="numpy")
def get_null_particle():
    """Get a null particle for use when an invalid / n/a result is desired.

    Returns
    -------
    null_particle : shape () ndarray of dtype I3PARTICLE_T
    """
    # Allocate a length-1 array and take element 0 to obtain a single
    # I3PARTICLE_T record.
    null_particle = np.empty(shape=1, dtype=dtypes.I3PARTICLE_T)[0]
    # TODO: set majorID, minorID to random values?
    null_particle["id"]["majorID"] = 0
    null_particle["id"]["minorID"] = 0
    null_particle["pdg_encoding"] = 0
    null_particle["shape"] = enums.ParticleShape.Null
    # Kinematic fields are NaN so accidental use is conspicuous downstream.
    null_particle["pos"]["x"] = np.nan
    null_particle["pos"]["y"] = np.nan
    null_particle["pos"]["z"] = np.nan
    null_particle["dir"]["zenith"] = np.nan
    null_particle["dir"]["azimuth"] = np.nan
    null_particle["time"] = np.nan
    null_particle["energy"] = np.nan
    null_particle["length"] = np.nan
    null_particle["speed"] = np.nan
    null_particle["fit_status"] = enums.FitStatus.NotSet
    null_particle["location_type"] = enums.LocationType.Anywhere
    return null_particle
@numba.njit(error_model="numpy")
def get_best_filter(particles, filter_function, cmp_function):
    """Scan `particles` and return the one preferred by `cmp_function` among
    those accepted by `filter_function`; when nothing qualifies, a null
    particle (see `get_null_particle`) is returned.

    See dataclasses/public/dataclasses/physics/I3MCTreeUtils.h

    Parameters
    ----------
    particles : ndarray of dtyppe I3PARTICLE_T
    filter_function : numba Callable(I3PARTICLE_T)
    cmp_function : numba Callable(I3PARTICLE_T, I3PARTICLE_T)

    Returns
    -------
    best_particle : shape () ndarray of dtype I3PARTICLE_T
    """
    best = get_null_particle()
    for candidate in particles:
        if not filter_function(candidate):
            continue
        if cmp_function(test=candidate, ref=best):
            best = candidate
    return best
@numba.njit(cache=True, error_model="numpy")
def true_filter(test):  # pylint: disable=unused-argument
    """Accept any particle unconditionally.

    A no-op predicate for `get_best_filter` when no filtering is wanted;
    intended to have same effect as `IsParticle` defined in
    dataclasses/private/dataclasses/physics/I3MCTreePhysicsLibrary.cxx

    Parameters
    ----------
    test

    Returns
    -------
    True : bool
    """
    return True
@numba.njit(cache=True, error_model="numpy")
def is_cascade(particle):
    """Test if particle is a cascade.

    See dataclasses/private/dataclasses/physics/I3Particle.cxx

    Parameters
    ----------
    particle : shape () ndarray of dtype I3PARTICLE_T

    Returns
    -------
    is_cascade : bool
    """
    # True when any of the following holds:
    #   1. the shape is explicitly cascade-like, or
    #   2. the PDG type is an EM/hadronic shower producer, or
    #   3. it is a nucleus / p+ / p- / gamma that is NOT Primary-shaped
    #      (the Primary-shaped counterparts count as tracks — see the
    #      mirrored `== Primary` comparison in `is_track`).
    return (
        particle["shape"]
        in (enums.ParticleShape.Cascade, enums.ParticleShape.CascadeSegment,)
        or particle["pdg_encoding"]
        in (
            enums.ParticleType.EPlus,
            enums.ParticleType.EMinus,
            enums.ParticleType.Brems,
            enums.ParticleType.DeltaE,
            enums.ParticleType.PairProd,
            enums.ParticleType.NuclInt,
            enums.ParticleType.Hadrons,
            enums.ParticleType.Pi0,
            enums.ParticleType.PiPlus,
            enums.ParticleType.PiMinus,
        )
        or (
            particle["shape"] != enums.ParticleShape.Primary
            and (
                is_nucleus(particle)
                or particle["pdg_encoding"]
                in (
                    enums.ParticleType.PPlus,
                    enums.ParticleType.PMinus,
                    enums.ParticleType.Gamma,
                )
            )
        )
    )
@numba.njit(cache=True, error_model="numpy")
def is_neutrino(particle):
    """Test if particle is a neutrino (any flavor, particle or antiparticle).

    See dataclasses/private/dataclasses/physics/I3Particle.cxx

    Parameters
    ----------
    particle : shape () ndarray of dtype I3PARTICLE_T

    Returns
    -------
    is_neutrino : bool
    """
    pdg = particle["pdg_encoding"]
    return pdg in (
        enums.ParticleType.NuE,
        enums.ParticleType.NuEBar,
        enums.ParticleType.NuMu,
        enums.ParticleType.NuMuBar,
        enums.ParticleType.NuTau,
        enums.ParticleType.NuTauBar,
        enums.ParticleType.Nu,
    )
@numba.njit(cache=True, error_model="numpy")
def is_nucleus(particle):
    """Test if particle is a nucleus.

    See dataclasses/private/dataclasses/physics/I3Particle.cxx

    Parameters
    ----------
    particle : shape () ndarray of dtype I3PARTICLE_T

    Returns
    -------
    is_nucleus : bool
    """
    # Nuclei occupy |PDG code| in [1000000000, 1099999999].
    pdg_magnitude = abs(particle["pdg_encoding"])
    return 1000000000 <= pdg_magnitude <= 1099999999
@numba.njit(cache=True, error_model="numpy")
def is_track(particle):
    """Test if particle is a track.

    See dataclasses/private/dataclasses/physics/I3Particle.cxx

    Parameters
    ----------
    particle : shape () ndarray of dtype I3PARTICLE_T

    Returns
    -------
    is_track : bool
    """
    # True when any of the following holds:
    #   1. the shape is explicitly track-like, or
    #   2. the PDG type is a track-producing (charged lepton / exotic), or
    #   3. it is a nucleus / p+ / p- / gamma that IS Primary-shaped
    #      (the non-Primary counterparts count as cascades — see the
    #      mirrored `!= Primary` comparison in `is_cascade`).
    return (
        particle["shape"]
        in (
            enums.ParticleShape.InfiniteTrack,
            enums.ParticleShape.StartingTrack,
            enums.ParticleShape.StoppingTrack,
            enums.ParticleShape.ContainedTrack,
        )
        or particle["pdg_encoding"]
        in (
            enums.ParticleType.MuPlus,
            enums.ParticleType.MuMinus,
            enums.ParticleType.TauPlus,
            enums.ParticleType.TauMinus,
            enums.ParticleType.STauPlus,
            enums.ParticleType.STauMinus,
            enums.ParticleType.SMPPlus,
            enums.ParticleType.SMPMinus,
            enums.ParticleType.Monopole,
            enums.ParticleType.Qball,
        )
        or (
            particle["shape"] == enums.ParticleShape.Primary
            and (
                is_nucleus(particle)
                or particle["pdg_encoding"]
                in (
                    enums.ParticleType.PPlus,
                    enums.ParticleType.PMinus,
                    enums.ParticleType.Gamma,
                )
            )
        )
    )
@numba.njit(cache=True, error_model="numpy")
def is_muon(particle):
    """Test if particle is a muon (either charge).

    See dataclasses/private/dataclasses/physics/I3Particle.cxx

    Parameters
    ----------
    particle : shape () ndarray of dtype I3PARTICLE_T

    Returns
    -------
    is_muon : bool
    """
    # Membership test mirrors the tuple-`in` idiom used by is_neutrino.
    return particle["pdg_encoding"] in (
        enums.ParticleType.MuPlus,
        enums.ParticleType.MuMinus,
    )
@numba.njit(cache=True, error_model="numpy")
def more_energetic(test, ref):
    """Is `test` particle more energetic than `ref` particle?

    Not if `test` energy is NaN, always returns False.

    Designed to be used with `get_best_filter`.

    See function `MoreEnergetic` in
    dataclasses/private/dataclasses/physics/I3MCTreePhysicsLibrary.cxx

    Parameters
    ----------
    test : I3PARTICLE_T
    ref : I3PARTICLE_T

    Returns
    -------
    is_most_energetic : bool
    """
    # NaN test energy never wins; a NaN ref energy always loses to a valid
    # test energy. (The commented-out one-liner equivalent that used to
    # trail this function has been removed.)
    if np.isnan(test["energy"]):
        return False
    if np.isnan(ref["energy"]):
        return True
    return test["energy"] > ref["energy"]
@numba.njit(error_model="numpy")
def get_most_energetic(particles):
    """Return the most energetic particle in `particles`.

    Falls back to a null particle when no particle has a non-NaN energy.

    Parameters
    ----------
    particles : ndarray of dtyppe I3PARTICLE_T

    Returns
    -------
    most_energetic : shape () ndarray of dtype I3PARTICLE_T
    """
    return get_best_filter(
        particles=particles,
        filter_function=true_filter,
        cmp_function=more_energetic,
    )
@numba.njit(error_model="numpy")
def get_most_energetic_neutrino(particles):
    """Return the most energetic neutrino in `particles`.

    Parameters
    ----------
    particles : ndarray of dtype I3PARTICLE_T

    Returns
    -------
    most_energetic_neutrino : shape () ndarray of dtype I3PARTICLE_T
    """
    return get_best_filter(
        particles=particles,
        filter_function=is_neutrino,
        cmp_function=more_energetic,
    )
@numba.njit(error_model="numpy")
def get_most_energetic_muon(particles):
    """Return the most energetic muon in `particles`.

    Parameters
    ----------
    particles : ndarray of dtype I3PARTICLE_T

    Returns
    -------
    most_energetic_muon : shape () ndarray of dtype I3PARTICLE_T
    """
    return get_best_filter(
        particles=particles,
        filter_function=is_muon,
        cmp_function=more_energetic,
    )
@numba.njit(error_model="numpy")
def get_most_energetic_track(particles):
    """Return the most energetic track in `particles`.

    Parameters
    ----------
    particles : ndarray of dtype I3PARTICLE_T

    Returns
    -------
    most_energetic_track : shape () ndarray of dtype I3PARTICLE_T
    """
    return get_best_filter(
        particles=particles,
        filter_function=is_track,
        cmp_function=more_energetic,
    )
|
#!/usr/bin/env python
# BUSCO_phylogenomics.py
# 2019 Jamie McGowan <jamie.mcgowan@mu.ie>
#
# Utility script to construct species phylogenies using BUSCO results.
# Can perform ML supermatrix or generate datasets for supertree methods.
# Works directly from BUSCO output, as long as the same BUSCO dataset
# has been used for each genome
#
# Dependencies:
# - BioPython
# - MUSCLE
# - trimAL
# - IQ-TREE
#
import argparse
import multiprocessing as mp
import os
import sys
from time import gmtime, strftime
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
# If these programs aren't in $PATH, replace the string below with full
# paths to the programs, including the program name
muscle = "muscle"  # multiple-sequence aligner
iqtree = "iqtree"  # maximum-likelihood tree inference
trimal = "trimal"  # alignment trimming
# astral = "astral.jar"
# TODO Add FastTree support
def main():
parser = argparse.ArgumentParser(description="Perform phylogenomic reconstruction using BUSCOs")
parser.add_argument("--supermatrix",
help="Concatenate alignments of ubuquitious single copy BUSCOs and perform supermatrix "
"species phylogeny reconstruction using IQTREE/ML",
action="store_true")
parser.add_argument("--supertree",
help="Generate individual ML phylogenies of each BUSCO persent in at least 4 genomes for "
"supertree species phylogeny reconstruction with ASTRAL",
action="store_true")
parser.add_argument("-t", "--threads", type=int, help="Number of threads to use", required=True)
parser.add_argument("-d", "--directory", type=str, help="Directory containing completed BUSCO runs", required=True)
parser.add_argument("-o", "--output", type=str, help="Output directory to store results", required=True)
parser.add_argument("-l", "--lineage", type=str, help="Name of lineage used to run BUSCO", required=False)
parser.add_argument("-psc", "--percent_single_copy", type=float, action="store", dest="psc",
help="BUSCOs that are present and single copy in N percent of species will be included in the "
"concatenated alignment")
parser.add_argument("--stop_early",
help="Stop pipeline early after generating datasets (before phylogeny inference)",
action="store_true")
args = parser.parse_args()
start_directory = os.path.abspath(args.directory)
working_directory = os.path.abspath(args.output)
threads = int(args.threads)
supermatrix = args.supermatrix
supertree = args.supertree
stop_early = args.stop_early
lineage = args.lineage
if args.psc is None:
percent_single_copy = 100
print(percent_single_copy)
else:
percent_single_copy = float(args.psc)
print(percent_single_copy)
if not supermatrix and not supertree:
print("Error! Please select at least one of '--supermatrix' or '--supertree'")
sys.exit(1)
# Check input directory exists
if os.path.isdir(start_directory):
os.chdir(start_directory)
else:
print("Error! " + start_directory + " is not a directory!")
# Check if output directory already exists
if os.path.isdir(working_directory):
print("Error! " + working_directory + " already exists")
sys.exit(1)
else:
os.mkdir(working_directory)
if lineage == None:
lineage = ""
# TODO check dependencies are installed
print_message("Starting BUSCO Phylogenomics Pipeline")
# Scan start directory to identify BUSCO runs (begin with 'run_')
busco_dirs = []
for item in os.listdir("."):
if item[0:4] == "run_":
if os.path.isdir(item):
busco_dirs.append(item)
print("Found " + str(len(busco_dirs)) + " BUSCO runs:")
for directory in busco_dirs:
print("\t" + directory)
print("")
buscos = {}
all_species = []
for directory in busco_dirs:
os.chdir(start_directory)
species = directory.split("run_")[1]
all_species.append(species)
os.chdir(directory)
# os.chdir("run_" + lineage) # Issue with BUSCO version >= 4?
os.chdir("busco_sequences")
os.chdir("single_copy_busco_sequences")
print(species)
for busco in os.listdir("."):
if busco.endswith(".faa"):
#print(busco)
busco_name = busco[0:len(busco) - 4]
record = SeqIO.read(busco, "fasta")
new_record = SeqRecord(Seq(str(record.seq)), id=species, description="")
if busco_name not in buscos:
buscos[busco_name] = []
buscos[busco_name].append(new_record)
print("BUSCO\t # Species Single Copy")
for busco in buscos:
print(busco + " " + str(len(buscos[busco])))
print_message((str(len(buscos))) + " BUSCOs were found")
print("")
if supertree:
print_message("Beginning SUPERTREE Analysis")
print("")
# Identify BUSCOs that are present (single copy) in at least 4 species
four_single_copy = []
for busco in buscos:
if len(buscos[busco]) >= 4:
four_single_copy.append(busco)
if len(four_single_copy) == 0:
print_message("0 BUSCOs are present and single copy in at least 4 species")
# Should break out or quit here
else:
print_message(str(len(four_single_copy)) + " BUSCOs are single copy and present in at least 4 species")
os.chdir(working_directory)
os.mkdir("proteins_4")
os.mkdir("alignments_4")
os.mkdir("trimmed_alignments_4")
os.mkdir("trees_4")
os.mkdir("trees_4/iqtree_files")
print("")
print_message("Writing protein sequences to: " + os.path.join(working_directory, "proteins_4"))
for busco in four_single_copy:
busco_seqs = buscos[busco]
SeqIO.write(busco_seqs, os.path.join("proteins_4", busco + ".faa"), "fasta")
print("")
print_message("Aligning protein sequences using MUSCLE with", threads, "threads to:",
os.path.join(working_directory, "alignments_4"))
mp_commands = []
for busco in four_single_copy:
mp_commands.append(
[os.path.join("proteins_4", busco + ".faa"), os.path.join("alignments_4", busco + ".aln")])
pool = mp.Pool(processes=threads)
results = pool.map(run_muscle, mp_commands)
print("")
print_message("Trimming alignments using trimAl (-automated1) with", threads, "threads to: ",
os.path.join(working_directory, "trimmed_alignments_4"))
mp_commands = []
for busco in four_single_copy:
mp_commands.append([os.path.join("alignments_4", busco + ".aln"),
os.path.join("trimmed_alignments_4", busco + ".trimmed.aln")])
pool = mp.Pool(processes=threads)
results = pool.map(run_trimal, mp_commands)
print("")
print_message("Generating phylogenies using IQ-TREE (with model testing) for each BUSCO family with",
threads, "threads to:", os.path.join(working_directory, "trees_4"))
mp_commands = []
for busco in four_single_copy:
mp_commands.append([os.path.join("trimmed_alignments_4", busco + ".trimmed.aln")])
pool = mp.Pool(processes=threads)
results = pool.map(run_iqtree, mp_commands)
# Move all IQ-TREE generated files to trees_4 folder
os.system("mv trimmed_alignments_4/*.treefile trees_4")
os.system("mv trimmed_alignments_4/*.trimmed.aln.* trees_4/iqtree_files")
print("")
print_message("Concatenating all TREEs to: ", os.path.join(working_directory, "ALL.trees"))
os.chdir(working_directory)
os.system("cat trees_4/*.treefile > ALL.trees")
print("")
print_message("Finished generating dataset for supertree analysis. Use programs such as Astral or CLANN "
"to infer species tree from trees_4/ALL.trees")
print("")
if supermatrix:
single_copy_buscos = []
if args.psc is None:
print_message("Identifying BUSCOs that are single copy in all " + str(len(all_species)) + " species")
for busco in buscos:
if len(buscos[busco]) == len(all_species):
single_copy_buscos.append(busco)
if len(single_copy_buscos) == 0:
print_message("0 BUSCO families were present and single copy in all species")
print_message("Exiting")
sys.exit(0)
else:
print(str(len(single_copy_buscos)) + " BUSCOs are single copy in all " + str(len(all_species)) + " species")
else:
psc = args.psc
# Identify BUSCOs that are single copy and present in psc% of species
for busco in buscos:
percent_species_with_single_copy = (len(buscos[busco]) / (len(all_species) * 1.0)) * 100
if percent_species_with_single_copy >= psc:
single_copy_buscos.append(busco)
print(str(len(single_copy_buscos)) + " BUSCOs are single copy in >= " + str(psc) + " of species")
os.chdir(working_directory)
os.mkdir("proteins")
os.mkdir("alignments")
os.mkdir("trimmed_alignments")
print("")
print_message("Writing protein sequences to: " + os.path.join(working_directory, "proteins"))
for busco in single_copy_buscos:
busco_seqs = buscos[busco]
SeqIO.write(busco_seqs, os.path.join(working_directory, "proteins", busco + ".faa"), "fasta")
print("")
print_message("Aligning protein sequences using MUSCLE with", threads, "threads to: ",
os.path.join(working_directory))
mp_commands = []
for busco in single_copy_buscos:
mp_commands.append([os.path.join(working_directory, "proteins", busco + ".faa"),
os.path.join(working_directory, "alignments", busco + ".aln")])
pool = mp.Pool(processes=threads)
results = pool.map(run_muscle, mp_commands)
print("")
print_message("Trimming alignments using trimAl (-automated1) with", threads, "threads to: ",
os.path.join(working_directory, "trimmed_alignments"))
mp_commands = []
for busco in single_copy_buscos:
mp_commands.append([os.path.join(working_directory, "alignments", busco + ".aln"),
os.path.join(working_directory, "trimmed_alignments", busco + ".trimmed.aln")])
pool = mp.Pool(processes=threads)
results = pool.map(run_trimal, mp_commands)
print("")
print_message("Concatenating all trimmed alignments for SUPERMATRIX analysis")
os.chdir(os.path.join(working_directory, "trimmed_alignments"))
alignments = {}
for species in all_species:
alignments[species] = ""
# if psc isn't set, or is == 100, we can simple just concatenate alignments
if args.psc is None:
for alignment in os.listdir("."):
for record in SeqIO.parse(alignment, "fasta"):
alignments[str(record.id)] += str(record.seq)
else:
# We need to check if a species is missing from a family, if so append with "-" to represent missing data
for alignment in os.listdir("."):
# Keep track of which species are present and missing
check_species = all_species[:]
for record in SeqIO.parse(alignment, "fasta"):
alignments[str(record.id)] += str(record.seq)
check_species.remove(str(record.id))
if len(check_species) > 0:
# There are missing species, fill with N * "?"
seq_len = len(str(record.seq))
for species in check_species:
alignments[species] += ("?" * seq_len)
os.chdir(working_directory)
fo = open("SUPERMATRIX.aln", "w")
for species in alignments:
fo.write(">" + species + "\n")
fo.write(alignments[species] + "\n")
fo.close()
print_message("Supermatrix alignment is " + str(len(alignments[species])) + " amino acids in length")
if stop_early:
print_message("Stopping early")
sys.exit(0)
print_message("Reconstructing species phylogeny using IQ-TREE with model selection from ModelFinder, "
"1000 ultrafast bootstrap approximations and 1000 SH-aLRTs: SUPERMATRIX.aln.treefile")
print("")
os.system("iqtree -s SUPERMATRIX.aln -bb 1000 -alrt 1000 -nt AUTO -ntmax " + str(threads) + " > /dev/null")
print("")
print_message("SUPERMATRIX phylogeny construction complete! See treefile: SUPERMATRIX.aln.treefile")
def run_muscle(io):
    """Align one protein file with MUSCLE.

    io: (input_fasta_path, output_alignment_path) pair; all MUSCLE output
    (stdout and stderr) is discarded.
    """
    in_path, out_path = io
    os.system(f"muscle -in {in_path} -out {out_path} > /dev/null 2>&1")
def run_trimal(io):
    """Trim one alignment with trimAl using its -automated1 heuristic.

    io: (input_alignment_path, output_alignment_path) pair.
    """
    in_path, out_path = io
    os.system(f"trimal -in {in_path} -out {out_path} -automated1 ")
def run_iqtree(io):
    """Infer a gene tree with IQ-TREE for one alignment.

    io: one-element list/tuple holding the alignment path; IQ-TREE's
    console output is discarded.
    """
    alignment = io[0]
    os.system(f"iqtree -s {alignment} > /dev/null 2>&1")
def print_message(*message):
    """Print the arguments as one line, prefixed with a UTC timestamp.

    Each argument is converted with str() and the pieces are joined by a
    single space; the timestamp format is dd-mm-YYYY HH:MM:SS.
    """
    stamp = strftime("%d-%m-%Y %H:%M:%S", gmtime())
    body = " ".join(str(piece) for piece in message)
    print(stamp + "\t" + body)
# Script entry point: run the whole BUSCO phylogenomics pipeline.
if __name__ == "__main__":
    main()
|
# Exercise 086: read a 3x3 integer matrix from the keyboard and print it
# back with aligned formatting.
print("""
086) Crie um programa que crie uma matriz de dimensรฃo 3x3 e preencha
com valores lidos pelo teclado. No final, mostre a matriz na tela com
a formataรงรฃo correta.
""")
tamanhoDaMatriz = 3  # square matrix dimension (3x3)
matriz = []          # matrix rows, filled below
saida = ''           # pre-formatted text of the matrix, built while reading
# NOTE: the backslash continues the f-string onto the next source line,
# so the rendered title depends on that line's leading whitespace.
titulo = f' Usando nรบmeros inteiros preencha \
a matriz {tamanhoDaMatriz}x{tamanhoDaMatriz} a seguir '
print('-'*len(titulo))
print(titulo)
print('-'*len(titulo))
for linha in range(tamanhoDaMatriz):
    matriz.append([])  # start a new row
    for coluna in range(tamanhoDaMatriz):
        # Prompt cell by cell; strip whitespace before converting to int.
        numero = int(input(f'Cรฉlula [{linha},{coluna}]: ').strip())
        matriz[linha].append(numero)
        # ^3 centers each number in a 3-character field.
        saida += f'[ {matriz[linha][coluna]:^3} ]'
    saida += '\n'
print('-'*len(titulo))
# [:-1] drops the final newline appended after the last row.
print(saida[:-1])
print('-'*len(titulo))
|
# Generated by Django 2.2.13 on 2021-11-18 13:33
from django.db import migrations
class Migration(migrations.Migration):
    """Remove presentation/author fields from the Story model.

    Drops seven Story columns (colors, author info, layout tweak) that are
    no longer used by the app.
    """

    dependencies = [
        ('i_rorelse', '0017_remove_story_media_container_height'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='story',
            name='active_chapter_background_color',
        ),
        migrations.RemoveField(
            model_name='story',
            name='author_email_or_website',
        ),
        migrations.RemoveField(
            model_name='story',
            name='author_name',
        ),
        migrations.RemoveField(
            model_name='story',
            name='narrative_background_color',
        ),
        migrations.RemoveField(
            model_name='story',
            name='narrative_link_color',
        ),
        migrations.RemoveField(
            model_name='story',
            name='narrative_text_color',
        ),
        migrations.RemoveField(
            model_name='story',
            name='pixels_after_final_chapter',
        ),
    ]
|
# Generated by Django 3.0.7 on 2020-06-05 04:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Point.image nullable (image upload becomes optional)."""

    dependencies = [
        ('coleta', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='point',
            name='image',
            field=models.ImageField(null=True, upload_to='point'),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
@author: Hugh Bird
@copyright Copyright 2016, Hugh Bird
@lisence: MIT
@status: alpha
"""
import Elements as Elements
import numpy as np
class ElemMesh:
    """Finite-element mesh container: nodes, elements and physical-group
    bookkeeping, with gmsh import, VTK export, enrichment and point
    projection utilities.

    Attributes:
        nodes  - {node_tag: np.array((x, y, z))}
        elems  - {elem_tag: Elements.Elem* instance}
        nodes_in_physical_groups - {grp_num: [node_tag, ...]}
        phys_group_names         - {grp_num: group_name}
        elems_in_physical_groups - {grp_num: [elem_tag, ...]}
          (filled by calc_elems_in_physgrps())
    """
    def __init__(self):
        self.nodes = {}
        self.elems = {}
        self.nodes_in_physical_groups = {}
        self.phys_group_names = {}
        self.elems_in_physical_groups = {}

    def enrich_elems(self, physgrp, enrich, deriv_enrich, ngp, eclass, ident):
        """ Enrich a set of elements within a physical group
        physgrp - string representing group to enrich.\n
        enrich - enrichment function - function of form f(x) where x is an
        array.\n
        deriv_enrich - tuple of derivatives of the enrichment function.\n
        ngp - number of gauss points per dimension for integration.\n
        eclass - enrichment class handed to Elements.Enrichment.\n
        ident - the identity of the enrichment. Shares degrees of freedom with
        enrichment of the same id.

        Raises KeyError if the group name is unknown (or its element set has
        not been computed; run calc_elems_in_physgrps() first).
        """
        try:
            # Reverse lookup: group name -> group number. IndexError if the
            # name does not exist, KeyError if no element set was built.
            grp_num = [key for key, value in self.phys_group_names.items()
                       if value == physgrp][0]
            elemset = set(self.elems_in_physical_groups[grp_num])
        except (IndexError, KeyError):  # was a bare except; keep it narrow
            print("##########################################################"
                  + "#####################")
            print("ElemMesh:\tFATAL ERROR!")
            print("ElemMesh:\tTrying to build enrichement with identity "
                  + str(ident) + " on physical group " + physgrp + ".")
            print("ElemMesh:\tCould not find group " + physgrp + ".")
            print("ElemMesh:\tAvailible groups are:")
            for key, val in self.phys_group_names.items():
                print(str(key)+":\t"+str(val))
            print("##########################################################"
                  + "#####################")
            raise KeyError
        enrichment = Elements.Enrichment(eclass, ident)
        enrichment.define_func(enrich)
        enrichment.define_deriv_func(deriv_enrich)
        enrichment.define_gauss_order(ngp)
        for eleid in elemset:
            enrichment.enrich_elem(self.elems[eleid])
        print("ElemMesh:\tApplied enrichment with id "+str(ident)+\
              " to " + str(len(elemset)) +" elements in set " + physgrp + ".")

    def build_from_gmsh(self, file_path):
        """ Build element mesh from gmsh ascii .msh file.

        Arg: file_path - path to the .msh file.
        Clears any previously loaded mesh first.
        """
        import Translators.gmshtranslator as gmshtranslator
        self.nodes.clear()
        self.elems.clear()
        self.nodes_in_physical_groups.clear()
        gt = gmshtranslator.gmshTranslator(file_path)
        # Set up the per-group node lists.
        for grp in gt.physical_groups:
            self.nodes_in_physical_groups[grp] = []
        logical_nodes_in_physical_groups = gt.nodes_in_physical_groups
        self.phys_group_names = gt.physical_group_names
        # Check that physical group names are not repeated.
        # NOTE(review): this iterates dict *keys*, which are unique by
        # construction, so the duplicate branch can never fire; it was
        # probably meant to check the group *names* (values). Kept as-is.
        grp_nams = set(self.phys_group_names.keys())
        for key in self.phys_group_names.keys():
            try:
                grp_nams.remove(key)
            except KeyError:  # was a bare except
                print("gmshTranslator:\tParent: Physical group name repeated.")
                print("gmshTranslator:\tParent: Phys grp name repetition " +
                      "lead to errors later on.")
                print("FATAL ERROR")
                raise ValueError
        del grp_nams
        # gmsh element type -> Element class mapping:
        ele_to_Element_dict = {
            gt.line_2_node: Elements.ElemLine2,
            gt.line_3_node: Elements.ElemLine3,
            gt.quadrangle_4_node: Elements.ElemQuad4,
            gt.quadrangle_8_node: Elements.ElemQuad8,
            gt.quadrangle_9_node: Elements.ElemQuad9,
            gt.triangle_3_node: Elements.ElemTri3,
            gt.triangle_6_node: Elements.ElemTri6}

        def elem_supported(eletag, eletype, physgrp, nodes):
            # Condition callback for the translator: accept only mapped types.
            if eletype in ele_to_Element_dict.keys():
                return True
            else:
                print("FATAL ERROR.")
                print(str(eletype) + " is not implemented")
                raise NotImplementedError
                return False

        def add_elem(eletag, eletype, physgrp, nodes):
            # Action callback: instantiate the matching Element class.
            Element_obj = ele_to_Element_dict[eletype]
            self.elems[eletag] = Element_obj(self.nodes, nodes)

        def node_always_true(tag, x, y, z, physgroups):
            return True

        class node_idx:
            # Mutable closure counter: index of the node currently parsed.
            c = 0

        def add_node(tag, x, y, z):
            self.nodes[tag] = np.array((x, y, z))
            for grp_name, grp_nodes in logical_nodes_in_physical_groups.items():
                # Oddly there is offset of 1 in logical arrays...
                if logical_nodes_in_physical_groups[grp_name][node_idx.c+1] == 1:
                    self.nodes_in_physical_groups[grp_name].append(tag)
            node_idx.c += 1

        gt.add_nodes_rule(node_always_true, add_node)
        gt.add_elements_rule(elem_supported, add_elem)
        gt.parse()

    def export_to_vtk(self, export_path, NodeData={}):
        """ Export mesh as VTK unstructured grid (uses evtk).

        export_path - path to save mesh to (without extension).\n
        NodeData - {'varname': {nid: float64}} nodal scalar fields to attach.

        Raises AssertionError when the mesh is empty.
        """
        import Translators.pyevtk.vtk as vtk
        # Check we have a mesh - forgetting to load one confused me once.
        if len(self.elems) == 0:
            print("##########################################################"
                  + "#####################")
            print("ElemMesh:\tFATAL ERROR!")
            print("ElemMesh:\tTrying to export mesh as VTU to file " +
                  export_path)
            print("ElemMesh:\tMesh is empty. Nothing to export!")
            print("ElemMesh:\tDid you remember to build or load a mesh?")
            print("##########################################################"
                  + "#####################")
            raise AssertionError

        def inform(text):
            print("mesh2vtk:\t" + text)

        # Convert nodes to consecutive numbering and collect the NodeData
        # values in the same node order so they can be exported.
        idx = 0
        cnodes_x = []
        cnodes_y = []
        cnodes_z = []
        nid_to_idx = {}
        expt_data = {}
        for key in NodeData.keys():
            expt_data[key] = list()
        for nid, coord in self.nodes.items():
            cnodes_x.append(coord[0])
            cnodes_y.append(coord[1])
            cnodes_z.append(coord[2])
            nid_to_idx[nid] = idx
            idx += 1
            # Export data follows the same ordering as the nodes.
            # (renamed loop var: it used to shadow the builtin `dict`)
            for key, field in NodeData.items():
                expt_data[key].append(field[nid])
        cnodes = (np.array(cnodes_x), np.array(cnodes_y), np.array(cnodes_z))
        # nid_to_idx[node_tag] returns node index
        # cnodes contains (x, y, z) for nodes
        # lists to np.arrays in export data:
        for key in expt_data.keys():
            expt_data[key] = np.array(expt_data[key], dtype=np.float64)
        expt = vtk.VtkFile(export_path, vtk.VtkUnstructuredGrid)
        inform("Writing to " + expt.getFileName() + ".")
        expt.openGrid()
        expt.openPiece(npoints=len(cnodes_x), ncells=len(self.elems.keys()))
        del cnodes_x, cnodes_y, cnodes_z
        # Add nodes to file
        expt.openElement("Points")
        expt.addData("Points", cnodes)
        expt.closeElement("Points")
        # The VTK file wants the ordering as a continuous array of node ids,
        # with offsets which contain the final idx of each element along with
        # an array of element types.
        ele_cnids = []
        offsets = []
        vtkeletype = []
        eletype_to_vtk = {
            Elements.ElemLine2: vtk.VtkLine,
            Elements.ElemLine3: vtk.VtkQuadraticEdge,
            Elements.ElemTri3: vtk.VtkTriangle,
            Elements.ElemTri6: vtk.VtkQuadraticTriangle,
            Elements.ElemQuad4: vtk.VtkQuad,
            Elements.ElemQuad8: vtk.VtkQuadraticQuad,
            Elements.ElemQuad9: vtk.VtkBiQuadraticQuad}
        for elem in self.elems.values():
            for nid in elem.nodes:
                ele_cnids.append(nid_to_idx[nid])
            try:
                offsets.append(len(elem.nodes) + offsets[-1])
            except IndexError:  # first element: no previous offset
                offsets.append(len(elem.nodes))
            vtkeletype.append(eletype_to_vtk[elem.__class__].tid)
        ele_cnids = np.array(ele_cnids, dtype=np.int32)
        offsets = np.array(offsets, dtype=np.uint32)
        vtkeletype = np.array(vtkeletype, dtype=np.uint8)
        expt.openElement("Cells")
        expt.addData("connectivity", ele_cnids)
        expt.addData("offsets", offsets)
        expt.addData("types", vtkeletype)
        expt.closeElement("Cells")
        # Add point data info to file
        if len(expt_data.keys()) > 0:
            _addDataToFile(expt, cellData=None, pointData=expt_data)
        expt.closePiece()
        expt.closeGrid()
        # ACTUAL DATA APPENDING
        expt.appendData(cnodes)
        expt.appendData(ele_cnids).appendData(offsets).appendData(vtkeletype)
        # Add point data data to file
        if len(expt_data.keys()) > 0:
            _appendDataToFile(expt, cellData=None, pointData=expt_data)
        expt.save()
        inform("Saved data.")

    def elem_quad9_to_quad8(self):
        """ Substitutes elem quad 9s for quad 8s (drops the centre node). """
        counter = 0
        for eleid, elem in self.elems.items():
            if isinstance(elem, Elements.ElemQuad9):
                old = elem
                nodes = elem.nodes
                self.elems[eleid] = Elements.ElemQuad8(self.nodes, nodes[0:8])
                counter += 1
        # Fix: the message had quad8/quad9 the wrong way round.
        print("ElemMesh:\tSwapped " + str(counter) + " quad9 elements " + \
              "for quad8s.")

    def disp_nodes(self):
        """ Draws dots where the nodes are in XY plane, coloured per group. """
        # Fix: matplotlib was used here but never imported anywhere in the
        # module (NameError on plt). Import locally so the plotting
        # dependency is only needed when this method is called.
        import matplotlib.pyplot as plt
        colours = ['red', 'blue', 'green', 'magenta', 'cyan']
        idx = 0
        for grp in self.nodes_in_physical_groups.values():
            X = [self.nodes[a][0] for a in grp]
            Y = [self.nodes[a][1] for a in grp]
            # Cycle colours so >5 groups no longer raises IndexError.
            plt.scatter(X, Y, color=colours[idx % len(colours)], alpha=0.5)
            idx += 1
        plt.show()

    def calc_elems_in_physgrps(self):
        """ Calculates the elements attached to nodes in physical groups.

        An element belongs to a group when it shares at least one node with
        that group. Fills self.elems_in_physical_groups.
        """
        for grp_num, grp_nodes in self.nodes_in_physical_groups.items():
            self.elems_in_physical_groups[grp_num] = set()
            grp_n_set = set(grp_nodes)
            for eleid, elem in self.elems.items():
                if grp_n_set.intersection(set(elem.nodes)):
                    self.elems_in_physical_groups[grp_num].add(eleid)
        # Convert the sets to lists (downstream code indexes into them).
        for key in self.elems_in_physical_groups.keys():
            self.elems_in_physical_groups[key] = [a for a \
                in self.elems_in_physical_groups[key]]

    def print_elem_counts(self):
        """ Prints out the counts of different element types.
        """
        def rep_str(txt):
            print("ElemMesh:\t" + txt)
        rep_str("Mesh has following contents...")
        counts = {}
        for elem in self.elems.values():
            # dict.get avoids the bare try/except the original used.
            counts[elem.__class__] = counts.get(elem.__class__, 0) + 1
        for key, val in counts.items():
            rep_str("\t" + str(val) + " elements of type " + str(key) + ".")

    def print_group_elem_counts(self):
        """ Prints out the counts of nodes and elements in physical groups.

        Requires calc_elems_in_physgrps() to have been run.
        """
        def rep_str(txt):
            print("ElemMesh:\t" + txt)
        rep_str("Groups contain following number of elements...")
        for num, name in self.phys_group_names.items():
            elec = len(self.elems_in_physical_groups[num])
            nidc = len(self.nodes_in_physical_groups[num])
            rep_str("\t" + name + " contained " + str(elec) + " elements and "
                    + str(nidc) + " nodes.")

    def remove_line_elems(self):
        """ Removes 1D (line) elements from mesh.
        """
        to_remove = []
        for eletag, elem in self.elems.items():
            if elem.nd() == 1:
                to_remove.append(eletag)
        print("ElemMesh:\tRemoving " + str(len(to_remove)) + " line elements.")
        for tag in to_remove:
            self.elems.pop(tag)

    def project_points(self, ext_points, failure_rule=None):
        """ Map nodes from external points onto this mesh.

        ext_points: a dictionary of {point_id : (numpy_array)point_coord}
        failure_rule: default None raises RuntimeError when a point falls
        outside the mesh; ='closest' assigns the mapping of the nearest
        successfully placed point instead.
        Outputs: {point_id : (<element>, <local_coord>)}
        """
        print("ElemMesh:\tCalculating element local " +
              "coordinates for interpolating nodes.")
        print("ElemMesh:\tMay take a while... (1 time cost)")
        to_place = set(ext_points.keys())
        # Progress-report interval. Fix: clamp to >= 1 - the original
        # floor(len/100) was 0 for fewer than 100 points, which made the
        # modulo below raise ZeroDivisionError.
        rntp = max(1, int(np.floor(len(to_place) / 100)))
        placed = set()
        mapping = {}

        def print_completion(comp):
            print("\rElemMesh:\tDone " + str(comp) + "%",
                  end="")

        for eleid, elem in self.elems.items():
            # Get nids near elem... A must for speed!
            trial_set = elem.is_near(to_place.difference(placed),
                                     ext_points)
            for nid in trial_set:
                # Find the node's coordinate in the element's
                # coordinate system.
                coord = ext_points[nid]
                loc_coord = elem.global_to_local(coord)
                # If the node is in the element, add it to dict
                # and remove from test set.
                if elem.local_in_element(loc_coord):
                    mapping[nid] = (elem, loc_coord)
                    placed.add(nid)
                    if len(placed) % rntp == 0:
                        print_completion(len(placed) / rntp)
        print("")  # Next line after completion thing.
        # See if we have any homeless nodes:
        if len(to_place.difference(placed)) != 0:
            print("ElemMesh:\tExport mesh contains nodes " +
                  "outside of XFEM/FEM mesh!")
            print("ElemMesh:\tRemaining nodes count:" + str(
                len(to_place.difference(placed))) + " of " +
                str(len(ext_points.keys())))
            if failure_rule == 'closest':
                print("ElemMesh:\tSetting to match nearest"
                      + " known node")
                maxerr = 0
                for nid in to_place.difference(placed):
                    coord = ext_points[nid]
                    dist = 9e99
                    nearest_node = None
                    dvect = np.zeros(3)
                    # Find the nearest node...
                    for nid_ext in placed:
                        ext_coord = ext_points[nid_ext]
                        dvect = coord - ext_coord
                        dist_ext = np.linalg.norm(dvect)
                        if dist_ext < dist:
                            nearest_node = nid_ext
                            dist = dist_ext
                    if dist > maxerr:
                        maxerr = dist
                    # Fix: the original copied the mapping of the *last
                    # iterated* node (nid_ext), not the nearest one found.
                    mapping[nid] = mapping[nearest_node]
            elif failure_rule is None:
                # Handle default.
                print("ElemMesh:\tNo failure rule set.")
                print("ElemMesh:\tHomeless projected nodes stops simulation.")
                print("ElemMesh:\tConsider choosing 'closest' rule.")
                # Fix: `Error` was an undefined name (NameError, not a
                # meaningful exception).
                raise RuntimeError("projected points fell outside the mesh")
            else:
                # Handle invalid.
                print("ElemMesh:\tInvalid failure rule chosen for point" \
                      + " projection with nodes out of bounds!")
                print("ElemMesh:\tGiven failure rule was:" + str(failure_rule))
                print("ElemMesh:\tRaising error.")
                # Fix: `InputError` was an undefined name.
                raise ValueError("invalid failure_rule: " + str(failure_rule))
        return mapping
def _addDataToFile(vtkFile, cellData, pointData):
# Point data
if pointData is not None:
keys = list(pointData.keys())
vtkFile.openData("Point", scalars=keys[0])
for key in keys:
data = pointData[key]
vtkFile.addData(key, data)
vtkFile.closeData("Point")
# Cell data
if cellData is not None:
keys = list(cellData.keys())
vtkFile.openData("Cell", scalars=keys[0])
for key in keys:
data = cellData[key]
vtkFile.addData(key, data)
vtkFile.closeData("Cell")
def _appendDataToFile(vtkFile, cellData, pointData):
# Append data to binary section
if pointData is not None:
keys = list(pointData.keys())
for key in keys:
data = pointData[key]
vtkFile.appendData(data)
if cellData is not None:
keys = list(cellData.keys())
for key in keys:
data = cellData[key]
vtkFile.appendData(data)
# Ad-hoc smoke test: build a mesh from a sample gmsh file, plot its nodes
# and export the result as a VTK unstructured grid.
if __name__ == "__main__":
    print("QUICK TEST OF ELEMMESH.PY")
    a = ElemMesh()
    print("\tBUINDING ./RMesh/structuredSquare.msh")  # (typo "BUINDING" kept: runtime string)
    a.build_from_gmsh("./RMesh//structuredSquare.msh")
    print("\tDISPLAYING NODES")
    a.disp_nodes()
    print("\tEXPORTING to ./vtkfile")
    a.export_to_vtk("./vtkfile")
    print("\tDONE\n")
|
# -*- coding: utf-8 -*-
import scrapy
import datetime
from cmccb2b.items import BidNoticeItem
from scrapy.exceptions import NotSupported, CloseSpider
from cmccb2b.utils.html2text import filter_tags
class BidNoticeSpider(scrapy.Spider):
    """Crawl bid/tender notices from the China Mobile B2B portal
    (b2b.10086.cn) and yield BidNoticeItem objects, one per notice,
    including the notice body text and attachment links."""
    name = 'BidNotice'
    domain = 'https://b2b.10086.cn'
    # Bug102: site upgrade on 2019-05-30 added a "_qt" field to the Ajax form
    # data; its value is hidden inside the main HTML page, and a cookie check
    # was added at the same time. pre_parse() was introduced to read "_qt"
    # from the main page and copy it into the form data used by parse().
    start_url = 'https://b2b.10086.cn/b2b/main/listVendorNotice.html?noticeType=2'
    base_query_url = 'https://b2b.10086.cn/b2b/main/listVendorNoticeResult.html?noticeBean.noticeType='  # +[12357]
    base_content_url = 'https://b2b.10086.cn/b2b/main/viewNoticeContent.html?noticeBean.id='  # +id(int)
    # Bug101: site upgrade on 2018-08-10 added User-Agent format and Referer
    # cross-site checks, and changed the notice_type values.
    base_headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/68.0.3440.106 Mobile Safari/537.36',
        'Referer': 'https://b2b.10086.cn/b2b/main/listVendorNotice.html?noticeType=2',
    }
    notice_type_list = ['1', '2', '3', '7', '8', '16']

    def __init__(self, type_id, *args, **kwargs):
        """
        Construct.
        :param type_id: scrapy crawl $spider -a type_id=?
            1:  centralized-sourcing procurement announcement
            2:  procurement announcement (default value)
            3:  pre-qualification announcement
            4:  N/A (test data from 2015; apparently abandoned)
            5:  N/A
            6:  N/A
            7:  winning-candidate publicity (bug101 update; earlier versions
                used this for award-result publicity)
            8:  supplier information collection announcement
            9:  N/A
            16: award-result publicity (added by bug101)
        """
        super(BidNoticeSpider, self).__init__(*args, **kwargs)
        self.type_id = str(int(type_id))
        if self.type_id not in self.notice_type_list:
            self.logger.error(u"Unsupported type_id with {0} and abort!!!".format(type_id))
            raise NotSupported
        else:
            self.logger.info(u"Set crawler argument with type_id={0}".format(self.type_id))
        self.query_url = self.base_query_url + str(self.type_id)
        self.current_page = 1
        self.page_size = 20
        self.form_data = {
            'page.currentPage': str(self.current_page),
            'page.perPageSize': str(self.page_size),
            'noticeBean.sourceCH': '',
            'noticeBean.source': '',
            'noticeBean.title': '',
            'noticeBean.startDate': '',
            'noticeBean.endDate': ''
        }

    def start_requests(self):  # use start_requests() instead of start_urls
        """From Bug102: request the main listing page first to obtain the
        session cookie, then hand the response to pre_parse."""
        return [scrapy.Request(
            url=self.start_url,
            meta={'cookiejar': 1},
            headers=self.base_headers,
            callback=self.pre_parse)]

    def pre_parse(self, response):
        """Parse the main HTML, extract the hidden _qt token, then chain
        into parse() to fetch the Ajax listing data."""
        # for Bug102
        # qt = response.xpath("//input[@name='_qt']/@value").extract_first()
        # Bug103 (2018-08-22): another anti-crawler upgrade moved qt into a
        # js script block and added decoy comment lines; locate it via the
        # "formData" keyword and split per line.
        # Fix: pre-bind qt so the emptiness check below cannot raise a
        # NameError (the original checked len(qt) in a finally: block, which
        # masked the intended CloseSpider when extraction failed).
        qt = ""
        try:
            lines = response.text.split("formData")[2].split("\n")  # crude keyword search, then per-line split
            qt = lines[5].split("'")[1] + lines[6].split("'")[1]  # token stored as two string halves
        except IndexError:
            self.logger.error(u"Can't find _qt key in pre_parse stage, spider will abort!")
            raise CloseSpider("qt_key_not_found")
        except Exception as err:
            self.logger.error(u"Unknown error in pre_parse stage, err msg is {0}. Spider will abort!".format(err))
            raise CloseSpider("unknown_error")
        if len(qt) == 0:
            self.logger.error(u"_qt key is empty in pre_parse stage, spider will abort!")
            raise CloseSpider("qt_key_empty")
        self.form_data['_qt'] = qt
        self.logger.info(u"Success to find key of _qt and fill in formdata, value={0}.".format(qt))
        return [scrapy.FormRequest(
            url=self.query_url,
            formdata=self.form_data,
            headers=self.base_headers,
            meta={'cookiejar': response.meta['cookiejar']},  # keep the session cookie
            callback=self.parse
        )]

    def parse(self, response):
        """Read the Ajax HTML fragment and extract the notice listing."""
        try:
            table = response.xpath("//table")[0]
        except IndexError:
            self.logger.error(u"Can't find <table> in page %i, this spider abort! response=\n%s",
                              self.current_page, response.body)
            raise CloseSpider("html_format_error")
        # -------------------------------------------------------------
        # - Get <tr> and bypass top 2 line for table head
        # - In Python program, default use unicode string, when dump file, just write value as memory.
        #   if you cannot read chinese word, check it as .decode('unicode-escape')
        # - In Python, time() always locate in UTC Zone 0, 8 hours before PEK.
        # - Instead of scrapy.log(), Scrapy 1.4 use scrapy.logger(), which is based on python log system logging.log().
        #   log error with 5 levels: critical, error, warning, info, debug
        # - bid notice ID 64996, source_ch is empty, due to fix error!!!
        # -------------------------------------------------------------
        rec = 0
        for tr in table.xpath("tr[position() > 2]"):
            # scrapy.Request meta holds a reference (shallow copy), so a
            # fresh item must be created on every loop iteration.
            item = BidNoticeItem()
            try:
                item['spider'] = self.name
                item['type_id'] = self.type_id
                item['nid'] = tr.xpath("@onclick").extract_first().split('\'')[1]
                item['source_ch'] = tr.xpath("td[1]/text()").extract_first()
                item['notice_type'] = tr.xpath("td[2]/text()").extract_first()
                item['title'] = tr.xpath("td[3]/a/text()").extract_first()
                # Transfer $published_date from string to datetime
                published_date = tr.xpath("td[4]/text()").extract_first()
                item['published_date'] = datetime.datetime.strptime(published_date, '%Y-%m-%d')
                # Set timestamp with UTC+8hours (Beijing time)
                item['timestamp'] = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
            except IndexError:
                self.logger.warning(u'Some <td> may be empty in page %i, please check HTML as:\n%s',
                                    self.current_page, tr.extract())
            else:
                rec += 1
                # Get context from another parse and append field in item[]
                yield scrapy.Request(
                    url=self.base_content_url+str(item['nid']),
                    headers=self.base_headers,
                    meta={'item': item},
                    callback=self.parse_of_content)
        if rec == 0:
            self.logger.info(u"Find the end of query and close spider now! current page is %i.", self.current_page)
            return
        self.logger.info(u"Current page is %i, and read %i records successful!", self.current_page, rec)
        if rec % self.page_size == 0:
            self.current_page += rec // self.page_size
        else:
            self.current_page += rec // self.page_size + 1
        # Notice: formdata fields must be str, int type will occur yield failed!!
        self.form_data['page.currentPage'] = str(self.current_page)
        yield scrapy.FormRequest(
            url=self.query_url,
            formdata=self.form_data,
            headers=self.base_headers,
            meta={'cookiejar': response.meta['cookiejar']},  # keep the session cookie
            callback=self.parse
        )

    def parse_of_content(self, response):
        """Parse one notice page and store its HTML body as plain text."""
        item = response.meta['item']
        item['notice_url'] = response.url
        item['notice_content'] = filter_tags(response.body.decode('utf-8'))  # strip <script> and other tags from the HTML
        # Parse the HTML and save attachment file info as a list of dicts.
        item['attachment_urls'] = []
        for doc in response.xpath("//a[contains(@href, '/b2b/main/commonDownload.html?')]"):
            url = self.domain + doc.xpath("@href").extract_first()
            description = doc.xpath('font/text()').extract_first()
            item['attachment_urls'].append({
                'url': url,
                'description': description
            })
        yield item
|
from flask import render_template,request,redirect,url_for,abort
from . import main
from ..models import User,Pitches,Comments,Upvote,Downvote
from flask_login import login_required,current_user
from .forms import UpdateProfile,PitchForm,CommentsForm
from .. import db,photos
from ..search import get_pitches,get_pitch
# Views functions
@main.route('/',methods = ["GET","POST"])
def index():
    '''
    Render the home page; on a valid POST, persist the submitted pitch.
    '''
    form = PitchForm()
    if form.validate_on_submit():
        submitted = Pitches(pitch_category=form.pitch_category.data,
                            pitch_title=form.pitch_title.data,
                            pitch=form.pitch.data,
                            user=current_user)
        submitted.save_pitch()
    return render_template('index.html',
                           title="Impression in 60 seconds",
                           pitch_form=form)
@main.route('/game')
def gamepitch():
    '''
    Render the "Game pitch" category page.
    '''
    pitches = get_pitches("Game pitch")
    return render_template('pitch-categories/game.html',
                           title="Impression in 60 seconds-gamepitch",
                           game_pitches=pitches)
@main.route('/interview')
def interviewpitch():
    '''
    Render the "Interview pitch" category page.
    '''
    pitches = get_pitches("Interview pitch")
    return render_template('pitch-categories/interview.html',
                           title="Impression in 60 seconds-interviewpitch",
                           interview_pitches=pitches)
@main.route('/pickuplines')
def pickuplinespitch():
    '''
    Render the "Pick-up lines" category page.
    '''
    pitches = get_pitches("Pick-up lines")
    return render_template('pitch-categories/pickuplines.html',
                           title="Impression in 60 seconds-pickuplinespitch",
                           pickuplines_pitches=pitches)
@main.route('/project')
def projectpitch():
    '''
    Render the "Project pitch" category page.
    '''
    pitches = get_pitches("Project pitch")
    return render_template('pitch-categories/project.html',
                           title="Impression in 60 seconds-projectpitch",
                           project_pitches=pitches)
@main.route('/user/<uname>')
def profile(uname):
    '''Render a user's profile page with all of their pitches; 404 for an
    unknown username.'''
    user = User.query.filter_by(username=uname).first()
    if user is None:
        abort(404)  # unknown / unregistered username
    user_pitches = Pitches.query.filter_by(user_id=user.id)
    return render_template("profile/profile.html", user=user, pitches=user_pitches)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_user_profile(uname):
    '''Show the profile-update form and persist the new bio on submit;
    404 for an unknown username.'''
    user = User.query.filter_by(username=uname).first()
    if user is None:
        abort(404)
    form = UpdateProfile()
    if not form.validate_on_submit():
        return render_template('profile/update.html', form=form)
    user.biodata = form.biodata.data
    db.session.add(user)
    db.session.commit()
    return redirect(url_for('.profile', uname=user.username))
@main.route('/user/<uname>/update/pic',methods=['POST'])
@login_required
def update_picture(uname):
    '''Save an uploaded profile picture and record its path on the user.

    Fix: the stored path was the literal string 'photos/(unknown)' (an
    f-string with no placeholder); it must embed the saved file's name.
    '''
    user = User.query.filter_by(username = uname).first()
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('main.profile',uname = uname))
@main.route('/comments/<int:id>', methods=['GET', 'POST'])
@login_required
def new_comment(id):
    """Display a pitch's comments and accept a new comment submission.

    :param id: primary key of the pitch being commented on
    """
    form = CommentsForm()
    pitch = get_pitch(id)
    # (removed leftover debug print of the pitch object)
    if form.validate_on_submit():
        # Persist the comment attributed to the logged-in user.
        comment = Comments(comment=form.comment.data, user=current_user, pitch_id=id)
        comment.save_comment()
    comments_found = Comments.get_comments(id)
    title = 'Comments'
    return render_template('new_comment.html', title=title, comments_form=form,
                           pitch=pitch, comments_found=comments_found)
@main.route('/like/<int:id>', methods=['POST', 'GET'])
def like(id):
    """Record an upvote for the pitch with the given id, then go home.

    NOTE(review): nothing prevents the same user from upvoting twice.
    The original code looped over the existing upvotes doing nothing
    (``for ...: continue``), suggesting a duplicate-vote check was
    intended but never finished; the dead loop has been removed.
    """
    new_vote = Upvote(pitch_id=id)
    new_vote.save()
    return redirect(url_for('main.index'))
@main.route('/dislike/<int:id>', methods=['POST', 'GET'])
def dislike(id):
    """Record a downvote for the pitch with the given id, then go home.

    NOTE(review): as with like(), the original dead loop over existing
    downvotes (a never-finished duplicate check) has been removed;
    duplicate votes are still possible.
    """
    new_downvote = Downvote(pitch_id=id)
    new_downvote.save()
    return redirect(url_for('main.index'))
import torch
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import uuid
import os, time, json, pickle
import argparse
from networks.openpose import OpenPoseNet
from utils.decode import decode_pose
from utils.transform import transform
def load_weight(net, filename="./models/pose_model_scratch.pth"):
    """Copy saved weights into *net* positionally and switch it to eval mode.

    The checkpoint's tensors are matched to the network's parameters by
    ORDER, not by name, so the checkpoint must list its entries in the
    same order as ``net.state_dict()``.
    """
    checkpoint = torch.load(filename, map_location={"cuda:0": "cpu"})
    state = net.state_dict()
    net_keys = list(state.keys())
    # i-th checkpoint tensor -> i-th parameter of the network.
    remapped = {net_keys[i]: value for i, value in enumerate(checkpoint.values())}
    state.update(remapped)
    net.load_state_dict(state)
    net.eval()
    return net
def show_img(img):
    """Show *img* full-frame (no axis ticks) and archive a PNG copy in results/."""
    figure = plt.figure()
    plt.imshow(img)
    plt.xticks([])
    plt.yticks([])
    plt.show()
    # Save a copy under a short random name so repeated runs don't overwrite.
    os.makedirs("results", exist_ok=True)
    uid = uuid.uuid4()
    figure.savefig(f"results/{str(uid)[:4]}.png")
    print(f"saving result image for path results/{str(uid)[:4]}.png ")
def detect(net, img_tensor):
    """Run the network and return its last two stage outputs as HWC numpy arrays.

    Returns ``(heatmap, pafs)`` where heatmap comes from the final output
    and pafs from the second-to-last, both with channels moved last.
    """
    with torch.no_grad():
        predictions = net(img_tensor)
    heatmap = predictions[-1][0].detach().cpu().numpy().transpose(1, 2, 0)
    pafs = predictions[-2][0].detach().cpu().numpy().transpose(1, 2, 0)
    return heatmap, pafs
def main(img_path: str):
    """Run end-to-end pose detection on a single image file."""
    # Load the model and its pretrained weights.
    model = load_weight(OpenPoseNet())

    # Preprocess: keep an OpenCV-compatible copy for drawing the result.
    img = Image.open(img_path)
    oriImg = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2RGB)
    batch = transform(img).unsqueeze(0)

    # Inference and pose decoding.
    heatmap, pafs = detect(model, batch)
    _, result_img, _, _ = decode_pose(oriImg, heatmap, pafs)

    # Display / save the rendered poses.
    show_img(result_img)
if __name__ == "__main__":
    # Bug fix: the original guard used assignment (=) instead of equality
    # (==), which is a SyntaxError and prevented the script from running.
    parser = argparse.ArgumentParser()
    # nargs="?" makes the positional argument optional so the declared
    # default ("1.jpg") actually takes effect when no argument is given
    # (argparse ignores `default` on a required positional).
    parser.add_argument("arg1", nargs="?", help="image path name", type=str, default="1.jpg")
    args = parser.parse_args()
    root_path = "img/"
    main(str(root_path + args.arg1))
|
import numpy as np
from config import ERASE_FACTOR
from loggers import logger_her
class Buffer:
    """Experience replay buffer with batched eviction.

    When the buffer reaches ``max_size``, the oldest
    ``ERASE_FACTOR * max_size`` entries are dropped in one go.
    """

    # Fraction of the buffer evicted whenever it overflows.
    factor = ERASE_FACTOR

    def __init__(self, size):
        self.max_size = size
        self.data = []

    @property
    def size(self):
        """Number of stored transitions."""
        return len(self.data)

    def add(self, experience):
        """Append an iterable of transitions, evicting the oldest on overflow."""
        self.data.extend(experience)
        if len(self.data) >= self.max_size:
            cutoff = int(Buffer.factor * self.max_size)
            self.data = self.data[cutoff:]

    def sample(self, size):
        """Draw *size* transitions uniformly (with replacement if the buffer is short)."""
        with_replacement = size > len(self.data)
        chosen = np.random.choice(self.size, size=size, replace=with_replacement)
        return [self.data[i] for i in chosen]

    def log_stats(self):
        """Log the 0 vs -1 reward mix and the buffer occupancy."""
        # Rewards are assumed to be 0 or -1; -r maps them to bins 0 and 1.
        tally = np.zeros(2)
        for (_state, _action, reward, _next_state) in self.data:
            tally[-reward] += 1
        tally /= tally.sum()
        logger_her.info("0/-1 reward: {}/{}".format(tally[0], tally[1]))
        logger_her.info("Stored experience: {}".format(self.size))
import math
import torch
import torch.nn as nn
class TrimmedConv(nn.Module):
    """Graph layer aggregating neighbour features with a trimmed mean.

    Node features are passed through a shared linear map; for each node,
    the transformed neighbour messages are sorted per-dimension, trimmed
    by ``tperc`` at both ends, and averaged.
    """

    def __init__(self,
                 in_features,
                 out_features,
                 bias=False,
                 tperc=0.45):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Shared linear transform applied before aggregation.
        self.w = nn.Linear(in_features, out_features, bias=bias)
        # Fraction trimmed from EACH end of the sorted messages.
        self.tperc = tperc

    def reset_parameters(self):
        self.w.reset_parameters()

    def forward(self, x, nbrs):
        """Aggregate; *nbrs* is an iterable of per-node neighbour index lists."""
        transformed = self.w(x)
        pooled = []
        for neighbourhood in nbrs:
            msgs, _ = torch.sort(transformed[neighbourhood], dim=0)
            k = math.floor(msgs.size(0) * self.tperc)
            if k > 0:
                # Drop the k smallest and k largest values per dimension.
                msgs = msgs[k:-k]
            pooled.append(torch.mean(msgs, dim=0))
        return torch.stack(pooled)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.in_features}, {self.out_features}, tperc={self.tperc})"
|
# SPDX-FileCopyrightText: Fondazione Istituto Italiano di Tecnologia
# SPDX-License-Identifier: BSD-3-Clause
import os
import json
import matplotlib.pyplot as plt
# =============
# CONFIGURATION
# =============
# Path to the data for regenerating Fig.8 in the paper
data_path = "../datasets/inference/fig_8_blending_coefficients/blending_coefficients.txt"

# =============
# RETRIEVE DATA
# =============

# Resolve the data file relative to this script's own location.
script_directory = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(script_directory, data_path)
with open(data_path, 'r') as openfile:
    blending_coefficients = json.load(openfile)

# One trajectory of blending coefficients per expert.
w1 = blending_coefficients["w_1"]
w2 = blending_coefficients["w_2"]
w3 = blending_coefficients["w_3"]
w4 = blending_coefficients["w_4"]

# ====
# PLOT
# ====

plt.figure()

# Convert inference-call indices to a time axis in seconds
# (one inference call per 1/50 s).
time = [step / 50.0 for step in range(len(w1))]

plt.plot(time, w1, 'r', label="theta_1")
plt.plot(time, w2, 'b', label="theta_2")
plt.plot(time, w3, 'g', label="theta_3")
plt.plot(time, w4, 'y', label="theta_4")

# Plot configuration
plt.xlim([0, time[-1]])
plt.title("Blending coefficients profiles")
plt.ylabel("Blending coefficients")
plt.xlabel("Time [s]")
plt.legend()

plt.show()
plt.pause(1)
#! /usr/bin/python
import sys
from string import split
import numpy as np
#------------------------FL_image.py--------------------
#------Jennifer S. Sims, Columbia University, 2016------
# This program can be run on any histogram exported from Photoshop and transposed such that Col0=numerals 0-255 representing pixel intensities and Col1=numbers of pixels at each intensity.
# It is intended for use on the "FIELD" histogram describing the entire illuminated surgical field, in order that an ROI within that field be compared to its contextual pixel intensity.
# Keep this program in the same directory as FL_rundirectory.py in order to run it using FL_rundirectory.py.
#-------------- NOTES ON PARAMETERS OF ANALYSIS-------
# Background Threshold = median of all pixels, unless this median is greater than 26. A pixel intensity of 26 is 10% of the possible intensity range. This can be adjusted (Line 103-104).
# MaxRange = a specified percentage of pixel intensities. For example, if the maximum intensity in the field is 250, MaxRange is set at 5%, and MaxType is set at 'mean', the "maximum" used for calculating fractional fluorescence intensity would be the mean intensity of all pixels 237.5 < x < 250.
#------------- NOTES ON OUTPUT COLUMNS---------------(SE=self explanatory)
# 'TotalPixels' -- SE
# 'ZeroPixels' -- Number of pixels of intensity 0
# 'NonZeroPixels' -- Number of pixels of intensity > 0
# Maximum (size and type) -- percent of intensities included, maximum calculated as their 'mean' or 'median'. By default 'mean.'
# 'MaxRange' -- based on above parameters, the range of pixel intensities included to calculate a maximum for the field
# 'MeanIntensity' -- mean intensity value of all non-zero pixels; defines low vs. medium threshold
# 'MedianIntensity' -- median intensity value of all non-zero pixels; defines background vs. low threshold
# 'MidPoint' -- midpoint value between the maximum and background values; defines medium vs. high threshold
# 'Background' -- same as MedianIntensity
# 'BgPixels' -- Number of pixels in background bin
# 'LowRange' -- Numerical intensity values of Low Intensity bin
# 'LowPixels' -- Number of pixels in Low Intensity bin
# 'MidRange' -- Numerical intensity values of Medium Intensity bin
# 'MidPixels' -- Number of pixels in Medium Intensity bin
# 'HighRange' -- Numerical intensity values of High Intensity bin
# 'HighPixels' -- Number of pixels in High Intensity bin
# This is intended to provide rudimentary image analysis for use in conjunction with more sophisticated commercial software, for the convenience of garnering quantitative information from screenshots taken on various cameras, and employs no novel algorithms. For your convenience adjusting this code to your own needs, I have included print lines throughout, generally after the declaration of key variables, which can be uncommented as needed (#).
#------------------ INPUT --------------------------
infile = sys.argv[1] # histogram file name (from FIELD) *** NO HEADER ***
image_outfile = sys.argv[2] # outfile name -- this temp file will contain the columns described above for this single histogram input
percentmax = int(sys.argv[3]) # size (percentage of 255) of 'MaxRange' described above. Default input in FL_rundirectory.py is 5, and can be changed in FL_rundirectory.py.
maxtype = sys.argv[4] # for pixels in 'MaxRange' described above, choose 'mean' or 'median'. Default input in FL_rundirectory.py is 'mean', and can be changed in FL_rundirectory.py.
# NOTE(review): rstrip('.tsv') strips any trailing '.', 't', 's', 'v'
# characters, not the literal suffix '.tsv' -- e.g. 'test.tsv' becomes
# 'te'. Verify how insplit is consumed downstream before relying on it.
instrip = infile.rstrip('.tsv')
insplit = instrip.rsplit('_',1)
#---------- read histogram of pixels by intensity (0-255) into a dictionary
# dINT maps pixel intensity (0-255) -> pixel count; zero-count rows are skipped.
dINT = {}
input = open(infile,'r') # NOTE(review): shadows the builtin 'input'
for line in input.readlines():
	llist = split(line)
	val = int(llist[0])
	try:
		num = int(llist[1])
		if num > 0:
			dINT[val] = num
		else:
			pass
	except IndexError:
		# rows without a second column (malformed lines) are silently ignored
		pass
input.close()
# print len(dINT.keys())
total = sum(dINT.values())
# print 'total pixels',total
#--------- make the synthetic subpopulation pixels that are greater than zero and sort them
# Expand the histogram back into one list entry per non-zero-intensity pixel.
totlist = []
for val in dINT.keys():
	if val != 0:
		num = dINT[val]
		for x in range(0,num):
			totlist.append(val)
#--------- sort the population
totlist.sort(reverse=True)
# print 'nonzero pixels',len(totlist)
#--------- to calculate the maximum, based on the percentage you inputted, first import the intensities as a numpy array
pxlints = np.array(totlist).astype(float)
maxval = int(np.amax(pxlints))
# NOTE(review): under Python 2 this is integer division, so maxrange1 is
# truncated toward zero -- confirm that is intended.
maxrange1 = percentmax*maxval/100
# Pixels within the top 'percentmax' percent of the maximum intensity.
maxpxlints = pxlints[np.where(pxlints > maxval-maxrange1)]
# print 'new pixels', maxpxlints
# 'maxmid' is the representative field maximum: mean or median of the top range.
if maxtype == 'median':
	maxmid = np.median(maxpxlints)
elif maxtype == 'mean':
	maxmid = np.mean(maxpxlints)
# print 'maxvalues',maxval,'-',maxrange1
# print 'MAX',maxtype,maxmid
#---------- calculate the other statistics on the non-zero pixel population
intmean = np.mean(pxlints)
intmed = np.median(pxlints)
intstd = np.std(pxlints)
# print 'mean intensity :',intmean
# print 'median intensity :',intmed
# print 'stdev intensity :',intstd
#------------------- DEFINE THE OBJECTIVE INTENSITY BINS ----------------------
# 'maxmid' is the mean or median of the 'MaxRange'
# The median of the non-zero pixels defines the background vs. low threshold.
# The mean of the non-zero pixels defines the low vs. medium threshold.
# The midpoint between 'maxmid' and background (non-zero median) defines the medium vs. high threshold.
valrange = maxmid - intmed
midval = int(intmed+0.5*valrange)
# Cap the background threshold at 26 (10% of the 0-255 intensity range).
if intmed > 26:
	bgval = 26
else:
	bgval = int(intmed)
lowrange = intmean - intmed
lowval = int(intmean)
# Per-bin pixel lists plus running pixel counters bg/l/m/h.
bglist = []
lolist = []
midlist = []
hilist = []
bg = 0
l = 0
m = 0
h = 0
for val in dINT.keys():
	num = dINT[val]
	if val <= bgval:
		for x in range(0,num):
			bglist.append(val)
			bg = bg + 1
	elif val <= lowval:
		for x in range(0,num):
			lolist.append(val)
			l = l + 1
	elif val <= midval:
		for x in range(0,num):
			midlist.append(val)
			m = m + 1
	elif val > midval:
		for x in range(0,num):
			hilist.append(val)
			h = h + 1
# print 'Bkgd','0 :',bgval,bg
# print 'Low',bgval,':',lowval,l
# print 'Med',lowval,':',midval,m
# print 'Hi',midval,':',maxval,h
#-------------------- WRITE THE OUTPUT ---------------------
# outfile is one the inputs
output = open(image_outfile,'w')
# Header row: column names h1-h16, interpolated from locals via %vars().
h1 = 'TotalPixels'
h2 = 'ZeroPixels'
h3 = 'NonZeroPixels'
h4 = 'Max' + str(maxtype)
h5 = 'MaxRange'
h6 = 'MeanIntensity'
h7 = 'MedianIntensity'
h8 = 'MidPoint'
h9 = 'Background'
h10 = 'BgPixels'
h11 = 'LowRange'
h12 = 'LowPixels'
h13 = 'MidRange'
h14 = 'MidPixels'
h15 = 'HighRange'
h16 = 'HighPixels'
output.write('%(h1)s \t%(h2)s \t%(h3)s \t%(h4)s \t%(h5)s \t%(h6)s \t%(h7)s \t%(h8)s \t%(h9)s \t%(h10)s \t%(h11)s \t%(h12)s \t%(h13)s \t%(h14)s \t%(h15)s \t%(h16)s \n' %vars())
# Data row: pt1-pt16 mirror the header columns above.
pt1 = total
# NOTE(review): dINT[0] raises KeyError when the histogram contains no
# zero-intensity pixels (zero-count rows were skipped on input) -- confirm
# inputs always include an intensity-0 row.
pt2 = dINT[0]
pt3 = len(totlist)
pt4 = maxmid
pt5 = maxrange1
pt6 = intmean
pt7 = intmed
pt8 = midval
pt9 = bgval
pt10 = bg
pt11 = str(int(bgval)) + '-' + str(int(lowval))
pt12 = l
pt13 = str(int(lowval+1)) + '-' + str(int(midval))
pt14 = m
pt15 = str(int(midval+1)) + '-' + str(int(maxval))
pt16 = h
output.write('%(pt1)s \t%(pt2)s \t%(pt3)s \t%(pt4)s \t%(pt5)s \t%(pt6)s \t%(pt7)s \t%(pt8)s \t%(pt9)s \t%(pt10)s \t%(pt11)s \t%(pt12)s \t%(pt13)s \t%(pt14)s \t%(pt15)s \t%(pt16)s \n' %vars())
output.close()
print str(image_outfile),'PixelRange',len(dINT.keys()),'midval',midval,'...image analysis complete!'
import json
import random
def get_random_object_type() -> str:
    """Pick one object type at random from the bundled JSON list."""
    with open('/app/resources/object_types.json', 'r') as f_in:
        object_types = json.load(f_in)
    return random.choice(object_types)
|
"""
RiscEmu (c) 2021 Anton Lydike
SPDX-License-Identifier: MIT
"""
from .Config import RunConfig
from .helpers import *
from collections import defaultdict
from .Exceptions import InvalidRegisterException
class Registers:
    """
    Represents a bunch of registers
    """
    def __init__(self, conf: RunConfig):
        """
        Initialize the register configuration, respecting the RunConfig conf
        :param conf: The RunConfig
        """
        # Registers read as 0 until first written.
        self.vals = defaultdict(lambda: 0)
        # Most recently written / read register names, used by dump() highlighting.
        self.last_set = None
        self.last_read = None
        self.conf = conf

    def dump(self, full=False):
        """
        Dump all registers to stdout
        :param full: If True, floating point registers are dumped too
        """
        named_regs = [self._reg_repr(reg) for reg in Registers.named_registers()]

        # One output row per register index 0..11 (s-registers go up to s11).
        lines = [[] for i in range(12)]
        if not full:
            regs = [('a', 8), ('s', 12), ('t', 7)]
        else:
            regs = [
                ('a', 8),
                ('s', 12),
                ('t', 7),
                ('ft', 8),
                ('fa', 8),
                ('fs', 12),
            ]
        for name, s in regs:
            for i in range(12):
                if i >= s:
                    # Pad columns for register families with fewer than 12 entries.
                    lines[i].append(" " * 15)
                else:
                    reg = '{}{}'.format(name, i)
                    lines[i].append(self._reg_repr(reg))

        print("Registers[{},{}](".format(
            FMT_ORANGE + FMT_UNDERLINE + 'read' + FMT_NONE,
            FMT_ORANGE + FMT_BOLD + 'written' + FMT_NONE
        ))
        if not full:
            print("\t" + " ".join(named_regs[0:3]))
            print("\t" + " ".join(named_regs[3:]))
            print("\t" + "--------------- " * 3)
        else:
            print("\t" + " ".join(named_regs))
            print("\t" + "--------------- " * 6)
        for line in lines:
            print("\t" + " ".join(line))
        print(")")

    def dump_reg_a(self):
        """
        Dump the a registers
        """
        print("Registers[a]:" + " ".join(self._reg_repr('a{}'.format(i)) for i in range(8)))

    def _reg_repr(self, reg):
        """Format one register as name=hex, color-coded by last access and value."""
        txt = '{:4}=0x{:08X}'.format(reg, self.get(reg, False))
        # fp is the ABI alias for s0; compare against the canonical name.
        if reg == 'fp':
            reg = 's0'
        if reg == self.last_set:
            return FMT_ORANGE + FMT_BOLD + txt + FMT_NONE
        if reg == self.last_read:
            return FMT_ORANGE + FMT_UNDERLINE + txt + FMT_NONE
        if reg == 'zero':
            return txt
        if self.get(reg, False) == 0 and reg not in Registers.named_registers():
            # Dim uninteresting zero-valued registers.
            return FMT_GRAY + txt + FMT_NONE
        return txt

    def set(self, reg, val, mark_set=True) -> bool:
        """
        Set a register content to val
        :param reg: The register to set
        :param val: The new value
        :param mark_set: If True, marks this register as "last accessed" (only used internally)
        :return: If the operation was successful
        """
        if reg == 'zero':
            # Writes to the hard-wired zero register are discarded.
            return False
        #if reg not in Registers.all_registers():
        #    raise InvalidRegisterException(reg)
        # replace fp register with s0, as these are the same register
        # (bug fix: this previously canonicalized fp to s1 while get() and
        # _reg_repr() canonicalize it to s0, so a value written through
        # 'fp' was never visible when read back through 'fp'. In the
        # RISC-V ABI fp is x8, i.e. s0, not s1.)
        if reg == 'fp':
            reg = 's0'
        if mark_set:
            self.last_set = reg
        # check 32 bit signed bounds
        self.vals[reg] = bind_twos_complement(val)
        return True

    def get(self, reg, mark_read=True):
        """
        Returns the contents of register reg
        :param reg: The register name
        :param mark_read: If the register should be marked as "last read" (only used internally)
        :return: The contents of register reg
        """
        #if reg not in Registers.all_registers():
        #    raise InvalidRegisterException(reg)
        if reg == 'fp':
            reg = 's0'
        if mark_read:
            self.last_read = reg
        return self.vals[reg]

    @staticmethod
    def all_registers():
        """
        Return a list of all valid registers
        :return: The list
        """
        return ['zero', 'ra', 'sp', 'gp', 'tp', 's0', 'fp',
                't0', 't1', 't2', 't3', 't4', 't5', 't6',
                's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11',
                'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7',
                'ft0', 'ft1', 'ft2', 'ft3', 'ft4', 'ft5', 'ft6', 'ft7',
                'fs0', 'fs1', 'fs2', 'fs3', 'fs4', 'fs5', 'fs6', 'fs7', 'fs8', 'fs9', 'fs10', 'fs11',
                'fa0', 'fa1', 'fa2', 'fa3', 'fa4', 'fa5', 'fa6', 'fa7']

    @staticmethod
    def named_registers():
        """
        Return all named registers
        :return: The list
        """
        return ['zero', 'ra', 'sp', 'gp', 'tp', 'fp']
|
def load(h):
    """Return a GRIB1-style parameter table as a tuple of definition dicts.

    Each entry maps a parameter ``code`` (with matching ``abbr``) to its
    human-readable ``title`` and ``units`` string. The final entry
    (code 255) is the missing-value indicator and carries no ``units``
    key. The *h* argument is accepted but unused here -- presumably a
    handle required by the table-loader interface; confirm against the
    caller.
    """
    return ({'abbr': 129, 'code': 129, 'title': 'Z Geopotential', 'units': 'm**2 s**-2'},
            {'abbr': 130, 'code': 130, 'title': 'T Temperature', 'units': 'K'},
            {'abbr': 131,
             'code': 131,
             'title': 'U U component of wind',
             'units': 'm s**-1'},
            {'abbr': 132,
             'code': 132,
             'title': 'V V component of wind',
             'units': 'm s**-1'},
            {'abbr': 133,
             'code': 133,
             'title': 'Q Specific humidity',
             'units': 'kg kg**-1'},
            {'abbr': 134, 'code': 134, 'title': 'SP Surface pressure', 'units': 'Pa'},
            {'abbr': 137,
             'code': 137,
             'title': 'TCWV Total column water vapour',
             'units': 'kg m**-2'},
            {'abbr': 138,
             'code': 138,
             'title': 'VO Vorticity (relative)',
             'units': 's**-1'},
            {'abbr': 141,
             'code': 141,
             'title': 'SD Snow depth',
             'units': 'm of water equivalent'},
            {'abbr': 142,
             'code': 142,
             'title': 'LSP Large-scale precipitation',
             'units': 'm'},
            {'abbr': 143,
             'code': 143,
             'title': 'CP Convective precipitation',
             'units': 'm'},
            {'abbr': 144,
             'code': 144,
             'title': 'SF Snowfall',
             'units': 'm of water equivalent'},
            {'abbr': 146,
             'code': 146,
             'title': 'SSHF Surface sensible heat flux',
             'units': 'J m**-2'},
            {'abbr': 147,
             'code': 147,
             'title': 'SLHF Surface latent heat flux',
             'units': 'J m**-2'},
            {'abbr': 149, 'code': 149, 'title': 'TSW Total soil wetness', 'units': 'm'},
            {'abbr': 151,
             'code': 151,
             'title': 'MSL Mean sea level pressure',
             'units': 'Pa'},
            {'abbr': 155, 'code': 155, 'title': 'D Divergence', 'units': 's**-1'},
            {'abbr': 164, 'code': 164, 'title': 'TCC Total cloud cover', 'units': '0 - 1'},
            {'abbr': 165,
             'code': 165,
             'title': '10U 10 metre U wind component',
             'units': 'm s**-1'},
            {'abbr': 166,
             'code': 166,
             'title': '10V 10 metre V wind component',
             'units': 'm s**-1'},
            {'abbr': 167, 'code': 167, 'title': '2T 2 metre temperature', 'units': 'K'},
            {'abbr': 168,
             'code': 168,
             'title': '2D 2 metre dewpoint temperature',
             'units': 'K'},
            {'abbr': 172, 'code': 172, 'title': 'LSM Land-sea mask', 'units': '0 - 1'},
            {'abbr': 176,
             'code': 176,
             'title': 'SSR Surface net solar radiation',
             'units': 'J m**-2'},
            {'abbr': 177,
             'code': 177,
             'title': 'STR Surface net thermal radiation',
             'units': 'J m**-2'},
            {'abbr': 178,
             'code': 178,
             'title': 'TSR Top net solar radiation',
             'units': 'J m**-2'},
            {'abbr': 179,
             'code': 179,
             'title': 'TTR Top net thermal radiation',
             'units': 'J m**-2'},
            {'abbr': 180,
             'code': 180,
             'title': 'EWSS Eastward turbulent surface stress',
             'units': 'N m**-2 s'},
            {'abbr': 181,
             'code': 181,
             'title': 'NSSS Northward turbulent surface stress',
             'units': 'N m**-2 s'},
            {'abbr': 182,
             'code': 182,
             'title': 'E Evaporation',
             'units': 'm of water equivalent'},
            {'abbr': 205, 'code': 205, 'title': 'RO Runoff', 'units': 'm'},
            {'abbr': None, 'code': 255, 'title': '- Indicates a missing value'})
|
import pdb
import numpy as np
from cvxopt import spmatrix, matrix, solvers
from numpy import linalg as la
from scipy import linalg
from scipy import sparse
from cvxopt.solvers import qp
import datetime
from Utilities import Curvature
from numpy import hstack, inf, ones
from scipy.sparse import vstack
# from osqp import OSQP
# Silence CVXOPT's per-iteration progress output.
solvers.options['show_progress'] = False
class ControllerLMPC():
    """Create the controllerLMPC
    Attributes:
        solve: given x0 computes the control action
        addTrajectory: given a ClosedLoopData object adds the trajectory to SS, Qfun, uSS and updates the iteration index
        addPoint: this function allows to add the closed loop data at iteration j to the SS of iteration (j-1)
        update: this function can be used to set SS, Qfun, uSS and the iteration index.
    """

    def __init__(self, numSS_Points, numSS_it, N, Qslack, Qlane, Q, R, dR, dt, map, Laps, TimeLMPC, Solver, inputConstraints):
        """Initialization
        Arguments:
            numSS_Points: number of points selected from the previous trajectories to build SS
            numSS_it: number of previous trajectories selected to build SS
            N: horizon length
            Q,R: weight to define cost function h(x,u) = ||x||_Q + ||u||_R
            dR: weight to define the input rate cost h(x,u) = ||x_{k+1}-x_k||_dR
            n,d: state and input dimension
            shift: given the closest point x_t^j to x(t) the controller start selecting the point for SS from x_{t+shift}^j
            map: map
            Laps: maximum number of laps the controller can run (used to avoid dynamic allocation)
            TimeLMPC: maximum time [s] that a lap can last (used to avoid dynamic allocation)
            Solver: solver used in the reformulation of the controllerLMPC as QP
        """
        self.numSS_Points = numSS_Points
        self.numSS_it = numSS_it
        self.N = N
        self.Qslack = Qslack
        self.Qlane = Qlane
        self.Q = Q
        self.R = R
        self.dR = dR
        # State and input dimensions are inferred from the weight matrices.
        self.n = Q.shape[1]
        self.d = R.shape[1]
        self.dt = dt
        self.map = map
        self.Solver = Solver
        self.LapTime = 0
        self.itUsedSysID = 1
        self.inputConstraints = inputConstraints
        self.OldInput = np.zeros((1,2))

        # Initialize the following quantities to avoid dynamic allocation
        # (10000 acts as an "unused slot" sentinel value).
        NumPoints = int(TimeLMPC / dt) + 1
        self.LapCounter = 10000 * np.ones(Laps).astype(int)  # Time at which each j-th iteration is completed
        self.TimeSS = 10000 * np.ones(Laps).astype(int)  # Time at which each j-th iteration is completed
        self.SS = 10000 * np.ones((NumPoints, 6, Laps))  # Sampled Safe SS
        self.uSS = 10000 * np.ones((NumPoints, 2, Laps))  # Input associated with the points in SS
        self.Qfun = 0 * np.ones((NumPoints, Laps))  # Qfun: cost-to-go from each point in SS
        self.SS_glob = 10000 * np.ones((NumPoints, 6, Laps))  # SS in global (X-Y) used for plotting
        self.zVector = np.array([0.0, 0.0, 0.0, 0.0, 10.0, 0.0])

        # Initialize the controller iteration
        self.it = 0

        # Build matrices for inequality constraints
        self.F, self.b = _LMPC_BuildMatIneqConst(self)
        self.xPred = []

    def solve(self, x0, uOld = np.zeros([0, 0])):
        """Computes control action
        Arguments:
            x0: current state position

        Side effects: updates xPred, uPred, OldInput, LinPoints, LinInput,
        zVector, uVector, feasible, solverTime and linearizationTime.
        """
        # Pull controller state into locals for readability below.
        n = self.n; d = self.d
        F = self.F; b = self.b
        SS = self.SS; Qfun = self.Qfun
        uSS = self.uSS; TimeSS = self.TimeSS
        Q = self.Q; R = self.R
        dR =self.dR; OldInput = self.OldInput
        N = self.N; dt = self.dt
        it = self.it
        numSS_Points = self.numSS_Points
        Qslack = self.Qslack
        LinPoints = self.LinPoints
        LinInput = self.LinInput
        map = self.map

        # Select Points from SS
        # Unwrap the curvilinear abscissa (state index 4) when the predicted
        # terminal point has crossed the finish line ahead of the car.
        if (self.zVector[4]-x0[4] > map.TrackLength/2):
            self.zVector[4] = np.max([self.zVector[4] - map.TrackLength,0])
            self.LinPoints[4,-1] = self.LinPoints[4,-1]- map.TrackLength

        # Rank completed laps by their cost-to-go at the start (fastest first).
        sortedLapTime = np.argsort(self.Qfun[0, 0:it])

        SS_PointSelectedTot = np.empty((n, 0))
        Succ_SS_PointSelectedTot = np.empty((n, 0))
        Succ_uSS_PointSelectedTot = np.empty((d, 0))
        Qfun_SelectedTot = np.empty((0))
        # Gather safe-set points (and their successors) from the best laps.
        # NOTE(review): on Python 3, numSS_Points / self.numSS_it is float
        # division -- _SelectPoints is presumably tolerant of that; confirm.
        for jj in sortedLapTime[0:self.numSS_it]:
            SS_PointSelected, uSS_PointSelected, Qfun_Selected = _SelectPoints(self, jj, self.zVector, numSS_Points / self.numSS_it + 1)
            Succ_SS_PointSelectedTot = np.append(Succ_SS_PointSelectedTot, SS_PointSelected[:,1:], axis=1)
            Succ_uSS_PointSelectedTot = np.append(Succ_uSS_PointSelectedTot, uSS_PointSelected[:,1:], axis=1)
            SS_PointSelectedTot = np.append(SS_PointSelectedTot, SS_PointSelected[:,0:-1], axis=1)
            Qfun_SelectedTot = np.append(Qfun_SelectedTot, Qfun_Selected[0:-1], axis=0)

        self.SS_PointSelectedTot = SS_PointSelectedTot
        self.Qfun_SelectedTot = Qfun_SelectedTot

        # Identify the time-varying linearized dynamics (timed for diagnostics).
        startTimer = datetime.datetime.now()
        Atv, Btv, Ctv, indexUsed_list = _LMPC_EstimateABC(self, sortedLapTime)
        endTimer = datetime.datetime.now(); deltaTimer = endTimer - startTimer
        L, npG, npE = _LMPC_BuildMatEqConst(self, Atv, Btv, Ctv, N, n, d)
        self.linearizationTime = deltaTimer

        # Build Terminal cost and Constraint
        G, E = _LMPC_TermConstr(self, npG, npE, N, n, d, SS_PointSelectedTot)
        M, q = _LMPC_BuildMatCost(self, Qfun_SelectedTot, numSS_Points, N, Qslack, Q, R, dR, OldInput)

        # Solve QP
        startTimer = datetime.datetime.now()
        if self.Solver == "CVX":
            res_cons = qp(M, matrix(q), F, matrix(b), G, E * matrix(x0) + L)
            if res_cons['status'] == 'optimal':
                feasible = 1
            else:
                feasible = 0
                print(res_cons['status'])
            Solution = np.squeeze(res_cons['x'])
        # elif self.Solver == "OSQP":
        #     # Adaptation for OSQP from https://github.com/alexbuyval/RacingLMPC/
        #     res_cons, feasible = osqp_solve_qp(sparse.csr_matrix(M), q, sparse.csr_matrix(F), b, sparse.csr_matrix(G), np.add(np.dot(E,x0),L[:,0]))
        #     Solution = res_cons.x
        self.feasible = feasible
        endTimer = datetime.datetime.now(); deltaTimer = endTimer - startTimer
        self.solverTime = deltaTimer

        # Extract solution and set linearization points
        xPred, uPred, lambd, slack = _LMPC_GetPred(Solution, n, d, N, np)

        # Terminal point/input as convex combination of safe-set successors.
        self.zVector = np.dot(Succ_SS_PointSelectedTot, lambd)
        self.uVector = np.dot(Succ_uSS_PointSelectedTot, lambd)

        self.xPred = xPred.T
        if self.N == 1:
            self.uPred = np.array([[uPred[0], uPred[1]]])
            self.LinInput = np.array([[uPred[0], uPred[1]]])
        else:
            self.uPred = uPred.T
            self.LinInput = np.vstack((uPred.T[1:, :], self.uVector))

        # Shift the linearization trajectory one step forward for the next solve.
        self.LinPoints = np.vstack((xPred.T[1:,:], self.zVector))

        self.OldInput = uPred.T[0,:]

    def addTrajectory(self, ClosedLoopData):
        """update iteration index and construct SS, uSS and Qfun
        Arguments:
            ClosedLoopData: ClosedLoopData object
        """
        it = self.it

        self.TimeSS[it] = ClosedLoopData.SimTime
        self.LapCounter[it] = ClosedLoopData.SimTime
        self.SS[0:(self.TimeSS[it] + 1), :, it] = ClosedLoopData.x[0:(self.TimeSS[it] + 1), :]
        self.SS_glob[0:(self.TimeSS[it] + 1), :, it] = ClosedLoopData.x_glob[0:(self.TimeSS[it] + 1), :]
        self.uSS[0:self.TimeSS[it], :, it] = ClosedLoopData.u[0:(self.TimeSS[it]), :]
        self.Qfun[0:(self.TimeSS[it] + 1), it] = _ComputeCost(ClosedLoopData.x[0:(self.TimeSS[it] + 1), :],
                                                              ClosedLoopData.u[0:(self.TimeSS[it]), :], self.map.TrackLength)
        # Back-fill zero entries so the cost-to-go keeps decreasing by one
        # per step past the recorded trajectory.
        for i in np.arange(0, self.Qfun.shape[0]):
            if self.Qfun[i, it] == 0:
                self.Qfun[i, it] = self.Qfun[i - 1, it] - 1

        # On the very first lap, seed the linearization trajectory from the data.
        if self.it == 0:
            self.LinPoints = self.SS[1:self.N + 2, :, it]
            self.LinInput = self.uSS[1:self.N + 1, :, it]

        self.it = self.it + 1

    def addPoint(self, x, u):
        """at iteration j add the current point to SS, uSS and Qfun of the previous iteration
        Arguments:
            x: current state
            u: current input
            i: at the j-th iteration i is the time at which (x,u) are recorded
        """
        Counter = self.TimeSS[self.it - 1]
        # Shift the abscissa by one track length: the appended point belongs
        # to the lap that is about to start.
        self.SS[Counter, :, self.it - 1] = x + np.array([0, 0, 0, 0, self.map.TrackLength, 0])
        self.uSS[Counter, :, self.it - 1] = u

        # The above two lines are needed as the once the predicted trajectory has crossed the finish line the goal is
        # to reach the end of the lap which is about to start
        if self.Qfun[Counter, self.it - 1] == 0:
            self.Qfun[Counter, self.it - 1] = self.Qfun[Counter, self.it - 1] - 1
        self.TimeSS[self.it - 1] = self.TimeSS[self.it - 1] + 1

    def update(self, SS, uSS, Qfun, TimeSS, it, LinPoints, LinInput):
        """update controller parameters. This function is useful to transfer information among controllerLMPC controller
        with different tuning
        Arguments:
            SS: sampled safe set
            uSS: input associated with the points in SS
            Qfun: Qfun: cost-to-go from each point in SS
            TimeSS: time at which each j-th iteration is completed
            it: current iteration
            LinPoints: points used in the linearization and system identification procedure
            LinInput: inputs associated with the points used in the linearization and system identification procedure
        """
        self.SS = SS
        self.uSS = uSS
        self.Qfun = Qfun
        self.TimeSS = TimeSS
        self.it = it
        self.LinPoints = LinPoints
        self.LinInput = LinInput
# ======================================================================================================================
# ======================================================================================================================
# =============================== Internal functions for controllerLMPC reformulation to QP ======================================
# ======================================================================================================================
# ======================================================================================================================
def osqp_solve_qp(P, q, G=None, h=None, A=None, b=None, initvals=None):
    """
    Solve a Quadratic Program defined as:
        minimize
            (1/2) * x.T * P * x + q.T * x
        subject to
            G * x <= h
            A * x == b
    using OSQP <https://github.com/oxfordcontrol/osqp>.
    Parameters
    ----------
    P : scipy.sparse.csc_matrix Symmetric quadratic-cost matrix.
    q : numpy.array Quadratic cost vector.
    G : scipy.sparse.csc_matrix Linear inequality constraint matrix.
    h : numpy.array Linear inequality constraint vector.
    A : scipy.sparse.csc_matrix, optional Linear equality constraint matrix.
    b : numpy.array, optional Linear equality constraint vector.
    initvals : numpy.array, optional Warm-start guess vector.
    Returns
    -------
    x : array, shape=(n,)
        Solution to the QP, if found, otherwise ``None``.
    Note
    ----
    OSQP requires `P` to be symmetric, and won't check for errors otherwise.
    Check out for this point if you e.g. `get nan values
    <https://github.com/oxfordcontrol/osqp/issues/10>`_ in your solutions.

    NOTE(review): the ``from osqp import OSQP`` import at the top of this
    file is commented out, so calling this function currently raises
    NameError until that import is restored.
    """
    osqp = OSQP()
    if G is not None:
        # OSQP takes two-sided bounds l <= Ax <= u; inequalities get l = -inf.
        l = -inf * ones(len(h))
        if A is not None:
            # Stack inequalities and equalities; equalities use l = u = b.
            qp_A = vstack([G, A]).tocsc()
            qp_l = hstack([l, b])
            qp_u = hstack([h, b])
        else:  # no equality constraint
            qp_A = G
            qp_l = l
            qp_u = h
        osqp.setup(P=P, q=q, A=qp_A, l=qp_l, u=qp_u, verbose=False, polish=True)
    else:
        osqp.setup(P=P, q=q, A=None, l=None, u=None, verbose=False)
    if initvals is not None:
        osqp.warm_start(x=initvals)
    res = osqp.solve()
    if res.info.status_val != osqp.constant('OSQP_SOLVED'):
        print("OSQP exited with status '%s'" % res.info.status)
    feasible = 0
    # Inaccurate / max-iteration results are still treated as usable.
    if res.info.status_val == osqp.constant('OSQP_SOLVED') or res.info.status_val == osqp.constant('OSQP_SOLVED_INACCURATE') or res.info.status_val == osqp.constant('OSQP_MAX_ITER_REACHED'):
        feasible = 1
    return res, feasible
def _LMPC_BuildMatCost(controllerLMPC, Sel_Qfun, numSS_Points, N, Qslack, Q, R, dR, uOld):
n = Q.shape[0]
P = Q
vt = 2
Qlane = controllerLMPC.Qlane
b = [Q] * (N)
Mx = linalg.block_diag(*b)
c = [R + 2 * np.diag(dR)] * (N) # Need to add dR for the derivative input cost
Mu = linalg.block_diag(*c)
# Need to condider that the last input appears just once in the difference
Mu[Mu.shape[0] - 1, Mu.shape[1] - 1] = Mu[Mu.shape[0] - 1, Mu.shape[1] - 1] - dR[1]
Mu[Mu.shape[0] - 2, Mu.shape[1] - 2] = Mu[Mu.shape[0] - 2, Mu.shape[1] - 2] - dR[0]
# Derivative Input Cost
OffDiaf = -np.tile(dR, N-1)
np.fill_diagonal(Mu[2:], OffDiaf)
np.fill_diagonal(Mu[:, 2:], OffDiaf)
# np.savetxt('Mu.csv', Mu, delimiter=',', fmt='%f')
M00 = linalg.block_diag(Mx, P, Mu)
quadLaneSlack = Qlane[0] * np.eye(2*controllerLMPC.N)
M0 = linalg.block_diag(M00, np.zeros((numSS_Points, numSS_Points)), Qslack, quadLaneSlack)
# np.savetxt('M0.csv', M0, delimiter=',', fmt='%f')
xtrack = np.array([vt, 0, 0, 0, 0, 0])
q0 = - 2 * np.dot(np.append(np.tile(xtrack, N + 1), np.zeros(R.shape[0] * N)), M00)
# Derivative Input
q0[n*(N+1):n*(N+1)+2] = -2 * np.dot( uOld, np.diag(dR) )
# np.savetxt('q0.csv', q0, delimiter=',', fmt='%f')
linLaneSlack = Qlane[1] * np.ones(2*controllerLMPC.N)
q = np.append(np.append(np.append(q0, Sel_Qfun), np.zeros(Q.shape[0])), linLaneSlack)
# np.savetxt('q.csv', q, delimiter=',', fmt='%f')
M = 2 * M0 # Need to multiply by two because CVX considers 1/2 in front of quadratic cost
if controllerLMPC.Solver == "CVX":
M_sparse = spmatrix(M[np.nonzero(M)], np.nonzero(M)[0].astype(int), np.nonzero(M)[1].astype(int), M.shape)
M_return = M_sparse
else:
M_return = M
return M_return, q
def _LMPC_BuildMatIneqConst(controllerLMPC):
N = controllerLMPC.N
n = controllerLMPC.n
numSS_Points = controllerLMPC.numSS_Points
# Buil the matrices for the state constraint in each region. In the region i we want Fx[i]x <= bx[b]
Fx = np.array([[0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., -1.]])
bx = np.array([[controllerLMPC.map.halfWidth], # max ey
[controllerLMPC.map.halfWidth]]) # max ey
# Buil the matrices for the input constraint in each region. In the region i we want Fx[i]x <= bx[b]
Fu = np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]])
bu = np.array([[controllerLMPC.inputConstraints[0,0]], # Max Steering
[controllerLMPC.inputConstraints[0,1]], # Min Steering
[controllerLMPC.inputConstraints[1,0]], # Max Acceleration
[controllerLMPC.inputConstraints[1,1]]]) # Min Acceleration
# Now stuck the constraint matrices to express them in the form Fz<=b. Note that z collects states and inputs
# Let's start by computing the submatrix of F relates with the state
rep_a = [Fx] * (N)
Mat = linalg.block_diag(*rep_a)
NoTerminalConstr = np.zeros((np.shape(Mat)[0], n)) # No need to constraint also the terminal point
Fxtot = np.hstack((Mat, NoTerminalConstr))
bxtot = np.tile(np.squeeze(bx), N)
# Let's start by computing the submatrix of F relates with the input
rep_b = [Fu] * (N)
Futot = linalg.block_diag(*rep_b)
butot = np.tile(np.squeeze(bu), N)
# Let's stack all together
rFxtot, cFxtot = np.shape(Fxtot)
rFutot, cFutot = np.shape(Futot)
Dummy1 = np.hstack((Fxtot, np.zeros((rFxtot, cFutot))))
Dummy2 = np.hstack((np.zeros((rFutot, cFxtot)), Futot))
FDummy = np.vstack((Dummy1, Dummy2))
I = -np.eye(numSS_Points)
FDummy2 = linalg.block_diag(FDummy, I)
Fslack = np.zeros((FDummy2.shape[0], n))
F_hard = np.hstack((FDummy2, Fslack))
LaneSlack = np.zeros((F_hard.shape[0], 2 * N))
colIndexPositive = []
rowIndexPositive = []
colIndexNegative = []
rowIndexNegative = []
for i in range(0, N):
colIndexPositive.append(i * 2 + 0)
colIndexNegative.append(i * 2 + 1)
rowIndexPositive.append(i * Fx.shape[0] + 0) # Slack on second element of Fx
rowIndexNegative.append(i * Fx.shape[0] + 1) # Slack on third element of Fx
LaneSlack[rowIndexPositive, colIndexPositive] = -1.0
LaneSlack[rowIndexNegative, rowIndexNegative] = -1.0
F_1 = np.hstack((F_hard, LaneSlack))
I = - np.eye(2*N)
Zeros = np.zeros((2*N, F_hard.shape[1]))
Positivity = np.hstack((Zeros, I))
F = np.vstack((F_1, Positivity))
# np.savetxt('F.csv', F, delimiter=',', fmt='%f')
# pdb.set_trace()
b_1 = np.hstack((bxtot, butot, np.zeros(numSS_Points)))
b = np.hstack((b_1, np.zeros(2*N)))
# np.savetxt('b.csv', b, delimiter=',', fmt='%f')
if controllerLMPC.Solver == "CVX":
F_sparse = spmatrix(F[np.nonzero(F)], np.nonzero(F)[0].astype(int), np.nonzero(F)[1].astype(int), F.shape)
F_return = F_sparse
else:
F_return = F
return F_return, b
def _SelectPoints(controllerLMPC, it, x0, numSS_Points):
    """Select the numSS_Points stored safe-set samples of lap `it` nearest to
    x0 (1-norm), returning their states, inputs and cost-to-go values.

    When the current prediction crosses the finish line, the returned
    cost-to-go is shifted so laps remain comparable across the start line.
    """
    SS = controllerLMPC.SS
    uSS = controllerLMPC.uSS
    TimeSS = controllerLMPC.TimeSS
    SS_glob = controllerLMPC.SS_glob
    Qfun = controllerLMPC.Qfun
    xPred = controllerLMPC.xPred
    map = controllerLMPC.map
    TrackLength = map.TrackLength
    currIt = controllerLMPC.it
    # Stored trajectory of lap `it` (all recorded samples but the last).
    x = SS[:, 0:(TimeSS[it]-1), it]
    u = uSS[:, 0:(TimeSS[it]-1), it]
    x_glob = SS_glob[:, :, it]
    # 1-norm distance of every stored sample to x0.
    oneVec = np.ones((x.shape[0], 1))
    x0Vec = (np.dot(np.array([x0]).T, oneVec.T)).T
    diff = x - x0Vec
    norm = la.norm(diff, 1, axis=1)
    MinNorm = np.argmin(norm)
    # Window of numSS_Points centred on the nearest sample; fall back to a
    # forward window when too close to the start of the lap.
    if (MinNorm - numSS_Points/2 >= 0):
        indexSSandQfun = range(-int(numSS_Points/2) + MinNorm, int(numSS_Points/2) + MinNorm + 1)
    else:
        indexSSandQfun = range(MinNorm, MinNorm + int(numSS_Points))
    SS_Points = x[indexSSandQfun, :].T
    SSu_Points = u[indexSSandQfun, :].T
    SS_glob_Points = x_glob[indexSSandQfun, :].T
    Sel_Qfun = Qfun[indexSSandQfun, it]
    # Modify the cost if the predicion has crossed the finisch line.
    # NOTE(review): `xPred == []` compares an ndarray with a list once xPred
    # is populated; relies on that result being usable in `if` -- confirm.
    if xPred == []:
        Sel_Qfun = Qfun[indexSSandQfun, it]
    elif (np.all((xPred[:, 4] > TrackLength) == False)):
        # No predicted state past the finish line: use the raw cost-to-go.
        Sel_Qfun = Qfun[indexSSandQfun, it]
    elif it < currIt - 1:
        # Completed lap: append the following lap's initial cost-to-go.
        Sel_Qfun = Qfun[indexSSandQfun, it] + Qfun[0, it + 1]
    else:
        # Current lap: approximate with elapsed lap time plus the number of
        # predicted steps still before the finish line.
        sPred = xPred[:, 4]
        predCurrLap = controllerLMPC.N - sum(sPred > TrackLength)
        currLapTime = controllerLMPC.LapTime
        Sel_Qfun = Qfun[indexSSandQfun, it] + currLapTime + predCurrLap
    return SS_Points, SSu_Points, Sel_Qfun
def _ComputeCost(x, u, TrackLength):
Cost = 10000 * np.ones((x.shape[0])) # The cost has the same elements of the vector x --> time +1
# Now compute the cost moving backwards in a Dynamic Programming (DP) fashion.
# We start from the last element of the vector x and we sum the running cost
for i in range(0, x.shape[0]):
if (i == 0): # Note that for i = 0 --> pick the latest element of the vector x
Cost[x.shape[0] - 1 - i] = 0
elif x[x.shape[0] - 1 - i, 4]< TrackLength:
Cost[x.shape[0] - 1 - i] = Cost[x.shape[0] - 1 - i + 1] + 1
else:
Cost[x.shape[0] - 1 - i] = 0
return Cost
def _LMPC_TermConstr(controllerLMPC, G, E, N ,n ,d , SS_Points):
# Update the matrices for the Equality constraint in the controllerLMPC. Now we need an extra row to constraint the terminal point to be equal to a point in SS
# The equality constraint has now the form: G_LMPC*z = E_LMPC*x0 + TermPoint.
# Note that the vector TermPoint is updated to constraint the predicted trajectory into a point in SS. This is done in the FTOCP_LMPC function
TermCons = np.zeros((n, (N + 1) * n + N * d))
TermCons[:, N * n:(N + 1) * n] = np.eye(n)
G_enlarged = np.vstack((G, TermCons))
G_lambda = np.zeros(( G_enlarged.shape[0], SS_Points.shape[1] + n))
G_lambda[G_enlarged.shape[0] - n:G_enlarged.shape[0], :] = np.hstack((-SS_Points, np.eye(n)))
G_LMPC0 = np.hstack((G_enlarged, G_lambda))
G_ConHull = np.zeros((1, G_LMPC0.shape[1]))
G_ConHull[-1, G_ConHull.shape[1]-SS_Points.shape[1]-n:G_ConHull.shape[1]-n] = np.ones((1,SS_Points.shape[1]))
G_LMPC_hard = np.vstack((G_LMPC0, G_ConHull))
SlackLane = np.zeros((G_LMPC_hard.shape[0], 2*N))
G_LMPC = np.hstack((G_LMPC_hard, SlackLane))
E_LMPC = np.vstack((E, np.zeros((n + 1, n))))
# np.savetxt('G.csv', G_LMPC, delimiter=',', fmt='%f')
# np.savetxt('E.csv', E_LMPC, delimiter=',', fmt='%f')
if controllerLMPC.Solver == "CVX":
G_LMPC_sparse = spmatrix(G_LMPC[np.nonzero(G_LMPC)], np.nonzero(G_LMPC)[0].astype(int), np.nonzero(G_LMPC)[1].astype(int), G_LMPC.shape)
E_LMPC_sparse = spmatrix(E_LMPC[np.nonzero(E_LMPC)], np.nonzero(E_LMPC)[0].astype(int), np.nonzero(E_LMPC)[1].astype(int), E_LMPC.shape)
G_LMPC_return = G_LMPC_sparse
E_LMPC_return = E_LMPC_sparse
else:
G_LMPC_return = G_LMPC
E_LMPC_return = E_LMPC
return G_LMPC_return, E_LMPC_return
def _LMPC_BuildMatEqConst(controllerLMPC, A, B, C, N, n, d):
# Buil matrices for optimization (Convention from Chapter 15.2 Borrelli, Bemporad and Morari MPC book)
# We are going to build our optimization vector z \in \mathbb{R}^((N+1) \dot n \dot N \dot d), note that this vector
# stucks the predicted trajectory x_{k|t} \forall k = t, \ldots, t+N+1 over the horizon and
# the predicte input u_{k|t} \forall k = t, \ldots, t+N over the horizon
Gx = np.eye(n * (N + 1))
Gu = np.zeros((n * (N + 1), d * (N)))
E = np.zeros((n * (N + 1), n))
E[np.arange(n)] = np.eye(n)
L = np.zeros((n * (N + 1) + n + 1, 1)) # n+1 for the terminal constraint
L[-1] = 1 # Summmation of lamba must add up to 1
for i in range(0, N):
ind1 = n + i * n + np.arange(n)
ind2x = i * n + np.arange(n)
ind2u = i * d + np.arange(d)
Gx[np.ix_(ind1, ind2x)] = -A[i]
Gu[np.ix_(ind1, ind2u)] = -B[i]
L[ind1, :] = C[i]
G = np.hstack((Gx, Gu))
if controllerLMPC.Solver == "CVX":
L_sparse = spmatrix(L[np.nonzero(L)], np.nonzero(L)[0].astype(int), np.nonzero(L)[1].astype(int), L.shape)
L_return = L_sparse
else:
L_return = L
return L_return, G, E
def _LMPC_GetPred(Solution,n,d,N, np):
xPred = np.squeeze(np.transpose(np.reshape((Solution[np.arange(n*(N+1))]),(N+1,n))))
uPred = np.squeeze(np.transpose(np.reshape((Solution[n*(N+1)+np.arange(d*N)]),(N, d))))
lambd = Solution[(n*(N+1)+d*N):(Solution.shape[0]-n-2*N)]
slack = Solution[Solution.shape[0]-n-2*N:Solution.shape[0]-2*N]
laneSlack = Solution[Solution.shape[0]-2*N:]
# print np.sum(np.abs(laneSlack))
# if np.sum(np.abs(laneSlack)) > 0.5:
# pdb.set_trace()
return xPred, uPred, lambd, slack
# ======================================================================================================================
# ======================================================================================================================
# ========================= Internal functions for Local Regression and Linearization ==================================
# ======================================================================================================================
# ======================================================================================================================
def _LMPC_EstimateABC(controllerLMPC, sortedLapTime):
    """Estimate time-varying affine models x_{k+1} = A_k x + B_k u + C_k
    along the horizon by local regression on the stored safe-set data.

    Returns (Atv, Btv, Ctv, indexUsed_list) with one entry per horizon step.
    """
    LinPoints = controllerLMPC.LinPoints
    LinInput = controllerLMPC.LinInput
    N = controllerLMPC.N
    n = controllerLMPC.n
    d = controllerLMPC.d
    TimeSS = controllerLMPC.TimeSS
    # NOTE(review): LapCounter is read but never used; TimeSS is passed into
    # RegressionAndLinearization's LapCounter parameter below -- confirm intent.
    LapCounter = controllerLMPC.LapCounter
    PointAndTangent = controllerLMPC.map.PointAndTangent
    dt = controllerLMPC.dt
    it = controllerLMPC.it
    SS = controllerLMPC.SS
    uSS = controllerLMPC.uSS
    ParallelComputation = 0  # NOTE(review): unused flag
    Atv = []; Btv = []; Ctv = []; indexUsed_list = []
    # Use the itUsedSysID fastest laps for system identification.
    usedIt = sortedLapTime[0:controllerLMPC.itUsedSysID]  # range(controllerLMPC.it-controllerLMPC.itUsedSysID, controllerLMPC.it)
    MaxNumPoint = 40  # Need to reason on how these points are selected
    for i in range(0, N):
        # qp and matrix are module-level solver handles (presumably cvxopt;
        # defined outside this view -- confirm).
        Ai, Bi, Ci, indexSelected = RegressionAndLinearization(LinPoints, LinInput, usedIt, SS, uSS, TimeSS,
                                                               MaxNumPoint, qp, n, d, matrix, PointAndTangent, dt, i)
        Atv.append(Ai)
        Btv.append(Bi)
        Ctv.append(Ci)
        indexUsed_list.append(indexSelected)
    return Atv, Btv, Ctv, indexUsed_list
def RegressionAndLinearization(LinPoints, LinInput, usedIt, SS, uSS, LapCounter, MaxNumPoint, qp, n, d, matrix, PointAndTangent, dt, i):
    """Local linear regression + analytic linearization of the vehicle model
    around the i-th point of the candidate trajectory.

    The velocity states (vx, vy, wz) are identified from stored data with a
    kernel-weighted local least squares fit; the kinematic states (epsi, s,
    ey) are linearized analytically around x0 = LinPoints[i, :].

    Returns (Ai, Bi, Ci, indexSelected): x_{k+1} ~= Ai x_k + Bi u_k + Ci,
    plus the per-lap lists of data indices used in the regression.
    """
    x0 = LinPoints[i, :]
    Ai = np.zeros((n, n))
    Bi = np.zeros((n, d))
    Ci = np.zeros((n, 1))

    # ---- sample selection for the local regression ----
    h = 5           # kernel bandwidth
    lamb = 0.0      # ridge regularization
    stateFeatures = [0, 1, 2]  # vx, vy, wz
    ConsiderInput = 1
    if ConsiderInput == 1:
        # Scale vx down so the distance metric is not dominated by speed.
        scaling = np.array([[0.1, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0]])
        xLin = np.hstack((LinPoints[i, stateFeatures], LinInput[i, :]))
    else:
        scaling = np.array([[1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0]])
        xLin = LinPoints[i, stateFeatures]

    indexSelected = []
    K = []
    for ii in usedIt:
        indexSelected_i, K_i = ComputeIndex(h, SS, uSS, LapCounter, ii, xLin, stateFeatures, scaling, MaxNumPoint,
                                            ConsiderInput)
        indexSelected.append(indexSelected_i)
        K.append(K_i)

    # =========================
    # ====== Identify vx ====== (driven by the acceleration input)
    inputFeatures = [1]
    Q_vx, M_vx = Compute_Q_M(SS, uSS, indexSelected, stateFeatures, inputFeatures, usedIt, np, matrix, lamb, K)
    yIndex = 0
    b = Compute_b(SS, yIndex, usedIt, matrix, M_vx, indexSelected, K, np)
    Ai[yIndex, stateFeatures], Bi[yIndex, inputFeatures], Ci[yIndex] = LMPC_LocLinReg(Q_vx, b, stateFeatures,
                                                                                      inputFeatures, qp)

    # =======================================
    # ====== Identify Lateral Dynamics ====== (driven by the steering input)
    inputFeatures = [0]
    Q_lat, M_lat = Compute_Q_M(SS, uSS, indexSelected, stateFeatures, inputFeatures, usedIt, np, matrix, lamb, K)
    yIndex = 1  # vy
    b = Compute_b(SS, yIndex, usedIt, matrix, M_lat, indexSelected, K, np)
    Ai[yIndex, stateFeatures], Bi[yIndex, inputFeatures], Ci[yIndex] = LMPC_LocLinReg(Q_lat, b, stateFeatures,
                                                                                      inputFeatures, qp)
    yIndex = 2  # wz
    b = Compute_b(SS, yIndex, usedIt, matrix, M_lat, indexSelected, K, np)
    Ai[yIndex, stateFeatures], Bi[yIndex, inputFeatures], Ci[yIndex] = LMPC_LocLinReg(Q_lat, b, stateFeatures,
                                                                                      inputFeatures, qp)

    # ===========================
    # ===== Linearization =======
    vx = x0[0]; vy = x0[1]
    wz = x0[2]; epsi = x0[3]
    s = x0[4]; ey = x0[5]
    if s < 0:
        print("s is negative, here the state: \n", LinPoints)

    startTimer = datetime.datetime.now()  # Start timer for controllerLMPC iteration
    cur = Curvature(s, PointAndTangent)
    den = 1 - cur * ey

    # ===========================
    # ===== Linearize epsi ======
    # epsi_{k+1} = epsi + dt * ( wz - (vx * np.cos(epsi) - vy * np.sin(epsi)) / (1 - cur * ey) * cur )
    depsi_vx = -dt * np.cos(epsi) / den * cur
    depsi_vy = dt * np.sin(epsi) / den * cur
    depsi_wz = dt
    depsi_epsi = 1 - dt * (-vx * np.sin(epsi) - vy * np.cos(epsi)) / den * cur
    depsi_s = 0  # Because cur = constant
    depsi_ey = dt * (vx * np.cos(epsi) - vy * np.sin(epsi)) / (den ** 2) * cur * (-cur)
    Ai[3, :] = [depsi_vx, depsi_vy, depsi_wz, depsi_epsi, depsi_s, depsi_ey]
    Ci[3] = epsi + dt * (wz - (vx * np.cos(epsi) - vy * np.sin(epsi)) / (1 - cur * ey) * cur) - np.dot(Ai[3, :], x0)

    # ===========================
    # ===== Linearize s =========
    # s_{k+1} = s + dt * ( (vx * np.cos(epsi) - vy * np.sin(epsi)) / (1 - cur * ey) )
    ds_vx = dt * (np.cos(epsi) / den)
    ds_vy = -dt * (np.sin(epsi) / den)
    ds_wz = 0
    ds_epsi = dt * (-vx * np.sin(epsi) - vy * np.cos(epsi)) / den
    ds_s = 1  # + Ts * (Vx * cos(epsi) - Vy * sin(epsi)) / (1 - ey * rho) ^ 2 * (-ey * drho);
    # BUG FIX: the denominator was written as (den * 2); the derivative of
    # num/den w.r.t. ey requires den squared, matching the depsi_ey term
    # above and the squared denominator in the MATLAB comment on ds_s.
    ds_ey = -dt * (vx * np.cos(epsi) - vy * np.sin(epsi)) / (den ** 2) * (-cur)
    Ai[4, :] = [ds_vx, ds_vy, ds_wz, ds_epsi, ds_s, ds_ey]
    Ci[4] = s + dt * ((vx * np.cos(epsi) - vy * np.sin(epsi)) / (1 - cur * ey)) - np.dot(Ai[4, :], x0)

    # ===========================
    # ===== Linearize ey ========
    # ey_{k+1} = ey + dt * (vx * np.sin(epsi) + vy * np.cos(epsi))
    dey_vx = dt * np.sin(epsi)
    dey_vy = dt * np.cos(epsi)
    dey_wz = 0
    dey_epsi = dt * (vx * np.cos(epsi) - vy * np.sin(epsi))
    dey_s = 0
    dey_ey = 1
    Ai[5, :] = [dey_vx, dey_vy, dey_wz, dey_epsi, dey_s, dey_ey]
    Ci[5] = ey + dt * (vx * np.sin(epsi) + vy * np.cos(epsi)) - np.dot(Ai[5, :], x0)

    endTimer = datetime.datetime.now(); deltaTimer_tv = endTimer - startTimer
    return Ai, Bi, Ci, indexSelected
def Compute_Q_M(SS, uSS, indexSelected, stateFeatures, inputFeatures, usedIt, np, matrix, lamb, K):
    """Assemble the weighted least-squares normal matrix for local regression.

    Stacks the selected state/input feature rows from every used iteration
    into a regressor matrix, appends a column of ones (affine term) to form
    M, and builds Q = M^T diag(K) M + lamb * I with the kernel weights K.
    Returns (Q, M); Q is wrapped by the solver container `matrix`.
    """
    regressors = np.empty((0, len(stateFeatures) + len(inputFeatures)))
    weights = np.empty(0)
    for counter, it in enumerate(usedIt):
        states = np.squeeze(SS[np.ix_(indexSelected[counter], stateFeatures, [it])])
        inputs = np.squeeze(uSS[np.ix_(indexSelected[counter], inputFeatures, [it])], axis=2)
        regressors = np.append(regressors, np.hstack((states, inputs)), axis=0)
        weights = np.append(weights, K[counter])
    M = np.hstack((regressors, np.ones((regressors.shape[0], 1))))
    Q0 = np.dot(np.dot(M.T, np.diag(weights)), M)
    Q = matrix(Q0 + lamb * np.eye(Q0.shape[0]))
    return Q, M
def Compute_b(SS, yIndex, usedIt, matrix, M, indexSelected, K, np):
    """Assemble the linear term b = -M^T diag(K) y for the local regression.

    The regression targets y are the yIndex-th state at the *next* time
    step (indexSelected + 1) from each used iteration, weighted by the
    kernel values K. Wrapped by the solver container `matrix`.
    """
    targets = np.empty(0)
    weights = np.empty(0)
    for counter, it in enumerate(usedIt):
        targets = np.append(targets, np.squeeze(SS[np.ix_(indexSelected[counter] + 1, [yIndex], [it])]))
        weights = np.append(weights, K[counter])
    return matrix(-np.dot(np.dot(M.T, np.diag(weights)), targets))
def LMPC_LocLinReg(Q, b, stateFeatures, inputFeatures, qp):
    """Solve the local least-squares QP and unpack the solution as [A | B | C].

    `qp(Q, b)` returns a mapping whose 'x' entry is the stacked solution:
    state coefficients first, then input coefficients, then the affine term.
    """
    import numpy as np

    solution = qp(Q, b)  # solution ordered as [A B C]
    flat = np.squeeze(np.array(solution['x']))
    nState = len(stateFeatures)
    nInput = len(inputFeatures)
    return flat[0:nState], flat[nState:nState + nInput], flat[-1]
def ComputeIndex(h, SS, uSS, LapCounter, it, x0, stateFeatures, scaling, MaxNumPoint, ConsiderInput):
    """Select the stored samples of lap `it` closest to x0 in a scaled L1 ball.

    Returns (index, K): the indices of at most MaxNumPoint samples whose
    scaled 1-norm distance to x0 is below h, and their Epanechnikov kernel
    weights. Only samples with a recorded successor are candidates.
    """
    import numpy as np
    from numpy import linalg as la

    # Candidate samples: all recorded points of lap `it` except the last
    # (each candidate must have a successor for the regression target).
    nSamples = SS[0:LapCounter[it], :, it].shape[0] - 1
    if ConsiderInput == 1:
        DataMatrix = np.hstack((SS[0:LapCounter[it] - 1, stateFeatures, it], uSS[0:LapCounter[it] - 1, :, it]))
    else:
        DataMatrix = SS[0:LapCounter[it] - 1, stateFeatures, it]

    # Broadcast x0 across all candidates and compute scaled L1 distances.
    reference = (np.dot(np.array([x0]).T, np.ones((nSamples, 1)).T)).T
    norm = la.norm(np.dot(DataMatrix - reference, scaling), 1, axis=1)

    indexTot = np.squeeze(np.where(norm < h))
    if indexTot.shape[0] >= MaxNumPoint:
        index = np.argsort(norm)[0:MaxNumPoint]  # keep only the nearest points
    else:
        index = indexTot

    # Epanechnikov kernel weights for the selected points.
    K = (1 - (norm[index] / h) ** 2) * 3 / 4
    return index, K
# -*- coding: utf-8 -*-
from functools import total_ordering
@total_ordering
class Metric:
    """An evaluation-metric value with a name, a score and total ordering.

    Args:
        name (str): Metric name; stored upper-cased.
        score (float): Numeric score used for all comparisons.
        detailed_score (str, optional): Pre-formatted score string shown by
            ``repr`` when non-empty (default: "").
        higher_better (bool, optional): ``False`` means smaller scores are
            better (default: ``True``). Stored for callers; comparisons
            always order by ``score`` ascending.
    """

    def __init__(self, name, score, detailed_score="", higher_better=True):
        self.name = name.upper()
        self.score = score
        self.detailed_score = detailed_score
        self.higher_better = higher_better

    def __eq__(self, other):
        return self.score == other.score

    def __lt__(self, other):
        return self.score < other.score

    def __repr__(self):
        # Prefer the detailed string; fall back to a 2-decimal rendering.
        shown = self.detailed_score or "%.2f" % self.score
        return self.name + ' = ' + shown
|
def validate_npi(npi):
    """Validate a 10-digit NPI number via its Luhn check digit.

    Accepts an int or a 10-character numeric string; any other type, wrong
    length or non-numeric string is reported on stdout and rejected.
    Returns True only when the check digit matches.
    """
    valid = False
    # Deliberate exact type checks: bool (a subclass of int) must not pass.
    data_type = type(npi)
    if data_type is str:
        if len(npi) == 10:
            try:
                npi = int(npi)
                data_type = type(npi)  # fall through to the integer path
            except ValueError:
                print('[Wrong input]: Could not convert to integer')
        else:
            print('[Wrong input]: Number should have 10 digits')
    if data_type is int:
        if count_digits(npi) == 10:
            last_digit = npi % 10   # the rightmost digit is the check digit
            npi = npi // 10         # discard it before summing
            valid = checksum(calculate_sum(npi)) == last_digit
        else:
            print('[Wrong input]: Number should have 10 digits')
    if not (data_type is int or data_type is str):
        print('[Wrong input]: Input is not of the required type')
    return valid
# Offset added to the digit sum in checksum() before deriving the NPI
# check digit (the "add 24 for 10-digit numbers" step of the algorithm).
CONST = 24
def count_digits(num: int) -> int:
    """Return the number of decimal digits in a non-negative integer.

    Note: returns 0 for ``num == 0``; acceptable here because a valid NPI
    is always a 10-digit number.

    :param num: non-negative integer to measure
    :return: digit count
    :rtype: int
    """
    count = 0
    while num > 0:
        num //= 10  # strip the last digit
        count += 1
    return count
def calculate_sum(npi: int) -> int:
    """Luhn digit sum of an NPI body (check digit already removed).

    Digits are consumed right-to-left in pairs: the rightmost digit of each
    pair is doubled (with the digits of the product summed, since doubling
    one digit yields at most 18) and the other digit is added as-is.
    """
    total = 0
    while npi > 0:
        npi, doubled = divmod(npi, 10)   # every second digit from the right
        doubled *= 2
        if doubled > 9:
            # two-digit product: add its tens and units digits
            doubled = doubled % 10 + doubled // 10
        npi, plain = divmod(npi, 10)     # the alternating, undoubled digit
        total += doubled + plain
    return total
def checksum(num: int) -> int:
    """Derive the Luhn check digit from a digit sum.

    Adds the module constant CONST (the 10-digit NPI offset), then returns
    the distance to the next multiple of ten — 0 when the adjusted sum
    already ends in zero.

    :param num: Luhn digit sum of the NPI body
    :return: the expected check digit
    :rtype: int
    """
    unit = (num + CONST) % 10
    return (10 - unit) % 10
def test_suite(tests):
    """Run validate_npi over (input, expected, description) triples and print
    a per-case verdict plus a final summary."""
    result = ('FAILED :(', 'PASSED :)',)  # indexed by the boolean `passed`
    passes = 0
    fails = 0
    num_tests = len(tests)
    for count, test in enumerate(tests):
        print(f'Running test {count + 1} {40 * "."}')
        outcome = validate_npi(test[0])
        passed = outcome == test[1]
        if passed:
            passes += 1
        else:
            fails += 1
        print(f'{test[2]} -> {test[0]}')
        print(f'[{result[passed]}]: Expected - {test[1]} | Outcome - {outcome}')
        print(f'{70 * "+"}')
    print('\nTest Summary')
    print(70 * '-')
    print(f'Run {num_tests} test cases')
    # String-by-bool multiplication: each summary line prints only when its
    # counter is non-zero.
    print(f'{passes}/{num_tests} cases passed' * (passes > 0))
    print(f'{fails}/{num_tests} cases failed' * (fails > 0))
# Manual test harness, exercised only when the module runs as a script.
if __name__ == '__main__':
    # (input, expected validity, description) triples covering valid NPIs,
    # bad check digits, wrong lengths and wrong input types.
    test_cases = [(1245319599, True, 'Testing valid npi integer'),
                  ('1234567893', True, 'Testing valid npi string'),
                  (1245319594, False, 'Testing npi integer with invalid check digit'),
                  ('1234567890', False, 'Testing npi string with invalid check digit'),
                  (1212343, False, 'Testing integer with less than 10 digits'),
                  ('23577986', False, 'Testing string with less than 10 digits'),
                  (75435678744567, False, 'Testing integer with more than 10 digits'),
                  ('965435445654', False, 'Testing string with more than 10 digits'),
                  (1003864190, True, 'Testing valid npi integer with 0 check digit'),
                  ('1376104430', True, 'Testing valid npi string with 0 check digit'),
                  (1453.446565, False, 'Testing invalid input; type float'),
                  (True, False, 'Testing invalid input; type bool'),
                  ('13463r6807', False, 'Testing valid string length containing invalid character(s)')]
    test_suite(test_cases)
|
import numpy as np
from scipy.sparse import issparse
from sklearn.utils import sparsefuncs
import anndata
from typing import Union
from ..dynamo_logger import LoggerManager, main_tqdm
from ..utils import copy_adata
def lambda_correction(
    adata: anndata.AnnData,
    lambda_key: str = "lambda",
    inplace: bool = True,
    copy: bool = False,
) -> Union[anndata.AnnData, None]:
    """Use lambda (cell-wise detection rate) to estimate the labelled RNA.

    Parameters
    ----------
    adata:
        adata object generated from dynast.
    lambda_key:
        The key to the cell-wise detection rate in ``adata.obs``.
    inplace:
        Whether to update the layers in place. If False, new layers with
        '_corrected' appended to the existing names store the updated data.
    copy:
        Whether to copy the adata object or update the adata object in place.

    Returns
    -------
    adata: :class:`~anndata.AnnData`
        The new anndata object with corrected layers when ``copy`` is True,
        otherwise None (the input object is updated in place).
    """
    logger = LoggerManager.gen_logger("dynamo-lambda_correction")
    logger.log_time()
    adata = copy_adata(adata) if copy else adata
    logger.info("apply detection rate correction to adata...", indent_level=1)
    if lambda_key not in adata.obs.keys():
        raise ValueError(
            f"the lambda_key {lambda_key} is not included in adata.obs! Please ensure you have calculated "
            "per-cell detection rate!"
        )
    logger.info("retrieving the cell-wise detection rate..", indent_level=1)
    detection_rate = adata.obs[lambda_key].values[:, None]

    logger.info("identify the data type..", indent_level=1)
    all_layers = adata.layers.keys()
    # BUG FIX: `str` has no `.contains` method; substring tests use `in`.
    has_ul = np.any(["ul_" in i for i in all_layers])
    has_un = np.any(["un_" in i for i in all_layers])
    has_sl = np.any(["sl_" in i for i in all_layers])
    has_sn = np.any(["sn_" in i for i in all_layers])
    has_l = np.any(["_l_" in i for i in all_layers])
    has_n = np.any(["_n_" in i for i in all_layers])
    # BUG FIX: `sum(flag + flag + ...)` applied sum() to a scalar (TypeError);
    # count the flags in a list instead. Also raise on unrecognized layouts
    # rather than hitting an UnboundLocalError on `datatype` below.
    if sum([has_ul, has_un, has_sl, has_sn]) == 4:
        datatype = "splicing_labeling"
    elif has_l or has_n:
        datatype = "labeling"
    else:
        raise ValueError(
            "the adata object must contain either ul/un/sl/sn layers (splicing + labeling) "
            "or labeled/unlabeled layers (labeling only)."
        )
    logger.info(f"the data type identified is {datatype}", indent_level=2)

    logger.info("retrieve relevant layers for detection rate correction", indent_level=1)
    # BUG FIX throughout this section: `layers += layer` extended the list
    # with the *characters* of the layer name (likewise match_tot_layer);
    # list.append is intended.
    if datatype == "splicing_labeling":
        layers, match_tot_layer = [], []
        for layer in all_layers:
            if "ul_" in layer:
                layers.append(layer)
                match_tot_layer.append("unspliced")
            elif "un_" in layer:
                layers.append(layer)
                match_tot_layer.append("unspliced")
            elif "sl_" in layer:
                layers.append(layer)
                match_tot_layer.append("spliced")
            elif "sn_" in layer:
                layers.append(layer)
                match_tot_layer.append("spliced")
            elif "unspliced" in layer:
                layers.append(layer)
            elif "spliced" in layer:
                layers.append(layer)
        if len(layers) != 6:
            raise ValueError(
                "the adata object has to include ul, un, sl, sn, unspliced, spliced, "
                "six relevant layers for splicing and labeling quantified datasets."
            )
    elif datatype == "labeling":
        layers, match_tot_layer = [], []
        for layer in all_layers:
            if "_l_" in layer:
                layers.append(layer)
                match_tot_layer.append("total")
            elif "_n_" in layer:
                layers.append(layer)
                match_tot_layer.append("total")
            elif "total" in layer:
                layers.append(layer)
        if len(layers) != 3:
            raise ValueError(
                "the adata object has to include labeled, unlabeled, three relevant layers for labeling quantified "
                "datasets."
            )

    logger.info("detection rate correction starts", indent_level=1)
    for i, layer in enumerate(main_tqdm(layers, desc="iterating all relevant layers")):
        if i < len(match_tot_layer):
            cur_layer = adata.layers[layer] if inplace else adata.layers[layer].copy()
            cur_total = adata.layers[match_tot_layer[i]]
            # Even indices hold labeled RNA, odd indices the paired unlabeled RNA.
            if i % 2 == 0:
                # formula: min(L / lambda, (L + U)) from scNT-seq
                if issparse(cur_layer):
                    # inplace_row_scale expects a 1-D scale vector.
                    sparsefuncs.inplace_row_scale(cur_layer, 1 / detection_rate.ravel())
                    corrected = sparse_mimmax(cur_layer, cur_total, type="min")
                else:
                    cur_layer /= detection_rate
                    # sparse_mimmax requires sparse operands; dense layers use np.minimum.
                    corrected = np.minimum(cur_layer, cur_total)
                if inplace:
                    adata.layers[layer] = corrected
                else:
                    adata.layers[layer + "_corrected"] = corrected
            else:
                # unlabeled = total - corrected labeled (the preceding layer).
                # BUG FIX: `layer[i - 1]` indexed into the layer *name* string;
                # the intended reference is the previous entry of `layers`.
                if inplace:
                    adata.layers[layer] = cur_total - adata.layers[layers[i - 1]]
                else:
                    adata.layers[layer + "_corrected"] = cur_total - adata.layers[layers[i - 1] + "_corrected"]
    logger.finish_progress(progress_name="lambda_correction")

    if copy:
        return adata
    return None
def sparse_mimmax(A, B, type="min"):
    """Return the element-wise minimum/maximum of sparse matrices `A` and `B`.

    Parameters
    ----------
    A:
        The first sparse matrix.
    B:
        The second sparse matrix.
    type:
        Either "min" or "max"; any other value selects maximum.
        BUG FIX: the default was the typo "mim", which never matched "min"
        and therefore silently computed the *maximum* — callers such as
        lambda_correction rely on the minimum (min(L / lambda, L + U)).

    Returns
    -------
    M:
        A sparse matrix containing the element-wise minimum (or maximum)
        of the two inputs.
    """
    # Where the comparison holds, keep A's value via B + (A - B); elsewhere
    # the mask is zero and B's value survives.
    mask = (A < B).astype(int) if type == "min" else (A > B).astype(int)
    M = mask.multiply(A - B) + B
    return M
|
# Standard scale crop flip, scale is important
# Declarative training-time augmentation pipeline (consumed by a transform
# factory that dispatches on 'name'): tensor conversion -> random rescale in
# [0.5, 1.5] -> random 321x321 crop -> horizontal flip (p=0.5) -> ImageNet
# mean/std normalization.
train_augmentation = dict(
    name='Compose',
    transforms=[
        dict(
            name='ToTensor'
        ),
        dict(
            name='RandomScale',
            min_scale=0.5,
            max_scale=1.5
        ),
        dict(
            name='RandomCrop',
            size=(321, 321)
        ),
        dict(
            name='RandomHorizontalFlip',
            flip_prob=0.5
        ),
        dict(
            name='Normalize',
            # ImageNet channel statistics (RGB).
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )
    ]
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-21 18:59
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the openid_provider AccessToken model.

    Auto-generated by Django 1.11.2; the foreign keys are removed before
    the model itself is deleted.
    """

    dependencies = [
        ('openid_provider', '0007_client'),
    ]

    operations = [
        # Remove FK fields first so the model can be deleted cleanly.
        migrations.RemoveField(
            model_name='accesstoken',
            name='client',
        ),
        migrations.RemoveField(
            model_name='accesstoken',
            name='user',
        ),
        migrations.DeleteModel(
            name='AccessToken',
        ),
    ]
|
"""added guild_id column to reminders table
Revision ID: 97d93ee5aec8
Revises: 213b2c97361f
Create Date: 2021-09-19 14:22:15.752826
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '97d93ee5aec8'        # this migration's id
down_revision = '213b2c97361f'   # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the `reminders` table (schema already includes guild_id).

    NOTE(review): the module docstring says "added guild_id column", but
    this autogenerated migration creates the whole table — confirm the
    table was dropped (or never created) at the parent revision.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('reminders',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('author', sa.String(), nullable=True),
        sa.Column('channel_id', sa.String(), nullable=True),
        sa.Column('guild_id', sa.String(), nullable=True),
        sa.Column('jump_url', sa.String(), nullable=True),
        sa.Column('content', sa.String(), nullable=True),
        sa.Column('expiration', sa.DateTime(), nullable=True),
        sa.Column('mentions', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the upgrade by dropping the `reminders` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('reminders')
    # ### end Alembic commands ###
|
# This is a sample macro file with a single command. When NatSpeak has the
# focus, say "demo sample one". It should recognize the command and print it.
import natlink
from natlinkutils import *
class ThisGrammar(GrammarBase):
    # Python 2 / natlink macro grammar: one exported rule recognizing the
    # spoken command "demo sample one".
    gramSpec = """
        <start> exported = demo sample one;
    """

    def initialize(self):
        # Compile the grammar spec and activate every exported rule.
        self.load(self.gramSpec)
        self.activateAll()

    def gotResults_start(self,words,fullResults):
        # Callback invoked by natlink when the <start> rule is recognized.
        print 'Detected "demo sample one"'
# Instantiate and load the grammar at import time (natlink macro convention).
thisGrammar = ThisGrammar()
thisGrammar.initialize()
print 'Demo command loaded, say "demo sample one"!'
def unload():
    # Called by natlink when the macro file is unloaded: release the grammar
    # and drop the module-level reference.
    global thisGrammar
    if thisGrammar: thisGrammar.unload()
    thisGrammar = None
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import urlparse
import re
### M3U8 tag constants
TAG_PREFIX = '#EXT'                # every tag line starts with this prefix
PLAYLIST_HEADER = '#EXTM3U'        # mandatory first line of a playlist
TAG_ENDLIST = '#EXT-X-ENDLIST'     # marks the end of a VOD playlist
TAG_KEY = '#EXT-X-KEY'             # segment encryption key
TAG_MEDIA_DURATION = '#EXTINF'     # segment duration (stray ';' removed, quoting unified)
TAG_STREAM_INF = '#EXT-X-STREAM-INF'       # variant entry in a master playlist
TAG_DISCONTINUITY = '#EXT-X-DISCONTINUITY' # timestamp/encoding discontinuity
### M3U8 regex patterns
REGEX_MEDIA_DURATION = TAG_MEDIA_DURATION + ':([\\d\\.]+)\\b'
URL = 'http://video.yjf138.com:8091/20180812/6yl0Q2YZ/index.m3u8'
### Fetch and return the raw m3u8 playlist text (may be a master playlist, not the final one)
def parse_m3u8_info(url):
    """Download the playlist at `url` and return its raw text (which may be
    a master playlist rather than the final media playlist).

    Returns the string 'Error protocol' for non-http(s) URLs.
    (Python 2: iter_lines yields str; lines are re-joined with '\\n'.)
    """
    val = urlparse.urlsplit(url)
    if (val.scheme != 'http') and (val.scheme != 'https'):
        return 'Error protocol'
    result = ''
    request = requests.get(url, timeout=20)
    for line in request.iter_lines():
        result += line + '\n'
    return result
### Return the final (media-level) m3u8 playlist text
def parse_final_m3u8_info(url):
    """Resolve `url` down to a media playlist and return its text with every
    segment line expanded to an absolute URL.

    When a #EXT-X-STREAM-INF entry is present, the next non-tag line is the
    variant playlist URI and the function recurses into it.
    Returns 'Error protocol' for non-http(s) URLs.
    """
    val = urlparse.urlsplit(url)
    if (val.scheme != 'http') and (val.scheme != 'https'):
        return 'Error protocol'
    request = requests.get(url, timeout=20)
    result = ''
    hasStreamInf = False
    for line in request.iter_lines():
        if (line.startswith(TAG_PREFIX)):
            result += line + '\n'
            if (line.startswith(TAG_STREAM_INF)):
                # The following non-tag line holds the variant playlist URI.
                hasStreamInf = True
            continue
        if (hasStreamInf):
            return parse_final_m3u8_info(get_final_url(url, line))
        hasStreamInf = False
        result += get_final_url(url, line) + '\n'
    return result
### Resolve a segment/variant line from the m3u8 into a complete URL
def get_final_url(url, line):
    """Resolve one playlist `line` (a segment or variant reference) against
    the playlist URL `url` into a complete URL.

    Handles three shapes: absolute http(s) lines (returned unchanged),
    root-relative lines starting with '/' (with a heuristic that re-bases
    on a path prefix already present in `url`), and plain relative lines
    joined onto the playlist's directory.
    """
    parts = urlparse.urlsplit(url)
    hostUrl = url[0:url.index(parts.netloc) + len(parts.netloc)]  # scheme + authority
    baseUrl = url[0:url.rindex('/') + 1]                          # playlist directory

    if line.startswith('/'):
        if line[1:].find('/') != -1:
            # Take the text before the second slash and, when `url` already
            # contains it, splice `line` in at that position.
            cut = line[1:].index('/')
            prefix = line[0:cut]
            if url.find(prefix) != -1:
                return url[0:url.index(prefix)] + line
            return hostUrl + line[1:]
        # Single leading slash, no further path: resolve against the directory.
        return baseUrl + line[1:]

    if line.startswith('http://') or line.startswith('https://'):
        return line

    return baseUrl + line
### Check whether the m3u8 contains #EXT-X-STREAM-INF
def has_ext_stream(url):
    """Return True when the playlist at `url` contains a #EXT-X-STREAM-INF
    tag (i.e. it is a master/variant playlist).

    Returns 'Error protocol' for non-http(s) URLs.
    """
    val = urlparse.urlsplit(url)
    if (val.scheme != 'http') and (val.scheme != 'https'):
        return 'Error protocol'
    request = requests.get(url, timeout=20)
    for line in request.iter_lines():
        if (line.startswith(TAG_PREFIX)):
            if (line.startswith(TAG_STREAM_INF)):
                return True
    return False
### Check whether the m3u8 contains #EXT-X-KEY
def has_ext_key(url):
    """Return True when the (media-level) playlist at `url` contains a
    #EXT-X-KEY tag, following one #EXT-X-STREAM-INF indirection if present.

    Returns 'Error protocol' for non-http(s) URLs.
    """
    val = urlparse.urlsplit(url)
    if (val.scheme != 'http') and (val.scheme != 'https'):
        return 'Error protocol'
    request = requests.get(url, timeout=20)
    hasStreamInf = False
    for line in request.iter_lines():
        if (line.startswith(TAG_PREFIX)):
            if (line.startswith(TAG_STREAM_INF)):
                # The next non-tag line names the variant playlist.
                hasStreamInf = True
            elif (line.startswith(TAG_KEY)):
                return True
            continue
        if (hasStreamInf):
            return has_ext_key(get_final_url(url, line))
        hasStreamInf = False
    return False
### Check whether the m3u8 contains #EXT-X-DISCONTINUITY
def has_ext_discontinuity(url):
    """Return True when the (media-level) playlist at `url` contains a
    #EXT-X-DISCONTINUITY tag, following one #EXT-X-STREAM-INF indirection.

    Returns 'Error protocol' for non-http(s) URLs (kept for backward
    compatibility with the sibling helpers).
    """
    val = urlparse.urlsplit(url)
    if (val.scheme != 'http') and (val.scheme != 'https'):
        return 'Error protocol'
    request = requests.get(url, timeout=20)
    hasStreamInf = False
    for line in request.iter_lines():
        if (line.startswith(TAG_PREFIX)):
            if (line.startswith(TAG_STREAM_INF)):
                hasStreamInf = True
            elif (line.startswith(TAG_DISCONTINUITY)):
                return True
            continue
        if (hasStreamInf):
            # BUG FIX: this recursed into has_ext_key(), which answers the
            # #EXT-X-KEY question; the variant playlist must be checked for
            # discontinuities by recursing into this function itself.
            return has_ext_discontinuity(get_final_url(url, line))
        hasStreamInf = False
    return False
### Get the total duration of the M3U8 file
def get_total_duration(url):
val = urlparse.urlsplit(url)
if (val.scheme != 'http') and (val.scheme != 'https'):
return 'Error protocol'
request = requests.get(url, timeout=20)
result = ''
hasStreamInf = False
totalDuration = 0
for line in request.iter_lines():
if (line.startswith(TAG_PREFIX)):
if (line.startswith(TAG_STREAM_INF)):
hasStreamInf = True
elif (line.startswith(TAG_MEDIA_DURATION)):
ret = parse_pattern_str(REGEX_MEDIA_DURATION, line)
totalDuration += float(ret)
continue
if (hasStreamInf):
return get_total_duration(get_final_url(url, line))
hasStreamInf = False
return totalDuration
def parse_pattern_str(pattern_str, str):
    """Return group(1) of re.match(pattern_str, str), or '' when no match.

    (The parameter named `str` shadows the builtin; name kept for
    backward compatibility with keyword callers.)
    """
    matched = re.match(pattern_str, str)
    return matched.group(1) if matched else ''
# Demo driver: parse the M3U8 at URL and report its properties.
# (The two print strings below were broken across physical lines in the
# original — a syntax error — and have been rejoined.)
result = parse_m3u8_info(URL)
print('M3U8ๆไปถๅๅฎนๅฆไธ:\n' + result)
finalResult = parse_final_m3u8_info(URL)
print('M3U8ๆไปถๆ็ปๅๅฎนๅฆไธ:\n' + finalResult)
hasExtStream = has_ext_stream(URL)
print('ๆฏๅฆๅญๅจๅค่ทฏๆต: ---> ' + str(hasExtStream))
hasExtKey = has_ext_key(URL)
print('ๆฏๅฆๅญๅจkey: ---> ' + str(hasExtKey))
hasDisContinuity = has_ext_discontinuity(URL)
print('ๆฏๅฆๅญๅจไธ่ฟ็ปญ็ : ---> ' + str(hasDisContinuity))
totalDuration = get_total_duration(URL)
print('ๆญคM3U8ๆไปถ็ๆปๆถ้ด: --->' + str(totalDuration))
|
import sys,os
import numpy as np
def divide_into_states(st_dur, fn_dur, num_states):
    """Evenly split the span [st_dur, fn_dur] into `num_states` HMM states.

    Times are in HTK 100 ns units; 50000 == one 5 ms frame.
    Returns a (2, num_states) int64 array: row 0 = state starts,
    row 1 = state ends. Any remainder frames land in the last state.
    """
    state_dur = np.zeros((2, num_states), np.int64)
    state_dur[0][0] = st_dur
    state_dur[1][num_states-1] = fn_dur
    # BUG FIX: this was `/` (true division after the Python 3 port), which
    # produced fractional frame counts and state boundaries off the 50000
    # frame grid. Integer division restores the intended py2 behavior.
    num_of_frames = (fn_dur-st_dur)//50000
    nof_each_state = num_of_frames//num_states
    # if nof_each_state < 1 some states get zero duration
    for k in range(num_states-1):
        state_dur[1][k] = state_dur[0][k]+(nof_each_state*50000)
        state_dur[0][k+1] = state_dur[1][k]
    return state_dur
def normalize_dur(dur):
    """Round `dur` to the nearest multiple of 50000 (5 ms frame); ties go down."""
    remainder = dur % 50000
    return dur - remainder if remainder <= 25000 else dur + (50000 - remainder)
def normalize_label_files(in_lab_file, out_lab_file, label_style, write_time_stamps):
    """Normalize one HTK-style label file for Merlin.

    Merges runs of pauses, renames edge pauses to silences (pau -> sil) and
    interior silences to pauses (sil -> pau), snaps phone boundaries to the
    50000-unit (5 ms) frame grid, and writes phone- or state-level labels.

    Raises ValueError if a phone ends up with zero duration.
    """
    with open(in_lab_file, 'r') as in_f:
        data = in_f.readlines()
    with open(out_lab_file, 'w') as out_f:
        # First pass: the non-pau phone sequence, used below to find the
        # utterance-initial/final positions.
        ph_arr = []
        for i in data:
            fstr = i.strip().split()
            ftag = fstr[2]
            ph = ftag[ftag.index('-')+1:ftag.index('+')]
            if ph == 'pau':
                continue
            ph_arr.append(ph)
        # Second pass: merge pauses and rewrite pau/sil tags.
        count = 0
        prev_ph = ''
        merged_data = [[], [], []]
        for i in data:
            fstr = i.strip().split()
            start_time = fstr[0]
            end_time = fstr[1]
            ftag = fstr[2]
            mid_indx = ftag.index(':')
            p1 = ftag[0:mid_indx]
            p2 = ftag[mid_indx:]
            ph = ftag[ftag.index('-')+1:ftag.index('+')]
            if ph != 'pau':
                count = count + 1
            # Keep only the first of a run of consecutive pauses.
            if prev_ph == 'pau' and ph == 'pau':
                continue
            # Utterance-initial/final pauses become silences.
            if (count <= 2 and 'pau' in p1) or (count > len(ph_arr) - 2 and 'pau' in p1):
                p1 = p1.replace('pau', 'sil')
                ftag = p1 + p2
            # Interior silences become pauses.
            if count >= 1 and count < len(ph_arr):
                if '-sil+' in ftag:
                    ftag = ftag.replace('-sil+', '-pau+')
            merged_data[0].append(start_time)
            merged_data[1].append(end_time)
            merged_data[2].append(ftag)
            prev_ph = ph
        num_states = 5
        tot_num_ph = len(merged_data[0])
        for j in range(tot_num_ph):
            if j < tot_num_ph - 1:
                # Snap the next phone's start to the frame grid and make the
                # current phone end exactly there (no gaps or overlaps).
                ph_end = normalize_dur(int(merged_data[0][j+1]))
                merged_data[0][j+1] = str(ph_end)
                merged_data[1][j] = merged_data[0][j+1]
            else:
                end_time = normalize_dur(int(end_time))
                merged_data[1][j] = str(end_time)
            if (int(merged_data[1][j]) - int(merged_data[0][j])) == 0:
                print('Error: zero duration for this phone')
                # BUG FIX: the original used a bare `raise` with no active
                # exception (RuntimeError at runtime); raise explicitly.
                raise ValueError('zero duration for phone %s' % merged_data[2][j])
            if label_style == "phone_align":
                if write_time_stamps:
                    out_f.write(merged_data[0][j]+' '+merged_data[1][j]+' '+merged_data[2][j]+'\n')
                else:
                    out_f.write(merged_data[2][j]+'\n')
            elif label_style == "state_align":
                if write_time_stamps:
                    # Loop-invariant w.r.t. k: hoisted out of the state loop
                    # (the original recomputed it for every state).
                    state_dur = divide_into_states(int(merged_data[0][j]), int(merged_data[1][j]), num_states)
                    for k in range(num_states):
                        out_f.write(str(state_dur[0][k])+' '+str(state_dur[1][k])+' '+merged_data[2][j]+'['+str(k+2)+']\n')
                else:
                    out_f.write(merged_data[2][j]+'\n')
if __name__ == "__main__":
    # CLI: <input_lab_dir> <output_lab_dir> <label_style> <file_id_list_scp>
    # [write_time_stamps (1/0, default 1)]
    if len(sys.argv)<5:
        print('Usage: python normalize_lab_for_merlin.py <input_lab_dir> <output_lab_dir> <label_style> <file_id_list_scp> <optional: write_time_stamps (1/0)>\n')
        sys.exit(0)
    in_lab_dir = sys.argv[1]
    out_lab_dir = sys.argv[2]
    label_style = sys.argv[3]
    file_id_list = sys.argv[4]
    write_time_stamps = True
    if len(sys.argv)==6:
        if int(sys.argv[5])==0:
            write_time_stamps = False
    # Only phone_align / state_align are supported label styles.
    if label_style!="phone_align" and label_style!="state_align":
        print("These labels %s are not supported as of now...please use state_align or phone_align!!" % (label_style))
        sys.exit(0)
    if not os.path.exists(out_lab_dir):
        os.makedirs(out_lab_dir)
    # Normalize every utterance listed in the file-id list.
    in_f = open(file_id_list,'r')
    for i in in_f.readlines():
        filename = i.strip()+'.lab'
        print(filename)
        in_lab_file = os.path.join(in_lab_dir, filename)
        out_lab_file = os.path.join(out_lab_dir, filename)
        normalize_label_files(in_lab_file, out_lab_file, label_style, write_time_stamps)
        #break;
    in_f.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Josรฉ Sรกnchez-Gallego (gallegoj@uw.edu)
# @Date: 2021-07-27
# @Filename: fvc.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Optional, cast
import click
from drift import DriftError, Relay
from jaeger import config
from jaeger.exceptions import FVCError
from jaeger.fvc import FVC
from jaeger.ieb import FVC as FVC_IEB
from jaeger.utils import run_in_executor
from . import jaeger_parser
if TYPE_CHECKING:
from clu import Command
from jaeger import FPS
from jaeger.actor import JaegerActor
__all__ = ["fvc_parser"]
@jaeger_parser.group(name="fvc")
def fvc_parser():
    """Commands to command the FVC."""
    # Click command group; subcommands (expose, loop, status, ...) attach here.
    pass
@fvc_parser.command()
@click.argument("EXPOSURE-TIME", default=None, type=float, required=False)
async def expose(
    command: Command[JaegerActor],
    fps: FPS,
    exposure_time: Optional[float] = None,
):
    """Takes an exposure with the FVC."""

    # Fall back to the configured exposure time (note: `or` also replaces 0.0).
    exposure_time = exposure_time or config["fvc"]["exposure_time"]
    assert isinstance(exposure_time, float)

    command.info("Taking FVC exposure with fliswarm.")

    fvc = FVC(config["observatory"])

    try:
        fvc.set_command(command)
        filename = await fvc.expose(exposure_time=exposure_time)
    except FVCError as err:
        return command.fail(error=f"Failed taking FVC exposure: {err}")

    return command.finish(fvc_filename=str(filename))
@fvc_parser.command(cancellable=True)
@click.option(
    "--exposure-time",
    type=float,
    help="Exposure time.",
)
@click.option(
    "--fbi-level",
    type=float,
    help="FBI LED levels.",
)
@click.option(
    "--one",
    is_flag=True,
    help="Only runs one FVC correction iteration.",
)
@click.option(
    "--max-iterations",
    type=int,
    help="Maximum number of iterations.",
)
@click.option(
    "--stack",
    type=int,
    default=1,
    help="Number of FVC image to stack.",
)
@click.option(
    "--plot/--no-plot",
    default=True,
    help="Generate and save plots.",
)
@click.option(
    "--apply/--no-apply",
    default=True,
    help="Apply corrections.",
)
@click.option(
    "-m",
    "--max-correction",
    type=float,
    help="Maximum correction allowed, in degrees.",
)
@click.option(
    "-k",
    type=float,
    help="Proportional term of the correction.",
)
async def loop(
    command: Command[JaegerActor],
    fps: FPS,
    exposure_time: float | None = None,
    fbi_level: float | None = None,
    one: bool = False,
    max_iterations: int | None = None,
    stack: int = 3,
    plot: bool = True,
    apply: bool = True,
    max_correction: float | None = None,
    k: float | None = None,
):
    """Executes the FVC correction loop.

    This routine will turn the FBI LEDs on, take FVC exposures, process them,
    calculate the offset correction and applies them. Loops until the desired
    convergence is achieved.

    """

    # Fall back to configuration defaults when options are not provided
    # (note: `or` also replaces an explicit 0.0).
    exposure_time = exposure_time or config["fvc"]["exposure_time"]
    fbi_level = fbi_level or config["fvc"]["fbi_level"]

    assert isinstance(exposure_time, float) and isinstance(fbi_level, float)

    if fps.configuration is None:
        return command.fail("Configuration not loaded.")

    fvc = FVC(fps.observatory, command=command)

    # Check that the rotator is halted.
    axis_cmd = await command.send_command("keys", "getFor=tcc AxisCmdState")
    if axis_cmd.status.did_fail:
        command.warning("Cannot check the status of the rotator.")
    else:
        # NOTE(review): assumes the rotator state is the third value of the
        # first AxisCmdState keyword -- confirm against the keys actor.
        rot_status = axis_cmd.replies[0].keywords[0].values[2]
        if rot_status != "Halted":
            return command.fail(f"Cannot expose FVC while the rotator is {rot_status}.")
        else:
            command.debug("The rotator is halted.")

    command.debug("Turning LEDs on.")
    await command.send_command("jaeger", f"ieb fbi led1 {fbi_level}")
    await command.send_command("jaeger", f"ieb fbi led2 {fbi_level}")

    if one is True and apply is True:
        command.warning(
            "One correction will be applied. The confSummaryF "
            "file will not reflect the final state."
        )

    max_iterations = max_iterations or config["fvc"]["max_fvc_iterations"]
    current_rms = None
    delta_rms = None
    filename = None
    proc_image_saved = False

    # Flag to determine when to exit the loop.
    finish: bool = False

    try:
        n = 1
        while True:
            command.info(f"FVC iteration {n}")
            proc_image_saved: bool = False

            # 1. Expose the FVC
            command.debug("Taking exposure with fliswarm.")
            filename = await fvc.expose(exposure_time=exposure_time, stack=stack)
            command.debug(fvc_filename=str(filename))

            # 2. Process the new image.
            await run_in_executor(fvc.process_fvc_image, filename, plot=plot)

            # 3. Set current RMS and delta (RMS reported in microns).
            new_rms = fvc.fitrms * 1000.0
            command.info(fvc_rms=new_rms)
            if current_rms is None:
                # First iteration: no previous RMS to compare against.
                pass
            else:
                delta_rms = current_rms - new_rms
                command.info(fvc_deltarms=delta_rms)
            current_rms = new_rms

            # 4. Check if the RMS or delta RMS criteria are met.
            if current_rms < config["fvc"]["target_rms"]:
                command.info("RMS target reached.")
                finish = True
            elif delta_rms is not None:
                if delta_rms < config["fvc"]["target_delta_rms"]:
                    command.info("Delta RMS reached. RMS target has not been reached.")
                    finish = True
                elif delta_rms < 0:
                    # Negative delta means the fit got worse.
                    command.warning("RMS has increased. Cancelling FVC loop.")
                    finish = True

            # 4. Update current positions and calculate offsets.
            command.debug("Calculating offsets.")
            await fps.update_position()
            await run_in_executor(
                fvc.calculate_offsets,
                fps.get_positions(),
                k=k,
                max_correction=max_correction,
            )

            # 5. Apply corrections.
            if finish is False and apply is True:
                if n == max_iterations and one is False:
                    command.debug("Not applying correction during the last iteration.")
                else:
                    await fvc.apply_correction()

            # 6. Save processed file.
            proc_path = filename.with_name("proc-" + filename.name)
            command.debug(f"Saving processed image {proc_path}")
            await fvc.write_proc_image(proc_path)
            proc_image_saved = True

            if finish is True:
                break

            if one is True or apply is False:
                command.warning("Cancelling FVC loop after one iteration.")
                break

            if n == max_iterations:
                command.warning("Maximum number of iterations reached.")
                break

            n += 1

    except Exception as err:
        return command.fail(error=f"Failed processing image: {err}")

    finally:
        # Always write the summary and turn the LEDs back off, even when the
        # loop failed or was cancelled.
        command.info("Saving confSummaryF file.")
        await asyncio.get_running_loop().run_in_executor(None, fvc.write_summary_F)

        command.debug("Turning LEDs off.")
        await command.send_command("jaeger", "ieb fbi led1 0")
        await command.send_command("jaeger", "ieb fbi led2 0")

        # If the loop exited before step 6 (e.g. on an exception), still try
        # to save the processed image.
        if proc_image_saved is False:
            if filename is not None and fvc.proc_hdu is not None:
                proc_path = filename.with_name("proc-" + filename.name)
                command.debug(f"Saving processed image {proc_path}")
                await fvc.write_proc_image(proc_path)
            else:
                command.warning("Cannot write processed image.")

    # FVC loop always succeeds.
    return command.finish()
@fvc_parser.command()
async def status(command: Command[JaegerActor], fps: FPS):
    """Reports the status of the FVC."""

    fvc_ieb = FVC_IEB.create()

    try:
        status = {}
        categories = fvc_ieb.get_categories()
        for category in sorted(categories):
            cat_data = await fvc_ieb.read_category(category)
            status[category] = []
            for cd in cat_data:
                value = cat_data[cd][0]
                # Relays report "closed"/"open": map to booleans; analog
                # readings are rounded to one decimal place.
                if value == "closed":
                    value = True
                elif value == "open":
                    value = False
                else:
                    value = round(value, 1)
                status[category].append(value)

        command.finish(status)

    except DriftError:
        return command.fail(error="FVC IEB is unavailable or failed to connect.")
async def _power_device(device: str, mode: str):
    """Power on/off the device."""
    ieb = FVC_IEB.create()
    relay: Relay = cast(Relay, ieb.get_device(device))
    # Closing the relay powers the device; opening it cuts power.
    if mode == "on":
        await relay.close()
    else:
        await relay.open()
async def _execute_on_off_command(
    command: Command[JaegerActor], device: str, mode: str
):
    """Executes the on/off command."""

    # Click passes the MODE choice case-insensitively; normalize it.
    mode = mode.lower()

    try:
        await _power_device(device, mode)
        command.info(text=f"{device} is now {mode}.")
    except DriftError:
        return command.fail(error=f"Failed to turn {device} {mode}.")

    # Report the updated status after the relay change.
    await command.send_command("jaeger", "fvc status")

    return command.finish()
@fvc_parser.command()
@click.argument("MODE", type=click.Choice(["on", "off"], case_sensitive=False))
async def camera(command: Command[JaegerActor], fps: FPS, mode: str):
    """Turns camera on/off."""
    # Delegates to the shared on/off helper with the FVC relay.
    await _execute_on_off_command(command, "FVC", mode)
@fvc_parser.command()
@click.argument("MODE", type=click.Choice(["on", "off"], case_sensitive=False))
async def NUC(command: Command[JaegerActor], fps: FPS, mode: str):
    """Turns NUC on/off."""
    # Delegates to the shared on/off helper with the NUC relay.
    await _execute_on_off_command(command, "NUC", mode)
@fvc_parser.command()
@click.argument("LEVEL", type=int)
async def led(command: Command[JaegerActor], fps: FPS, level: int):
    """Sets the level of the FVC LED."""

    fvc_ieb = FVC_IEB.create()
    led = fvc_ieb.get_device("LED1")

    # Map 0-100 % to the raw DAC value: 0-1023 steps shifted left 5 bits
    # (x32) -- presumably the register uses the upper bits; TODO confirm.
    raw_value = 32 * int(1023 * (level / 100))
    await led.write(raw_value)

    await command.send_command("jaeger", "fvc status")

    return command.finish()
|
import logging
import os
import queue
import random
import shutil
import subprocess
import threading
import tkinter as tk
import tkinter.messagebox as tkMessageBox
import webbrowser
from pathlib import Path
from tkinter import PhotoImage, ttk
from tkinter.scrolledtext import ScrolledText
import requests
from packaging import version
from PIL import Image, ImageTk
from modlunky2.assets.assets import AssetStore
from modlunky2.assets.constants import (
EXTRACTED_DIR,
FILEPATH_DIRS,
OVERRIDES_DIR,
PACKS_DIR,
)
from modlunky2.assets.exc import MissingAsset
from modlunky2.assets.patcher import Patcher
from modlunky2.constants import BASE_DIR, ROOT_DIR
from modlunky2.ui.extract import ExtractTab
from modlunky2.ui.levels import LevelsTab
from modlunky2.ui.pack import PackTab
from modlunky2.ui.widgets import ConsoleWindow
# Module-level logger and the launch directory (used to locate updater.exe).
logger = logging.getLogger("modlunky2")
cwd = os.getcwd()
def get_latest_version():
    """Return the latest released modlunky2 version from GitHub, or None.

    Best effort: any failure (offline, rate limit, bad payload) returns None,
    which simply disables the update prompt.
    """
    try:
        return version.parse(
            requests.get(
                "https://api.github.com/repos/spelunky-fyi/modlunky2/releases/latest",
                # BUG FIX: without a timeout this request could hang UI
                # startup indefinitely.
                timeout=10,
            ).json()["tag_name"]
        )
    except Exception:  # pylint: disable=broad-except
        return None
def get_current_version():
    """Parse the locally installed version from the bundled VERSION file."""
    version_path = ROOT_DIR / "VERSION"
    with version_path.open() as version_file:
        raw = version_file.read()
    return version.parse(raw.strip())
class ModlunkyUI:
    """Top-level Tk application window hosting the modlunky2 tabs."""

    def __init__(self, install_dir, beta=False):
        self.install_dir = install_dir
        self.beta = beta

        self.current_version = get_current_version()
        self.latest_version = get_latest_version()
        # Only offer an update when both versions could be determined.
        if self.latest_version is None or self.current_version is None:
            self.needs_update = False
        else:
            self.needs_update = self.current_version < self.latest_version

        self._shutdown_handlers = []
        self._shutting_down = False

        self.root = tk.Tk(className="Modlunky2")  # Equilux Black
        self.root.title("Modlunky 2")
        self.root.geometry("950x650")
        # self.root.resizable(False, False)
        self.icon_png = PhotoImage(file=BASE_DIR / "static/images/icon.png")
        self.root.iconphoto(False, self.icon_png)

        if self.needs_update:
            update_button = ttk.Button(
                self.root, text="Update Modlunky2!", command=self.update
            )
            update_button.pack()

        # Handle shutting down cleanly
        self.root.protocol("WM_DELETE_WINDOW", self.quit)

        self.tabs = {}
        self.tab_control = ttk.Notebook(self.root)

        self.register_tab(
            "Pack Assets",
            PackTab(
                tab_control=self.tab_control,
                install_dir=install_dir,
            ),
        )
        self.register_tab(
            "Extract Assets",
            ExtractTab(
                tab_control=self.tab_control,
                install_dir=install_dir,
            ),
        )
        self.register_tab(
            "Levels",
            LevelsTab(
                tab_control=self.tab_control,
                install_dir=install_dir,
            ),
        )

        self.tab_control.bind("<<NotebookTabChanged>>", self.on_tab_change)
        self.tab_control.pack(expand=1, fill="both")

        self.console = ConsoleWindow()

    def update(self):
        """Open the release page for the latest version in the browser."""
        webbrowser.open_new_tab(
            f"https://github.com/spelunky-fyi/modlunky2/releases/tag/{self.latest_version}"
        )

    def self_update(self):
        # Launch the external updater, then exit this process.
        updater = Path(cwd + "/updater.exe")
        subprocess.call([updater])  # if file exists
        self.root.quit()
        self.root.destroy()

    def on_tab_change(self, event):
        # Notify the newly selected tab so it can refresh its contents.
        tab = event.widget.tab("current")["text"]
        self.tabs[tab].on_load()

    def register_tab(self, name, obj):
        """Add a tab widget to the notebook under `name`."""
        self.tabs[name] = obj
        self.tab_control.add(obj, text=name)

    def quit(self):
        # Idempotent: guards against double invocation from the WM handler.
        if self._shutting_down:
            return
        self._shutting_down = True

        logger.info("Shutting Down.")
        for handler in self._shutdown_handlers:
            handler()

        self.root.quit()
        self.root.destroy()

    def register_shutdown_handler(self, func):
        self._shutdown_handlers.append(func)

    def mainloop(self):
        try:
            self.root.mainloop()
        except KeyboardInterrupt:
            self.quit()
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
็ปๅถๆ็ฎๅ็K็บฟๅพ๏ผ็ปK็บฟๅพๆฏไธๅฎ่ฆไผ็ป็๏ผไปฅๅๅค็CVๅฝขๆ่ฏๅซK็บฟ่ฎญ็ปๅพ็้ฝๆฏ่ฆ่ชๅทฑ็ปๅพ็๏ผ่ฟๆฏๅบๆฌๅใ
"""
from datetime import datetime as dt
from datetime import timedelta
from datetime import timezone
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
import mplfinance as mpf
import matplotlib.dates as mdates
try:
import QUANTAXIS as QA
except:
print('PLEASE run "pip install QUANTAXIS" before run this demo')
pass
from GolemQ.utils.path import (
load_cache,
save_cache,
)
try:
import talib
except:
print('PLEASE run "pip install talib" before call these methods')
pass
def TA_MACD(prices:np.ndarray,
            fastperiod:int=12,
            slowperiod:int=26,
            signalperiod:int=9) -> np.ndarray:
    '''
    MACD wrapper around talib.MACD.

    Defaults: fastperiod=12, slowperiod=26, signalperiod=9.

    Returns an (N, 4) array with columns:
    macd (DIF), signal (DEA), hist*2 (bar), delta (first difference of bar).
    '''
    dif, dea, _ = talib.MACD(prices,
                             fastperiod=fastperiod,
                             slowperiod=slowperiod,
                             signalperiod=signalperiod)
    bar = (dif - dea) * 2
    delta = np.r_[np.nan, np.diff(bar)]
    return np.c_[dif, dea, bar, delta]
def macd_cross_func(data):
    """Compute MACD feature columns (DIF/DEA/MACD/DELTA) indexed like `data`."""
    raw = TA_MACD(data.close)
    return pd.DataFrame(raw,
                        columns=['DIF', 'DEA', 'MACD', 'DELTA'],
                        index=data.index)
def ohlc_plot_with_macd(ohlc_data, features,
                        code=None, codename=None, title=None):
    """Plot a candlestick chart with MA overlays and a MACD sub-panel.

    Returns (ax1, ax2, DATETIME_LABEL).
    """
    # Color theme
    plt.style.use('Solarize_Light2')
    # Font able to display CJK characters
    plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
    fig = plt.figure(figsize = (16,7))
    plt.subplots_adjust(left=0.05, right=0.97)
    if (title is None):
        fig.suptitle(u'้ฟ่ดข็ {:s}๏ผ{:s}๏ผ้ๅๅญฆไน ็ฌ่ฎฐ็ปๅถK็บฟDEMO'.format(codename,
            code), fontsize=16)
    else:
        fig.suptitle(title, fontsize=16)
    # Price panel (3/4 height) and MACD panel (1/4) sharing the x axis.
    ax1 = plt.subplot2grid((4,3),(0,0), rowspan=3, colspan=3)
    ax2 = plt.subplot2grid((4,3),(3,0), rowspan=1, colspan=3, sharex=ax1)
    # Draw the candlesticks
    ohlc_data = ohlc_data.reset_index([1], drop=False)
    mc_stock_cn = mpf.make_marketcolors(up='r',down='g')
    s_stock_cn = mpf.make_mpf_style(marketcolors=mc_stock_cn)
    mpf.plot(data=ohlc_data, ax=ax1, type='candle', style=s_stock_cn)
    # Build time labels for the x axis
    datetime_index = ohlc_data.index.get_level_values(level=0).to_series()
    DATETIME_LABEL = datetime_index.apply(lambda x:
                                          x.strftime("%Y-%m-%d %H:%M")[2:16])
    ax1.plot(DATETIME_LABEL, features['MA30'], lw=0.75,
             color='blue', alpha=0.6)
    ax1.plot(DATETIME_LABEL, features['MA90'], lw=1,
             color='crimson', alpha=0.5)
    ax1.plot(DATETIME_LABEL, features['MA120'], lw=1,
             color='limegreen', alpha=0.5)
    ax1.grid(True)
    ax2.plot(DATETIME_LABEL, features['DIF'],
             color='green', lw=1, label='DIF')
    ax2.plot(DATETIME_LABEL, features['DEA'],
             color = 'purple', lw = 1, label = 'DEA')
    barlist = ax2.bar(DATETIME_LABEL, features['MACD'],
                      width = 0.6, label = 'MACD')
    # Color the MACD bars by sign: green below zero, red above.
    for i in range(len(DATETIME_LABEL.index)):
        if features['MACD'][i] <= 0:
            barlist[i].set_color('g')
        else:
            barlist[i].set_color('r')
    ax2.set(ylabel='MACD(26,12,9)')
    # Show roughly 12 evenly spaced tick labels.
    ax2.set_xticks(range(0, len(DATETIME_LABEL),
                         round(len(DATETIME_LABEL) / 12)))
    ax2.set_xticklabels(DATETIME_LABEL[::round(len(DATETIME_LABEL) / 12)],
                        rotation=15)
    ax2.grid(True)
    return ax1, ax2, DATETIME_LABEL
def ma30_cross_func(data):
    """Compute simple moving averages (MA5/MA10/MA30/MA90/MA120) indexed like `data`."""
    windows = (5, 10, 30, 90, 120)
    columns = ['MA5', 'MA10', 'MA30', 'MA90', 'MA120']
    values = np.c_[tuple(talib.MA(data.close, w) for w in windows)]
    return pd.DataFrame(values, columns=columns, index=data.index)
if __name__ == '__main__':
    # Roughly the last 800 days of 60-minute bars for index 399300 (CSI 300).
    start = dt.now() - timedelta(hours=19200)
    end = dt.now(timezone(timedelta(hours=8))) + timedelta(minutes=1)
    symbol = '399300'
    frequence = '60min'
    try:
        # Read K-line data through the QUANTAXIS (QA) interface.
        data_min = QA.QA_fetch_index_min_adv('399300',
                                             start='{}'.format(start),
                                             end='{}'.format(end),
                                             frequence=frequence)
        if (len(data_min.data) > 100):
            fllename = 'kline_{}_{}.pickle'.format(symbol,
                                                   frequence)
            save_cache(fllename, data_min.data)
        features = pd.concat([ma30_cross_func(data_min.data),
                              macd_cross_func(data_min.data)],
                             axis=1,
                             sort=False)
        ohlc_data = data_min.data.tail(320)
    except Exception:
        # QA missing or no data: fall back to the pickle cache.
        # (This comment was garbled into bare non-comment lines in the
        # original, which was a syntax error; restored as a comment. The
        # bare `except:` was also narrowed to `except Exception:` so that
        # KeyboardInterrupt/SystemExit still propagate.)
        fllename = 'kline_{}_{}.pickle'.format(symbol,
                                               frequence)
        ohlc_data = load_cache(fllename)
        features = pd.concat([ma30_cross_func(ohlc_data),
                              macd_cross_func(ohlc_data)],
                             axis=1,
                             sort=False)
        ohlc_data = ohlc_data.tail(320)
    ohlc_plot_with_macd(ohlc_data, features.tail(320),
                        code='399300',
                        codename=u'ๆฒชๆทฑ300')
    plt.show()
"""
MX-Font
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
import torch
import utils
def torch_eval(val_fn):
    """Decorator: run `val_fn` with gradients disabled and `gen` in eval mode.

    The generator is switched back to train mode afterwards -- also when
    `val_fn` raises (the original skipped `gen.train()` on exceptions,
    leaving the model stuck in eval mode).
    """
    @torch.no_grad()
    def decorated(self, gen, *args, **kwargs):
        gen.eval()
        try:
            return val_fn(self, gen, *args, **kwargs)
        finally:
            # Restore train mode even on failure.
            gen.train()
    return decorated
class Evaluator:
    """Runs generator evaluation and logs comparison image grids via `writer`."""

    def __init__(self, writer):
        # benchmark=True lets cuDNN pick the fastest kernels for fixed shapes.
        torch.backends.cudnn.benchmark = True
        self.writer = writer

    @torch_eval
    def comparable_val_saveimg(self, gen, loader, step, n_row, tag='val'):
        # Build a grid of generated (and, when present, target) images and
        # log it to the writer under `tag` at global step `step`.
        compare_batches = self.infer_fact_loader(gen, loader)
        comparable_grid = utils.make_comparable_grid(*compare_batches[::-1], nrow=n_row)
        saved_path = self.writer.add_image(tag, comparable_grid, global_step=step)

        return comparable_grid, saved_path

    @torch_eval
    def infer_fact_loader(self, gen, loader, save_dir=None):
        # Generate images for every batch; collect targets when available.
        # NOTE(review): save_dir is accepted but unused here -- confirm intent.
        outs = []
        trgs = []

        for batch in loader:
            style_imgs = batch["style_imgs"].cuda()
            char_imgs = batch["source_imgs"].unsqueeze(1).cuda()

            out = gen.gen_from_style_char(style_imgs, char_imgs)
            outs.append(out.detach().cpu())
            if "trg_imgs" in batch:
                trgs.append(batch["trg_imgs"])

        outs = torch.cat(outs).float()
        ret = (outs,)
        # Append the target batch only when the loader provided targets.
        if trgs:
            trgs = torch.cat(trgs)
            ret += (trgs,)

        return ret
|
from aiopoke.objects.resources.pokemon.ability import Ability
from aiopoke.objects.resources.pokemon.characteristic import Characteristic
from aiopoke.objects.resources.pokemon.egg_group import EggGroup
from aiopoke.objects.resources.pokemon.gender import Gender
from aiopoke.objects.resources.pokemon.growth_rate import GrowthRate
from aiopoke.objects.resources.pokemon.natural_gift_type import NaturalGiftType
from aiopoke.objects.resources.pokemon.nature import Nature
from aiopoke.objects.resources.pokemon.pokeathlon_stat import PokeathlonStat
from aiopoke.objects.resources.pokemon.pokemon import Pokemon
from aiopoke.objects.resources.pokemon.pokemon_color import PokemonColor
from aiopoke.objects.resources.pokemon.pokemon_form import PokemonForm
from aiopoke.objects.resources.pokemon.pokemon_habitat import PokemonHabitat
from aiopoke.objects.resources.pokemon.pokemon_shape import PokemonShape
from aiopoke.objects.resources.pokemon.pokemon_species import PokemonSpecies
from aiopoke.objects.resources.pokemon.stat import Stat
# Public re-exports of the pokemon resource models imported above.
__all__ = (
    "Ability",
    "Characteristic",
    "EggGroup",
    "Gender",
    "GrowthRate",
    "NaturalGiftType",
    "Nature",
    "PokeathlonStat",
    "PokemonColor",
    "PokemonForm",
    "PokemonHabitat",
    "PokemonShape",
    "PokemonSpecies",
    "Pokemon",
    "Stat",
)
|
# Read an integer between 0 and 9999 and print each decimal place value.
n = str(input('Digite um nรบmero inteiro entre 0 e 9999: ')).strip()
# BUG FIX: left-pad with zeros so inputs shorter than four digits
# (e.g. '5') no longer raise IndexError when indexing fixed positions.
n = n.zfill(4)
print('Unidade: {}'.format(n[3]))
print('Dezena: {}'.format(n[2]))
print('Centena: {}'.format(n[1]))
print('Milhar: {}'.format(n[0]))
|
def default() -> int:
    """Return the default value (2)."""
    return 2

def minimum() -> int:
    """Return the minimum allowed value (0)."""
    return 0

def maximum() -> int:
    """Return the maximum allowed value (9)."""
    return 9
|
from django.urls import path
from .views import RegisterView, UserEditView, PasswordChangeUserView
from django.contrib.auth import views as auth_views
# Account routes: registration, profile editing, password change, and the
# four-step password-reset flow using custom templates. The reset view names
# must match Django's defaults so the built-in email templates resolve them.
urlpatterns = [
    path('register/', RegisterView.as_view(), name='register'),
    path('edit-profile/', UserEditView.as_view(), name='edit-profile'),
    path('change-password/', PasswordChangeUserView.as_view(), name='change-password'),
    path('reset-password/', auth_views.PasswordResetView.as_view(template_name='registration/reset-password.html'), name='password_reset'),
    path('reset-password/done/', auth_views.PasswordResetDoneView.as_view(template_name='registration/reset-password-done.html'), name='password_reset_done'),
    path('reset/confirm/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view( template_name='registration/reset-password-confirm.html' ), name='password_reset_confirm'),
    path('reset/complete/', auth_views.PasswordResetCompleteView.as_view(template_name='registration/reset-password-complete.html'), name='password_reset_complete'),
]
|
import numpy as np
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from .PwGaANLayer import MultiHeadPwGaANLayer
class SpatAttLayer(nn.Module):
    """Spatial attention layer.

    Projects node features and aggregates them over the forward, backward
    and geo graphs with multi-head PwGaAN, then batch-normalizes the
    concatenated [proj, fwd, bwd, geo] representation.
    """
    def __init__(self, feat_dim, hidden_dim, num_heads, gate=False, merge='mean'):
        super(SpatAttLayer, self).__init__()
        self.feat_dim = feat_dim
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.gate = gate
        self.merge = merge
        self.fwdSpatAttLayer = MultiHeadPwGaANLayer(self.feat_dim, self.hidden_dim, self.num_heads, gate=self.gate, merge=self.merge)
        self.bwdSpatAttLayer = MultiHeadPwGaANLayer(self.feat_dim, self.hidden_dim, self.num_heads, gate=self.gate, merge=self.merge)
        self.geoSpatAttLayer = MultiHeadPwGaANLayer(self.feat_dim, self.hidden_dim, self.num_heads, gate=self.gate, merge=self.merge)
        self.proj_fc = nn.Linear(self.feat_dim, self.hidden_dim, bias=False)
        # BatchNorm over the concatenated features; the first assignment is
        # the fallback for merge values other than 'mean'/'cat'.
        self.bn = nn.BatchNorm1d(num_features=self.hidden_dim * 4)
        if self.merge == 'mean':
            self.bn = nn.BatchNorm1d(num_features=self.hidden_dim * 4)
        elif self.merge == 'cat':
            self.bn = nn.BatchNorm1d(num_features=self.hidden_dim * (3 * self.num_heads + 1))
        self.reset_parameters()

    def reset_parameters(self):
        """ Reinitialize learnable parameters. """
        gain = nn.init.calculate_gain('leaky_relu')
        nn.init.xavier_normal_(self.proj_fc.weight, gain=gain)

    def forward(self, fg: dgl.DGLGraph, bg: dgl.DGLGraph, gg: dgl.DGLGraph):
        feat = fg.ndata['v']
        # BUG FIX: dropout now only active in training mode (the original
        # F.dropout(feat, 0.1) defaulted to training=True even during eval).
        feat = F.dropout(feat, 0.1, training=self.training)
        # Write the regularized features back to all three graphs.
        # BUG FIX: the original assigned fg.ndata['v'] twice and never
        # updated gg, so the geo graph kept the pre-dropout features.
        fg.ndata['v'] = feat
        bg.ndata['v'] = feat
        gg.ndata['v'] = feat
        proj_feat = self.proj_fc(feat)
        del feat
        fg.ndata['proj_z'] = proj_feat
        bg.ndata['proj_z'] = proj_feat
        gg.ndata['proj_z'] = proj_feat
        out_proj_feat = proj_feat.reshape(fg.batch_size, -1, self.hidden_dim)
        del proj_feat
        h_fwd = self.fwdSpatAttLayer(fg)
        h_bwd = self.bwdSpatAttLayer(bg)
        h_geo = self.geoSpatAttLayer(gg)
        h = torch.cat([out_proj_feat, h_fwd, h_bwd, h_geo], dim=-1)
        del out_proj_feat
        del h_fwd
        del h_bwd
        del h_geo
        # BatchNorm1d expects (N, C, L): transpose so features are channels.
        normH = self.bn(torch.transpose(h, -2, -1))
        reshapedH = torch.transpose(normH, -2, -1)
        del h
        del normH
        return reshapedH
if __name__ == '__main__':
    """ Test: Remove dot in the package importing to avoid errors """
    # Smoke test using pre-saved graphs and features from test/.
    GDVQ = np.load('test/GDVQ.npy', allow_pickle=True).item()
    V = GDVQ['V']
    (dfg, dbg,), _ = dgl.load_graphs('test/FBGraphs.dgl')
    (dgg,), _ = dgl.load_graphs('test/GeoGraph.dgl')
    V = torch.from_numpy(V)
    spatAttLayer = SpatAttLayer(feat_dim=7, hidden_dim=16, num_heads=3, gate=True)
    print(V, V.shape)
    # Attach the same node features to all three graphs.
    dfg.ndata['v'] = V
    dbg.ndata['v'] = V
    dgg.ndata['v'] = V
    out = spatAttLayer(dfg, dbg, dgg)
    print(out, out.shape)
    test = out.detach().numpy()
|
from .message import HerokuLogParser, ParseError
# Package version metadata.
version_info = (0, 0, 4)
__version__ = '.'.join(map(str, version_info))
__author__ = 'Chris De Cairos <chris@chrisdecairos.ca>'

# Public API of the package.
__all__ = [
    'HerokuLogParser',
    'ParseError',
    'version_info',
    '__version__',
    '__author__'
]
|
##########################################################################
# simple example GET API calls to a local server running on localhost:8080
##########################################################################
import requests, json, time
# make a call to the route-planning function
# doc'ed at http://dev.opentripplanner.org/apidoc/1.0.0/resource_PlannerResource.html
# There are more options available. These are just some.
# Route-planning request parameters (Python 2 script; see the OTP
# PlannerResource API docs linked above for the full option list).
options = {
    'fromPlace':'43.63725,-79.434928',
    'toPlace':'43.646448,-79.3880',
    'time':'1:02pm',
    'date':'11-14-2017',
    'mode':'TRANSIT,WALK',
    'maxWalkDistance':1000,
    'clampInitialWait':0,
    'wheelchair':False
}
response = requests.get(
    "http://localhost:8080/otp/routers/ttc/plan",
    params = options
)
# parse from JSON to python dictionary
response = json.loads(response.text)
# e.g. get the travel time (seconds) of the first itinerary
print response['plan']['itineraries'][0]['duration']
# isochrone travel area function
# returns a polygon geometry
def isojson(xin,yin,t):
coords = ('%f, %f' % (yin, xin))
options = {
'fromPlace': coords,
'time':'1:02pm',
'date':'05-20-2016',
'mode':'WALK',
'clampInitialWait':0,
'wheelchair': False,
'cutoffSec': t,
'precisionMeters': 10 # how detailed will this be
}
response = requests.get(
"http://localhost:8080/otp/routers/g/isochrone?",
params = options
)
# parse from JSON to python dictionary
print response.text
response = json.loads(response.text)
# Time a sample 30-minute walking isochrone request.
start = time.time()
isojson(-97.11930,49.89281,1800)
print time.time() - start
|
import logging
import os
import unittest
import pandas as pd
from pandas.io.sql import DatabaseError
import psycopg2
from ml_toolkit.db_interaction.api import PostgreSQLManager
from ml_toolkit.utils.io_utl import get_decorators
# Debug: show which POSTGRES_* settings are picked up from the environment.
# SECURITY FIX: redact values for password-like keys so credentials
# (POSTGRES_PASSWORD) never reach logs or CI output.
print({k: ('***' if 'PASSWORD' in k else v)
       for k, v in os.environ.items() if k.startswith('POSTGRES')})
# Connection settings for the test database, with local-dev defaults.
CFG = dict(user=os.environ.get('POSTGRES_USER', 'postgres'),
           password=os.environ.get('POSTGRES_PASSWORD', ''),
           host=os.environ.get('POSTGRES_HOST', 'localhost'),
           port=os.environ.get('POSTGRES_PORT', '5432'),
           database=os.environ.get('POSTGRES_DB', None))
def open_db():
    """Open a PostgreSQLManager for the test config, silenced and raising."""
    manager = PostgreSQLManager(**CFG)
    # A level above ERROR disables all log records during tests.
    manager.logger.setLevel(logging.ERROR + 1)
    manager.logger.setFormattersIsColored(False)
    # Tests assert on raised psycopg2 errors, so never swallow them.
    manager.set_exception_handling('raise')
    return manager
class DBTestCase(unittest.TestCase):
    """Base test case sharing one DB connection and a default test table name."""

    @classmethod
    def setUpClass(cls):
        cls.db = open_db()
        cls.test_table = 'public.test_postgres'

    @classmethod
    def tearDownClass(cls):
        # Dropping the reference presumably closes the connection via the
        # manager's finalizer -- TODO confirm.
        del cls.db
class ConnectionCase(unittest.TestCase):
    """Connection establishment and failure tests."""

    def test_connection(self):
        # to run locally you might need to edit the pg_hba.conf file to use "method" "trust" for local connections
        db = open_db()
        self.assertEqual(db.name, CFG.get('database') or CFG.get('user'))
        self.assertEqual(db.user, CFG.get('user'))

    def test_connection_fail(self):
        # An unknown user must raise a psycopg2 error at construction.
        cfg = CFG.copy()
        cfg['user'] = 'lskdjfl'
        self.assertRaises(psycopg2.Error, PostgreSQLManager, **cfg)
class SchemaCase(DBTestCase):
    """Schema creation/listing/dropping and active-schema switching tests."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.test_schemas = ('test1', 'test2', 'test3')

    def test_create_schema(self):
        for schema in self.test_schemas:
            self.db.create_schema(schema)
        # Second positional arg presumably means if_not_exists -- creating an
        # existing schema with True succeeds, with False raises. TODO confirm.
        self.db.create_schema(self.test_schemas[0], True)
        self.assertRaises(psycopg2.Error, self.db.create_schema, *(self.test_schemas[0], False))

    def test_get_schemas(self):
        self.assertIn('public', self.db.get_schemas())

    def test_drop_schemas(self):
        self.db.execute("CREATE TABLE test1.test ()")
        self.db.execute("CREATE TABLE test2.test ()")
        # Non-cascade drop of a non-empty schema must fail.
        self.assertRaises(psycopg2.Error, self.db.drop_schema, *('test1', False))
        self.assertRaises(psycopg2.Error, self.db.drop_schema, *('smth', True, False))
        self.db.drop_schema('test1', True)
        self.assertRaises(psycopg2.Error, self.db.drop_schema, *(['test2', 'test3'], False))
        self.db.drop_schema(['test2', 'test3'], True)

    def test_set_active_schema(self):
        # No argument resets the active schema to 'public'.
        self.db.set_active_schema()
        self.assertEqual('public', self.db.active_schema)
        self.db.create_schema(self.test_schemas[0])
        self.db.set_active_schema(self.test_schemas[0])
        self.assertEqual(self.test_schemas[0], self.db.active_schema)
        self.db.drop_schema(self.test_schemas[0])
        # Setting an unknown schema falls back to 'public'.
        self.db.set_active_schema('smth')
        self.assertEqual('public', self.db.active_schema)
class DropTableCase(DBTestCase):
    """Exercises drop_table for single names, lists of names and if_exists."""
    def setUp(self):
        # Fresh empty scratch table before every test.
        self.db.execute(f'CREATE TABLE IF NOT EXISTS {self.test_table} ()')
        self.db.refresh()
    def tearDown(self):
        # Clean up regardless of what the test already dropped.
        self.db.execute(f'DROP TABLE IF EXISTS {self.test_table} CASCADE')
        self.db.refresh()
    def test_drop(self):
        self.db.drop_table(self.test_table)
        self.assertNotIn(self.test_table, self.db.tables())
    def test_drop_multiple(self):
        extra_table = 'public.test_postgres1'
        self.db.execute('CREATE TABLE IF NOT EXISTS public.test_postgres1 ()')
        self.db.refresh()
        self.db.drop_table([self.test_table, extra_table])
        for name in (self.test_table, extra_table):
            self.assertNotIn(name, self.db.tables())
    def test_if_not_exists(self):
        self.db.drop_table(self.test_table)
        # Dropping again must raise once if_exists is disabled.
        self.assertRaises(psycopg2.Error, self.db.drop_table, self.test_table, if_exists=False)
class CreateEmptyTableCase(DBTestCase):
    """Covers create_empty_table: existence, if_not_exists, explicit types and df-inferred types."""
    def tearDown(self):
        self.db.drop_table(self.test_table)
    def test_new_table(self):
        self.db.create_empty_table(self.test_table, if_not_exists=True)
        self.assertIn(self.test_table, self.db.tables())
        # A freshly created table has no rows.
        self.assertTrue(self.db.query(f"SELECT * FROM {self.test_table}").empty)
    def test_if_not_exists(self):
        self.db.create_empty_table(self.test_table)
        # Creating it again without the flag must raise; with the flag it is a no-op.
        self.assertRaises(psycopg2.Error, self.db.create_empty_table, self.test_table)
        self.db.create_empty_table(self.test_table, if_not_exists=True)
    def test_types_and_columns(self):
        params = dict()
        params['schema'], params['table_name'] = self.test_table.split('.')
        # types
        # pandas-style dtype names should map to PostgreSQL column types.
        test_types = {'a': 'int', 'b': 'float', 'c': 'object'}
        types_query = f"""SELECT column_name,
        CASE
            WHEN domain_name IS NOT NULL THEN domain_name
            WHEN data_type='character varying' THEN 'varchar('||character_maximum_length||')'
            WHEN data_type='numeric' THEN 'numeric('||numeric_precision||','||numeric_scale||')'
            ELSE data_type
        END AS data_type
        FROM information_schema.columns
        WHERE table_schema = %(schema)s AND table_name = %(table_name)s """
        self.db.create_empty_table(self.test_table, types=test_types)
        # int -> integer, float -> double precision, object -> text.
        self.assertEqual(self.db.query(types_query, params=params)['data_type'].tolist(),
                         ['integer', 'double precision', 'text'])
        # columns
        cols_query = f"""SELECT column_name FROM information_schema.columns
        WHERE table_schema = %(schema)s AND table_name = %(table_name)s """
        self.assertEqual(self.db.query(cols_query, params=params)['column_name'].to_list(), list(test_types))
    def test_types_from_df(self):
        params = dict()
        params['schema'], params['table_name'] = self.test_table.split('.')
        test_df = pd.DataFrame({'a': [1, 2, 3, 4, 5],
                                'b': [1.1, 2, 3.3, 4.4, 5.5],
                                'c': [1.1, 2, '3', 4, None]})
        # Explicit types override what would be inferred from the DataFrame.
        test_types = {'b': 'object'}
        types_query = f"""SELECT column_name,
        CASE
            WHEN domain_name IS NOT NULL THEN domain_name
            WHEN data_type='character varying' THEN 'varchar('||character_maximum_length||')'
            WHEN data_type='numeric' THEN 'numeric('||numeric_precision||','||numeric_scale||')'
            ELSE data_type
        END AS data_type
        FROM information_schema.columns
        WHERE table_schema = %(schema)s AND table_name = %(table_name)s """
        self.db.create_empty_table(self.test_table, test_types, test_df)
        # 'a' inferred as integer, 'b' forced to text, 'c' (mixed) becomes text.
        self.assertEqual(self.db.query(types_query, params=params)['data_type'].tolist(),
                         ['integer', 'text', 'text'])
class GetTableCase(DBTestCase):
    """Covers get_table: full select, column subsets, limit, where and ordering.

    Fixture rows: (1, 'b', 1), (1, 'a', 2.0), (2, 'c', NULL).
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.db.execute(f"CREATE TABLE {cls.test_table} (a integer, b text, c float)")
        cls.db.execute(f"INSERT INTO {cls.test_table} VALUES (1, 'b', 1), (1, 'a', 2.0), (2, 'c', null)")
        cls.db.refresh()
    @classmethod
    def tearDownClass(cls):
        cls.db.drop_table(cls.test_table)
        super().tearDownClass()
    def test_select_all(self):
        df = self.db.get_table(self.test_table)
        self.assertEqual(df.shape[0], 3)
        self.assertEqual(df.shape[1], 3)
    def test_select_columns(self):
        # A bare string should be accepted as a one-column selection.
        cols_sets = (['a', 'b'], ['a', 'c'], ['b'], 'b')
        for cols in cols_sets:
            df = self.db.get_table(self.test_table, columns=cols)
            if isinstance(cols, str):
                cols = [cols]
            self.assertEqual(df.shape[0], 3)
            self.assertEqual(df.shape[1], len(cols))
            self.assertEqual(df.columns.to_list(), cols)
    def test_limit(self):
        limits_sets = (0, 1, 2, 3)
        for limit in limits_sets:
            df = self.db.get_table(self.test_table, limit=limit)
            self.assertEqual(df.shape[0], limit)
    def test_select_where(self):
        test_set = (dict(where="a = 1", result_set=[[1, 'b', 1], [1, 'a', 2.0]], shape=2),
                    dict(where="b = 'b'", result_set=[[1, 'b', 1]], shape=1),
                    dict(where="c is Null", result_set=[[2, 'c', None]], shape=1),
                    dict(where="a = 1 and b = 'b'", result_set=[[1, 'b', 1]], shape=1))
        for test in test_set:
            df = self.db.get_table(self.test_table, where=test['where'])
            self.assertEqual(df.shape[0], test['shape'])
            # na_value=None so SQL NULLs compare equal to the expected None.
            self.assertEqual(df.to_numpy(na_value=None).tolist(), test['result_set'])
    def test_where_safety(self):
        # Stacked-statement and comment-style injection attempts must be rejected.
        test_set = (f"a = 1; SELECT * FROM {self.test_table}",
                    f"'; SELECT * FROM {self.test_table} --")
        for test in test_set:
            self.assertRaises(DatabaseError, self.db.get_table, self.test_table, where=test)
    def test_order_and_sort(self):
        test_set = (('a', 'asc', [[1, 'b', 1], [1, 'a', 2.0], [2, 'c', None]]),
                    ('b', 'asc', [[1, 'a', 2.0], [1, 'b', 1], [2, 'c', None]]),
                    (['a', 'b'], 'asc', [[1, 'a', 2.0], [1, 'b', 1], [2, 'c', None]]),
                    # sort dir
                    ('a', 'desc', [[2, 'c', None], [1, 'b', 1], [1, 'a', 2.0]]),
                    ('b', 'desc', [[2, 'c', None], [1, 'b', 1], [1, 'a', 2.0]]),
                    (['a', 'b'], 'desc', [[2, 'c', None], [1, 'b', 1], [1, 'a', 2.0]]))
        for order, sort_dir, result in test_set:
            df = self.db.get_table(self.test_table, order_by=order, sort_dir=sort_dir)
            self.assertEqual(df.to_numpy(na_value=None).tolist(), result)
class UploadTableCase(DBTestCase):
    """Covers _commit_table and the public upload_table entry point."""
    def tearDown(self):
        self.db.drop_table(self.test_table)
    def test__commit_table(self):
        frame = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
        self.db.create_empty_table(self.test_table, from_df=frame)
        self.db._commit_table(self.test_table, frame.to_numpy().tolist(), frame.columns.to_list())
        round_trip = self.db.get_table(self.test_table)
        self.assertTrue(frame.equals(round_trip))
    def test_upload_columns(self):
        rows = [[1, 2], [4, 5]]
        # raise error from creating a table without column definitions
        self.assertRaises(TypeError, self.db.upload_table, *(self.test_table, rows, ['a', 'b']))
        # creates table without columns which results in error uploading data
        self.assertRaises(KeyError, self.db.upload_table, *(self.test_table, rows, None))
    def test_upload_df(self):
        frame = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
        self.db.upload_table(self.test_table, frame)
        round_trip = self.db.get_table(self.test_table)
        self.assertTrue(frame.equals(round_trip))
    def test_upload_values(self):
        rows = [[1, 2], [4, 5]]
        col_types = {'a': 'integer', 'b': 'float'}
        self.db.upload_table(self.test_table, rows, col_types)
        round_trip = self.db.get_table(self.test_table)
        self.assertEqual(rows, round_trip.to_numpy().tolist())
    def test_upload_conflict(self):
        frame = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
        self.db.upload_table(self.test_table, frame)
        # Existing table: 'raise' surfaces the conflict, 'drop' replaces it.
        self.assertRaises(KeyError, self.db.upload_table, self.test_table, frame, on_conflict='raise')
        self.db.upload_table(self.test_table, frame, on_conflict='drop')
class ColumnsCase(DBTestCase):
    """Covers column DDL helpers: add_columns, alter_columns, drop_columns, rename_column."""
    def setUp(self):
        self.db.execute(f"CREATE TABLE {self.test_table} (a integer, b text, c float)")
        self.db.refresh()
    def tearDown(self) -> None:
        self.db.drop_table(self.test_table)
    def test_add_columns(self):
        # Accepts a bare name, a list of names, or a {name: type} mapping.
        self.db.add_columns(self.test_table, 'd')
        # Adding an existing column must raise.
        self.assertRaises(psycopg2.Error, self.db.add_columns, *(self.test_table, 'd'))
        self.db.add_columns(self.test_table, ['e', 'f'])
        # 'string' is presumably mapped to a text type by the manager -- TODO confirm.
        self.db.add_columns(self.test_table, {'g': 'int', 'h': 'string'})
    def test_add_columns_not_null(self):
        schema, table_name = self.test_table.split('.')
        # Lists columns that still allow NULLs; NOT NULL columns must be absent.
        query = f"""SELECT column_name FROM information_schema.columns
                    WHERE table_schema = '{schema}'
                    AND table_name = '{table_name}'
                    AND is_nullable = 'YES';"""
        self.db.add_columns(self.test_table, 'i', True)
        self.assertNotIn('i', self.db.query(query)['column_name'].to_list())
        # One flag applies to every column in the list...
        self.db.add_columns(self.test_table, ['j', 'k'], True)
        self.assertNotIn('j', self.db.query(query)['column_name'].to_list())
        self.assertNotIn('k', self.db.query(query)['column_name'].to_list())
        # ...or a per-column list of flags of matching length.
        self.db.add_columns(self.test_table, ['l', 'm'], [True, False])
        self.assertNotIn('l', self.db.query(query)['column_name'].to_list())
        self.assertIn('m', self.db.query(query)['column_name'].to_list())
        # Mismatched lengths are rejected via assertion.
        self.assertRaises(AssertionError, self.db.add_columns, *(self.test_table, ['n', 'o'], [True]))
    def test_alter_columns(self):
        self.db.execute(f"""INSERT INTO {self.test_table} VALUES (1, 'a', 1.0), (2, 'b', 4)""")
        self.db.alter_columns(self.test_table, {'a': 'float'})
        self.assertEqual(self.db.get_dtypes(self.test_table).to_dict(),
                         {'a': 'double precision', 'b': 'text', 'c': 'double precision'})
    def test_alter_columns_using(self):
        self.db.execute(f"""INSERT INTO {self.test_table} VALUES (1, '1', 1.0), ('2', '3', '4')""")
        # not using
        # text -> integer needs an explicit USING cast in PostgreSQL.
        self.assertRaises(psycopg2.Error, self.db.alter_columns, *(self.test_table, {'b': 'integer'}))
        # using
        self.db.alter_columns(self.test_table, {'b': 'integer'}, using='integer')
        self.assertEqual(self.db.get_dtypes(self.test_table).to_dict(),
                         {'a': 'integer', 'b': 'integer', 'c': 'double precision'})
        # using multiple
        # setup all as text
        self.db.alter_columns(self.test_table, {'a': 'text', 'b': 'text', 'c': 'text'})
        # fail
        # A list of USING expressions must match the number of altered columns.
        self.assertRaises(AssertionError, self.db.alter_columns,
                          *(self.test_table, {'a': 'integer', 'c': 'integer'}, ['integer']))
        self.assertRaises(psycopg2.Error, self.db.alter_columns,
                          *(self.test_table, {'a': 'integer', 'b': 'integer'}, ['integer', 'timestamp']))
        # convert multiple
        self.db.alter_columns(self.test_table, {'a': 'integer', 'b': 'integer'}, ['integer', 'integer'])
        self.assertEqual(self.db.get_dtypes(self.test_table).to_dict(),
                         {'a': 'integer', 'b': 'integer', 'c': 'text'})
        # A single USING string is broadcast to all columns.
        self.db.alter_columns(self.test_table, {'a': 'integer', 'b': 'integer', 'c': 'integer'}, 'integer')
        self.assertEqual(self.db.get_dtypes(self.test_table).to_dict(),
                         {'a': 'integer', 'b': 'integer', 'c': 'integer'})
    def test_drop_columns(self):
        # Unknown column 'd' raises unless if_exists is left at its default.
        self.assertRaises(psycopg2.Error, self.db.drop_columns, self.test_table, ['c', 'd'], if_exists=False)
        self.db.drop_columns(self.test_table, ['c', 'd'])
        self.assertNotIn('c', self.db.get_columns(self.test_table))
        self.db.drop_columns(self.test_table, 'a')
        self.assertNotIn('a', self.db.get_columns(self.test_table))
    def test_drop_columns_cascade(self):
        # Per-column cascade flags must match the number of columns.
        self.assertRaises(AssertionError, self.db.drop_columns, *(self.test_table, ['b', 'c'], [True]))
    def test_rename_column(self):
        self.db.rename_column(self.test_table, 'a', 'd')
        self.assertIn('d', self.db.get_columns(self.test_table))
        self.assertNotIn('a', self.db.get_columns(self.test_table))
        # Renaming a missing column, or onto an existing name, both raise.
        self.assertRaises(psycopg2.Error, self.db.rename_column, self.test_table, 'e', 'f')
        self.assertRaises(psycopg2.Error, self.db.rename_column, self.test_table, 'd', 'b')
class IndexCase(DBTestCase):
    """Covers index management: create/drop/get for single- and multi-column
    indexes, including name collisions across schemas."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Two schemas so same-named tables/indexes in different schemas can be tested.
        cls.db.create_schema('test1')
        cls.db.create_schema('test2')
    @classmethod
    def tearDownClass(cls):
        cls.db.drop_schema(['test1', 'test2'])
        super().tearDownClass()
    def setUp(self):
        # Identical layout everywhere; test1 additionally holds a second table.
        self.db.create_empty_table('test1.test', {'a': 'integer', 'b': 'float'})
        self.db.create_empty_table('test1.test1', {'a': 'integer', 'b': 'float'})
        self.db.create_empty_table('test2.test', {'a': 'integer', 'b': 'float'})
    def tearDown(self):
        self.db.drop_table(['test1.test', 'test1.test1', 'test2.test'])
    def test_create(self):
        self.db.create_index('test1.test', 'a')
        self.db.create_index('test1.test1', ['a', 'b'])
        # Indexing an unknown column must raise.
        self.assertRaises(psycopg2.Error, self.db.create_index, 'test2.test', 'c')
    def test_create_with_name(self):
        custom_index = 'custom_name'
        query = """SELECT * FROM pg_indexes WHERE schemaname != 'pg_catalog'
                   AND schemaname = 'test1' AND tablename = 'test' """
        self.assertEqual(self.db.query(query).shape[0], 0)
        self.db.create_index('test1.test', 'a', custom_index)
        self.assertEqual(self.db.query(query).shape[0], 1)
        self.assertIn(custom_index, self.db.query(query)['indexname'].to_list())
    def test_create_unique(self):
        query = """SELECT * FROM pg_indexes WHERE schemaname != 'pg_catalog'
                   AND schemaname = 'test1' AND tablename = 'test' """
        self.db.create_index('test1.test', 'a', unique=True)
        # The generated CREATE UNIQUE INDEX shows up in pg_indexes.indexdef.
        self.assertIn('unique', self.db.query(query).loc[0, 'indexdef'].lower())
    def test_create_non_unique(self):
        query = """SELECT * FROM pg_indexes WHERE schemaname != 'pg_catalog'
                   AND schemaname = 'test1' AND tablename = 'test' """
        self.db.create_index('test1.test', 'a', unique=False)
        self.assertNotIn('unique', self.db.query(query).loc[0, 'indexdef'].lower())
    def test_create_on_conflict(self):
        self.db.create_index('test1.test', 'a')
        self.db.create_index('test1.test', ['a', 'b'])  # no conflict, works fine
        # Re-creating the same column set conflicts; 'drop' replaces instead.
        self.assertRaises(IndexError, self.db.create_index, 'test1.test', ['a', 'b'])
        self.db.create_index('test1.test', ['a', 'b'], on_conflict='drop')
    def test_drop(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.drop_index('test1.custom_name')
    def test_drop_cascade(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.drop_index('test1.custom_name', cascade=True)
    def test_drop_return_query(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        # Default returns None; return_query=True yields the SQL string instead.
        self.assertEqual(self.db.drop_index('test1.custom_name'), None)
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.assertIsInstance(self.db.drop_index('test1.custom_name', return_query=True), str)
    def test_drop_no_schema(self):
        # An unqualified name works when it is unambiguous.
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.drop_index('custom_name')
    def test_drop_no_schema_multiple_same_name(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test2.test', 'a', 'custom_name')
        # Ambiguous unqualified name must raise; qualifying resolves it.
        self.assertRaises(IndexError, self.db.drop_index, 'custom_name')
        self.db.drop_index('test2.custom_name')
    def test_get(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.assertIn('custom_name', self.db.get_index('custom_name')['indexname'].to_list())
    def test_get_on_schema(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test2.test', 'a', 'custom_name')
        # Without a schema both hits are returned; with one the result is filtered.
        self.assertEqual(self.db.get_index('custom_name').shape[0], 2)
        self.assertEqual(self.db.get_index('custom_name', 'test1').shape[0], 1)
    def test_get_all(self):
        idxs = (['test1', 'test', 'custom_name'],
                ['test1', 'test1', 'custom_name1'],
                ['test2', 'test', 'custom_name'])
        for schema, table, idx_name in idxs:
            self.db.create_index(f'{schema}.{table}', 'a', idx_name)
        idxs_read = self.db.get_indexes()[['schemaname', 'tablename', 'indexname']].to_numpy().tolist()
        for test in idxs:
            self.assertIn(test, idxs_read)
    def test_get_all_on_table_name(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test1.test1', 'a', 'custom_name1')
        self.db.create_index('test2.test', 'a', 'custom_name')
        # Bare table name matches across schemas; qualified names pin one schema.
        self.assertEqual(self.db.get_indexes(table_name='test').shape[0], 2)
        self.assertEqual(self.db.get_indexes(table_name='test1').shape[0], 1)
        self.assertEqual(self.db.get_indexes(table_name='test3.test').shape[0], 0)
        self.assertEqual(self.db.get_indexes(table_name='test1.test').shape[0], 1)
    def test_get_all_on_schema(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test1.test1', 'a', 'custom_name1')
        self.db.create_index('test2.test', 'a', 'custom_name')
        self.assertEqual(self.db.get_indexes(schema='test1').shape[0], 2)
        self.assertEqual(self.db.get_indexes(schema='test2').shape[0], 1)
    def test_get_all_on_schema_and_table(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test1.test1', 'a', 'custom_name1')
        self.db.create_index('test2.test', 'a', 'custom_name')
        self.assertEqual(self.db.get_indexes(table_name='test', schema='test1').shape[0], 1)
        self.assertEqual(self.db.get_indexes(table_name='test1', schema='test1').shape[0], 1)
        self.assertEqual(self.db.get_indexes(table_name='test', schema='test2').shape[0], 1)
        # A schema embedded in table_name presumably wins over the schema kwarg -- TODO confirm.
        self.assertEqual(self.db.get_indexes(table_name='test1.test', schema='test3').shape[0], 1)
        self.assertEqual(self.db.get_indexes(table_name='test3.test', schema='test1').shape[0], 0)
    def test_get_indexes_columns_by_name(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
        # Result is one row per index, each holding the indexed column list.
        self.assertEqual(self.db.get_indexes_columns('custom_name').values.tolist(), [['a']])
        self.assertEqual(self.db.get_indexes_columns('custom_name1').values.tolist(), [['a', 'b']])
        self.assertEqual(self.db.get_indexes_columns(['custom_name', 'custom_name1']).values.tolist(),
                         [['a'], ['a', 'b']])
        self.assertTrue(self.db.get_indexes_columns('some_other_name').empty)
    def test_get_indexes_columns_by_table(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test2.test', 'b', 'custom_name')
        self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
        self.assertEqual(self.db.get_indexes_columns(table_name='test').values.tolist(), [['a'], ['b']])
        self.assertEqual(self.db.get_indexes_columns(table_name='test1').values.tolist(), [['a', 'b']])
        self.assertTrue(self.db.get_indexes_columns(table_name='test2').empty)
    def test_get_indexes_columns_by_schema(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test2.test', 'b', 'custom_name')
        self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
        self.assertTrue(self.db.get_indexes_columns(schema='test').empty)
        self.assertEqual(self.db.get_indexes_columns(schema='test1').values.tolist(), [['a'], ['a', 'b']])
        self.assertEqual(self.db.get_indexes_columns(schema='test2').values.tolist(), [['b']])
    def test_get_indexes_columns_by_name_table(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test2.test', 'b', 'custom_name')
        self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
        self.assertEqual(self.db.get_indexes_columns('custom_name', 'test').values.tolist(), [['a'], ['b']])
        self.assertEqual(self.db.get_indexes_columns('custom_name1', 'test1').values.tolist(), [['a', 'b']])
        self.assertEqual(self.db.get_indexes_columns(['custom_name', 'custom_name1'], 'test1').values.tolist(),
                         [['a', 'b']])
        self.assertTrue(self.db.get_indexes_columns('custom_name', 'test1').empty)
        self.assertTrue(self.db.get_indexes_columns(['custom_name', 'custom_name1'], 'test3').empty)
    def test_get_indexes_columns_by_name_schema(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test2.test', 'b', 'custom_name')
        self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
        self.assertEqual(self.db.get_indexes_columns('custom_name', schema='test1').values.tolist(), [['a']])
        self.assertEqual(self.db.get_indexes_columns('custom_name1', schema='test1').values.tolist(), [['a', 'b']])
        self.assertEqual(self.db.get_indexes_columns(['custom_name', 'custom_name1'], schema='test2').values.tolist(),
                         [['b']])
        self.assertTrue(self.db.get_indexes_columns('custom_name', schema='test').empty)
        self.assertTrue(self.db.get_indexes_columns(['custom_name', 'custom_name1'], schema='test').empty)
    def test_get_indexes_columns_by_name_table_schema(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
        self.assertEqual(self.db.get_indexes_columns('custom_name', table_name='test',
                                                     schema='test1').values.tolist(), [['a']])
        self.assertEqual(self.db.get_indexes_columns('custom_name1', table_name='test1',
                                                     schema='test1').values.tolist(), [['a', 'b']])
        self.assertTrue(self.db.get_indexes_columns('custom_name2', table_name='test', schema='test1').empty)
    def test_get_indexes_columns_by_table_schema(self):
        self.db.create_index('test1.test', 'a', 'custom_name')
        self.db.create_index('test2.test', 'b', 'custom_name')
        self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
        self.assertEqual(self.db.get_indexes_columns(table_name='test', schema='test1').values.tolist(), [['a']])
        self.assertEqual(self.db.get_indexes_columns(table_name='test', schema='test2').values.tolist(), [['b']])
        self.assertEqual(self.db.get_indexes_columns(table_name='test1', schema='test1').values.tolist(), [['a', 'b']])
        self.assertTrue(self.db.get_indexes_columns(table_name='test1', schema='test').empty)
class PrimaryKeysCase(DBTestCase):
    """Covers primary-key helpers: get/set/drop and the _temporary_primary_key context manager."""
    def setUp(self) -> None:
        # 'b' starts out as the primary key in every test.
        self.db.execute(f"CREATE TABLE {self.test_table} (a integer, b text PRIMARY KEY, c float)")
        self.db.execute(f"INSERT INTO {self.test_table} VALUES (1, 'b', 1), (3, 'a', 2.0), (2, 'c', null)")
        self.db.refresh()
    def tearDown(self) -> None:
        self.db.drop_table(self.test_table)
    def test_drop_primary_key(self):
        # 'p' filters constraints down to the primary key.
        self.assertEqual(self.db.get_constraints(self.test_table, 'p').shape[0], 1)
        self.db.drop_primary_key(self.test_table)
        self.db.drop_primary_key(self.test_table)  # test it doesn't raise an error if it doesn't exist
    def test_get_primary_key(self):
        self.assertEqual(self.db.get_primary_key(self.test_table).shape[0], 1)
        self.db.drop_primary_key(self.test_table)
        self.assertEqual(self.db.get_primary_key(self.test_table).shape[0], 0)
    def test_get_primary_key_columns(self):
        self.assertEqual(self.db.get_primary_key_columns(self.test_table), ['b'])
        # idx=True returns 1-based column positions instead of names.
        self.assertEqual(self.db.get_primary_key_columns(self.test_table, idx=True), [2])
        self.db.drop_primary_key(self.test_table)
        self.assertEqual(self.db.get_primary_key_columns(self.test_table), [])
        self.assertEqual(self.db.get_primary_key_columns(self.test_table, idx=True), [])
    def test_set_primary_key(self):
        self.db.drop_primary_key(self.test_table)
        self.assertEqual(self.db.get_primary_key(self.test_table).shape[0], 0)
        # set with one column
        self.db.set_primary_key(self.test_table, 'b')
        self.assertEqual(self.db.get_primary_key_columns(self.test_table), ['b'])
        # try to set another and catch error
        self.assertRaises(psycopg2.Error, self.db.set_primary_key, *(self.test_table, ['a', 'b']))
        # set with on_conflict='drop'
        self.db.set_primary_key(self.test_table, ['a', 'b'], on_conflict='drop')
        self.assertEqual(self.db.get_primary_key_columns(self.test_table), ['a', 'b'])
    def test_temporary_primary_key(self):
        keys = (['a'], ['a', 'b'])
        for key in keys:
            existing_key = self.db.get_primary_key_columns(self.test_table)
            # Inside the context the temporary key is active; on exit the
            # previous key is restored.
            with self.db._temporary_primary_key(key, self.test_table) as new_key:
                self.assertEqual(self.db.get_primary_key_columns(self.test_table), key)
                self.assertEqual(new_key, key)
            self.assertEqual(self.db.get_primary_key_columns(self.test_table), existing_key)
    def test_temporary_primary_key_no_existing_key(self):
        self.db.drop_primary_key(self.test_table)
        key = ['a']
        with self.db._temporary_primary_key(key, self.test_table) as new_key:
            self.assertEqual(self.db.get_primary_key_columns(self.test_table), key)
            self.assertEqual(new_key, key)
        # With no pre-existing key, exit leaves the table keyless again.
        self.assertEqual(self.db.get_primary_key_columns(self.test_table), [])
    def test_temporary_primary_key_conflict(self):
        key = ['a']
        existing_key = self.db.get_primary_key_columns(self.test_table)
        # Switch to 'ignore' so the failed restore inside the context does not raise.
        self.db.set_exception_handling('ignore')
        with self.db._temporary_primary_key(['a'], self.test_table) as new_key:
            # Creating duplicate 'b' values makes restoring the original key impossible.
            self.db.execute(f"UPDATE {self.test_table} SET b = 'a' WHERE a = 1")
            self.assertEqual(self.db.get_primary_key_columns(self.test_table), key)
            self.assertEqual(new_key, key)
        # The temporary key stays in place because the restore failed.
        self.assertNotEqual(self.db.get_primary_key_columns(self.test_table), existing_key)
        self.assertEqual(self.db.get_primary_key_columns(self.test_table), key)
        self.db.set_exception_handling('raise')
class MiscTableCase(DBTestCase):
    """Covers table introspection helpers: shape, columns, constraints, dtypes,
    NA counts, nunique, summary and rename.

    Fixture rows: (1, 'b', 1), (1, 'a', 2.0), (2, 'c', NULL).
    """
    def setUp(self):
        self.db.execute(f"CREATE TABLE {self.test_table} (a integer, b text, c float)")
        self.db.execute(f"INSERT INTO {self.test_table} VALUES (1, 'b', 1), (1, 'a', 2.0), (2, 'c', null)")
        self.db.refresh()
    def tearDown(self):
        self.db.drop_table(self.test_table)
    def test_analyse_and_get_shape(self):
        # exact=True counts rows directly; exact=False reads planner statistics,
        # which are stale (0) until ANALYZE runs.
        self.assertEqual(self.db.get_shape(self.test_table, True), (3, 3))
        self.assertEqual(self.db.get_shape(self.test_table, False), (0, 3))
        # check that analyse is working and that the "exact" now gets the correct number of rows
        self.db.analyse(self.test_table)
        self.assertEqual(self.db.get_shape(self.test_table, False), (3, 3))
    def test_get_columns(self):
        self.assertEqual(self.db.get_columns(self.test_table), ['a', 'b', 'c'])
    def test_get_constraints(self):
        # set constraints
        self.db.execute(f"ALTER TABLE {self.test_table} ADD PRIMARY KEY (b)")
        self.db.execute(f"ALTER TABLE {self.test_table} ADD UNIQUE (c)")
        # contype: 'p' = primary key, 'u' = unique (pg_constraint codes).
        self.assertEqual(self.db.get_constraints(self.test_table).shape[0], 2)
        self.assertEqual(self.db.get_constraints(self.test_table)['contype'].to_list(), ['p', 'u'])
        self.assertEqual(self.db.get_constraints(self.test_table, 'primary').shape[0], 1)
        self.assertNotIn('u', self.db.get_constraints(self.test_table, 'p')['contype'].to_list())
    def test_get_dtypes(self):
        # Unknown columns ('d') are silently ignored; a bare string selects one column.
        test_set = ((None, {'a': 'integer', 'b': 'text', 'c': 'double precision'}),
                    (['a', 'b'], {'a': 'integer', 'b': 'text'}),
                    (['a', 'b', 'd'], {'a': 'integer', 'b': 'text'}),
                    (['a'], {'a': 'integer'}),
                    ('a', {'a': 'integer'}))
        for columns, expected in test_set:
            self.assertEqual(self.db.get_dtypes(self.test_table, columns=columns).to_dict(), expected)
    def test_get_na(self):
        self.db.analyse(self.test_table)
        self.assertEqual(self.db.get_na(self.test_table).to_dict(), {'a': 0, 'b': 0, 'c': 1})
        self.assertEqual(self.db.get_na(self.test_table, ['a', 'b']).to_dict(), {'a': 0, 'b': 0})
        self.assertEqual(self.db.get_na(self.test_table, 'a').to_dict(), {'a': 0})
        self.assertEqual(self.db.get_na(self.test_table, ['a', 'b', 'd']).to_dict(), {'a': 0, 'b': 0})
        # relative=True returns NULL fractions instead of counts.
        na = self.db.get_na(self.test_table, relative=True).round(5)
        expected = pd.Series({'a': 0.0, 'b': 0.0, 'c': 1/3}).round(5)
        self.assertTrue(na.equals(expected))
    def test_get_nunique(self):
        # count_null=True counts NULL as an additional distinct value.
        self.assertEqual(self.db.get_nunique(self.test_table).to_dict(), {'a': 2, 'b': 3, 'c': 2})
        self.assertEqual(self.db.get_nunique(self.test_table, count_null=True).to_dict(), {'a': 2, 'b': 3, 'c': 3})
        self.assertEqual(self.db.get_nunique(self.test_table, ['a', 'b']).to_dict(), {'a': 2, 'b': 3})
        self.assertEqual(self.db.get_nunique(self.test_table, 'a').to_dict(), {'a': 2})
        self.assertEqual(self.db.get_nunique(self.test_table, ['a', 'b', 'd']).to_dict(), {'a': 2, 'b': 3})
    def test_get_summary(self):
        self.db.analyse(self.test_table)
        # Summary combines dtype, distinct count and missing-value statistics.
        summary = self.db.get_summary(self.test_table, count_null=True).round(5)
        expected = pd.DataFrame([['integer', 2, 0, 0.0],
                                 ['text', 3, 0, 0.0],
                                 ['double precision', 3, 1, 1/3]],
                                columns=['type', 'distinct', 'missing_values', 'missing_values_per'],
                                index=['a', 'b', 'c']).round(5)
        self.assertTrue(summary.equals(expected))
        summary = self.db.get_summary(self.test_table, count_null=False).round(5)
        expected = pd.DataFrame([['integer', 2, 0, 0.0],
                                 ['text', 3, 0, 0.0],
                                 ['double precision', 2, 1, 1/3]],
                                columns=['type', 'distinct', 'missing_values', 'missing_values_per'],
                                index=['a', 'b', 'c']).round(5)
        self.assertTrue(summary.equals(expected))
    def test_rename_table(self):
        new_name = 'public.test_postgres_new'
        self.db.drop_table(new_name)
        self.db.rename_table(self.test_table, new_name)
        self.assertIn(new_name, self.db.tables())
        # check if exists
        # Renaming a missing table raises unless the if-exists flag is passed.
        self.assertRaises(psycopg2.Error, self.db.rename_table, 'smth', 'smth_new')
        self.db.rename_table('smth', 'smth_new', True)
        self.db.rename_table(new_name, self.test_table)
        self.assertNotIn(new_name, self.db.tables())
class DeleteRowsCase(DBTestCase):
    """Covers delete_rows: full wipe, WHERE filters and SQL-injection safety.

    Fixture rows: (1, 'b', 1), (1, 'a', 2.0), (2, 'c', NULL).
    """
    def setUp(self):
        self.db.execute(f"CREATE TABLE {self.test_table} (a integer, b text, c float)")
        self.db.execute(f"INSERT INTO {self.test_table} VALUES (1, 'b', 1), (1, 'a', 2.0), (2, 'c', null)")
        self.db.refresh()
    def tearDown(self):
        self.db.drop_table(self.test_table)
    def test_delete_all(self):
        # No WHERE clause deletes every row.
        self.db.delete_rows(self.test_table)
        self.assertEqual(self.db.get_shape(self.test_table)[0], 0)
    def test_delete_where_single(self):
        # Each step deletes from the state left by the previous one.
        # BUG FIX: the final case previously used result='a' against the integer
        # column 'a', so the assertNotIn passed vacuously; the deleted value is 1.
        test_sets = (dict(where="b = 'a'", col='b', result='a', shape=2),
                     dict(where="b = 'a'", col='b', result='a', shape=2),  # repetition. shouldn't do anything
                     dict(where="c is Null", col='c', result=None, shape=1),
                     dict(where="a = 1", col='a', result=1, shape=0))
        for test in test_sets:
            self.db.delete_rows(self.test_table, where=test['where'])
            self.assertNotIn(test['result'], self.db.get_table(self.test_table)[test['col']].to_list())
            self.assertEqual(self.db.get_table(self.test_table).shape[0], test['shape'])
    def test_delete_where_multiple(self):
        # One predicate matching two rows removes both at once.
        self.db.delete_rows(self.test_table, where="a = 1")
        self.assertNotIn(1, self.db.get_table(self.test_table)['a'].to_list())
        self.assertEqual(self.db.get_table(self.test_table).shape[0], 1)
    def test_delete_where_multiple_complex(self):
        self.db.delete_rows(self.test_table, where="a = 1 and b = 'a' ")
        self.assertNotIn([1, 'a'], self.db.get_table(self.test_table)[['a', 'b']].to_numpy().tolist())
        self.assertEqual(self.db.get_table(self.test_table).shape[0], 2)
    def test_check_where_safety(self):
        # Stacked-statement and comment-style injection attempts must be rejected.
        test_set = (f"a = 1; SELECT * FROM {self.test_table}",
                    f"'; SELECT * FROM {self.test_table} --")
        for test in test_set:
            self.assertRaises(DatabaseError, self.db.delete_rows, self.test_table, where=test)
class CopyTableCase(DBTestCase):
    """Exercises copy_table: full copies, column/row subsets, structure-only
    copies and copying into another schema."""
    def setUp(self):
        seed = pd.DataFrame({'a': [1, 3, 2], 'b': [4, 5, 6], 'c': [0, 0, 0]})
        self.db.upload_table(self.test_table, seed)
        self.new_table_name = 'public.test_postgres1'
    def tearDown(self):
        for table in (self.test_table, self.new_table_name):
            self.db.drop_table(table)
    def test_copy_all(self):
        self.db.copy_table(self.test_table, self.new_table_name)
        source = self.db.get_table(self.test_table)
        duplicate = self.db.get_table(self.new_table_name)
        self.assertTrue(source.equals(duplicate))
    def test_columns(self):
        self.db.copy_table(self.test_table, self.new_table_name, columns=['a', 'b'])
        expected = self.db.get_table(self.test_table, columns=['a', 'b'])
        duplicate = self.db.get_table(self.new_table_name)
        self.assertTrue(duplicate.equals(expected))
    def test_where(self):
        self.db.copy_table(self.test_table, self.new_table_name, where="a in (1, 2)")
        expected = self.db.get_table(self.test_table, where="a in (1, 2)")
        duplicate = self.db.get_table(self.new_table_name)
        self.assertTrue(duplicate.equals(expected))
    def test_structure_only(self):
        # Columns and dtypes are copied, but no rows.
        self.db.copy_table(self.test_table, self.new_table_name, structure_only=True)
        self.assertEqual(self.db.get_columns(self.test_table),
                         self.db.get_columns(self.new_table_name))
        source_dtypes = self.db.get_dtypes(self.test_table)
        copy_dtypes = self.db.get_dtypes(self.new_table_name)
        self.assertTrue(source_dtypes.equals(copy_dtypes))
        self.assertTrue(self.db.get_table(self.new_table_name).empty)
    def test_another_schema(self):
        self.db.create_schema('test1')
        self.db.copy_table(self.test_table, 'test1', destination_schema='test1')
        self.assertIn('test1.test1', self.db.tables('test1'))
        self.assertTrue(self.db.get_table(self.test_table).equals(self.db.get_table('test1.test1')))
        self.db.drop_schema('test1', cascade=True)
class AppendTableCase(DBTestCase):
    def setUp(self):
        # Target table with a fixed two-column schema for every test.
        self.db.create_empty_table(self.test_table, {'a': 'integer', 'b': 'float'})
    def tearDown(self):
        # Drop the scratch table so each test starts clean.
        self.db.drop_table(self.test_table)
    def test__check_integrity(self):
        """Values/columns consistency checks for DataFrame and list-of-lists inputs."""
        # DataFrame checks
        # A DataFrame carries its own columns, so any subset (or None) is acceptable.
        self.db._check_integrity(pd.DataFrame({'a': [1, 2], 'b': [4, 5]}), ['a', 'b'])
        self.db._check_integrity(pd.DataFrame({'a': [1, 2], 'b': [4, 5]}), ['a'])
        self.db._check_integrity(pd.DataFrame({'a': [1, 2], 'b': [4, 5]}), None)
        # list of lists checks
        # Raw rows must match the column list's width exactly.
        self.db._check_integrity([[1, 2], [4, 5]], ['a', 'b'])
        self.assertRaises(ValueError, self.db._check_integrity, [[1, 2], [4, 5]], ['a'])
        self.assertRaises(ValueError, self.db._check_integrity, [[1, 2], [4, 5]], 'a')
def test__update_table_schema_df_columns(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
column_sets = (['a'], ['a', 'b'], ['a', 'b', 'c'], {'a': 'integer', 'd': 'text'})
for columns in column_sets:
_values, _columns = self.db._update_table_schema(self.test_table, values, columns)
self.assertEqual(values.to_numpy().tolist(), _values)
self.assertEqual(values.columns.to_list(), _columns)
def test__update_table_schema_df_on_new_columns(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5], 'c': [6, 7]})
# 'raise' doesn't filter the dataframe and the error will be raised when the stmt is executed
_values, _columns = self.db._update_table_schema(self.test_table, values, [], 'raise')
self.assertEqual(values.to_numpy().tolist(), _values)
self.assertEqual(values.columns.to_list(), _columns)
_values, _columns = self.db._update_table_schema(self.test_table, values, [], 'ignore')
self.assertEqual(values[['a', 'b']].to_numpy().tolist(), _values)
self.assertEqual(['a', 'b'], _columns)
_values, _columns = self.db._update_table_schema(self.test_table, values, [], 'add')
self.assertEqual(values.to_numpy().tolist(), _values)
self.assertEqual(values.columns.to_list(), _columns)
self.assertIn('c', self.db.get_columns(self.test_table))
def test__update_table_schema_sequence_columns(self):
values = [[1, 2], [4, 5]]
column_sets = (['a', 'b'], {'a': 'integer', 'd': 'text'})
for columns in column_sets:
_values, _columns = self.db._update_table_schema(self.test_table, values, columns)
self.assertEqual(values, _values)
self.assertEqual(list(columns), _columns)
def test__update_table_schema_sequence_on_new_columns(self):
values = [[1, 2], [4, 5]]
# no new column definition
self.assertRaises(ValueError, self.db._update_table_schema, self.test_table, values, ['a', 'c'], 'add')
# new column - raise
columns = {'a': 'integer', 'd': 'text'}
_values, _columns = self.db._update_table_schema(self.test_table, values, columns, 'raise')
self.assertEqual(values, _values)
self.assertEqual(list(columns), _columns)
# new column - ignore
columns = {'a': 'integer', 'd': 'text'}
_values, _columns = self.db._update_table_schema(self.test_table, values, columns, 'ignore')
self.assertEqual(values, _values)
self.assertEqual(list(columns), _columns)
# new column - add
columns = {'a': 'integer', 'd': 'text'}
_values, _columns = self.db._update_table_schema(self.test_table, values, columns, 'add')
self.assertEqual(values, _values)
self.assertEqual(list(columns), _columns)
self.assertIn('d', self.db.get_columns(self.test_table))
def test_append_new_table(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
self.db.append_to_table('public.test_postgres1', values)
table = self.db.get_table('public.test_postgres1')
self.assertTrue(values.equals(table))
self.db.drop_table('public.test_postgres1')
def test_append_new_table_no_column_definition(self):
values = [[1, 2], [4, 5]]
self.assertRaises(TypeError, self.db.append_to_table, 'public.test_postgres1', values, ['a', 'b'])
self.assertRaises(TypeError, self.db.append_to_table, 'public.test_postgres2', values, None)
def test_append_to_table(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
self.db.append_to_table(self.test_table, values)
class UpdateTableCase(DBTestCase):
    """Tests for update_table: integrity checks, expressions, WHERE, injection safety."""

    def setUp(self):
        self.db.upload_table(self.test_table, pd.DataFrame({'a': [1, 3, 2], 'b': [4, 5, 6], 'c': [0, 0, 0]}))

    def tearDown(self):
        self.db.drop_table(self.test_table)

    def test_table_existence(self):
        # Updating a missing table raises; the fixture table works.
        self.assertRaises(psycopg2.Error, self.db.update_table, 'public.test_smth', ['b'], 1)
        self.db.update_table(self.test_table, ['b'], 1)

    def test_integrity(self):
        # Column and value arguments must have matching, non-empty lengths.
        self.assertRaises(IndexError, self.db.update_table, self.test_table, ['b', 'a'], 1)
        self.assertRaises(IndexError, self.db.update_table, self.test_table, ['b', 'a'], [2])
        self.assertRaises(IndexError, self.db.update_table, self.test_table, 'b', [1, 2])
        self.assertRaises(ValueError, self.db.update_table, self.test_table, [], [])

    def test_update(self):
        # A bare column name and a one-element list are both accepted.
        self.db.update_table(self.test_table, 'b', 1)
        self.assertEqual(self.db.get_table(self.test_table, 'b')['b'].unique().tolist(), [1])
        self.db.update_table(self.test_table, ['b'], 3)
        self.assertEqual(self.db.get_table(self.test_table, 'b')['b'].unique().tolist(), [3])

    def test_with_expressions(self):
        # Values may be SQL expressions that reference other columns.
        self.db.update_table(self.test_table, ['b'], ['b+3'])
        self.assertEqual(self.db.get_table(self.test_table, 'b')['b'].to_list(), [7, 8, 9])
        self.db.update_table(self.test_table, ['b', 'c'], [2, 'a+b'])
        self.assertEqual(self.db.get_table(self.test_table, ['b', 'c']).to_numpy().tolist(), [[2, 8], [2, 11], [2, 11]])

    def test_where(self):
        # Only rows matching the WHERE clause are updated.
        self.db.update_table(self.test_table, 'b', 1, where='a=1')
        self.assertEqual(self.db.get_table(self.test_table).to_numpy().tolist(), [[3, 5, 0], [2, 6, 0], [1, 1, 0]])
        self.db.update_table(self.test_table, ['b', 'c'], [3, 5], where='a != 1')
        self.assertEqual(self.db.get_table(self.test_table).to_numpy().tolist(), [[1, 1, 0], [3, 3, 5], [2, 3, 5]])

    def test_safety(self):
        # Injection attempts via values or WHERE must error, not execute.
        injection = "SELECT * FROM public.test --"
        self.assertRaises(DatabaseError, self.db.update_table, self.test_table, 'a', 1, where=f"b=4; {injection}")
        self.assertRaises(DatabaseError, self.db.update_table, self.test_table, 'a', 1, where=f"'; {injection}")
        self.assertRaises(DatabaseError, self.db.update_table, self.test_table, 'a', f"1; {injection}")
        self.assertRaises(DatabaseError, self.db.update_table, self.test_table, ['a', 'b'], [f"1; {injection}", 2])
class UpsertTableCase(DBTestCase):
    """Tests for upsert_table with and without primary keys."""

    def setUp(self):
        self.db.upload_table(self.test_table, pd.DataFrame({'a': [1, 3, 2], 'b': [4, 5, 6], 'c': [0, 0, 0]}))

    def tearDown(self):
        self.db.drop_table(self.test_table)

    def test_upsert_new_table(self):
        # Upserting into a non-existent table simply creates it.
        values = pd.DataFrame({'a': [1, 2], 'b': [4, 6], 'c': [1, 3]})
        self.db.upsert_table('public.test_postgres1', values)
        self.db.drop_table('public.test_postgres1')

    def test_upsert_no_pkey(self):
        # Without a primary key there is no conflict target -> KeyError.
        values = pd.DataFrame({'a': [1, 2], 'b': [4, 6], 'c': [1, 3]})
        self.assertRaises(KeyError, self.db.upsert_table, self.test_table, values)

    def test_upsert_no_pkey_existing_pkey(self):
        values = pd.DataFrame({'a': [7, 1, 4], 'b': [5, 6, 7], 'c': [1, 3, 4]})
        self.db.set_primary_key(self.test_table, 'b')
        self.db.upsert_table(self.test_table, values)
        # Rows with b=5 and b=6 are updated, b=7 inserted, b=4 untouched.
        expected = pd.DataFrame({'a': [1, 7, 1, 4], 'b': [4, 5, 6, 7], 'c': [0, 1, 3, 4]})
        self.assertTrue(expected.equals(self.db.get_table(self.test_table)))

    def test_upsert_new_pkey(self):
        # id_column_pkey designates the conflict column; accepts a bare
        # name or a list of names.
        values = pd.DataFrame({'a': [7, 1, 3], 'b': [5, 6, 7], 'c': [1, 3, 4]})
        self.db.upsert_table(self.test_table, values, id_column_pkey='a')
        expected = pd.DataFrame({'a': [2, 7, 1, 3], 'b': [6, 5, 6, 7], 'c': [0, 1, 3, 4]})
        self.assertTrue(expected.equals(self.db.get_table(self.test_table)))
        values = pd.DataFrame({'a': [5, 1, 4], 'b': [5, 6, 7], 'c': [1, 3, 4]})
        self.db.upsert_table(self.test_table, values, id_column_pkey=['c'])
        expected = pd.DataFrame({'a': [2, 5, 1, 4], 'b': [6, 5, 6, 7], 'c': [0, 1, 3, 4]})
        self.assertTrue(expected.equals(self.db.get_table(self.test_table)))
class MiscCase(DBTestCase):
    """Miscellaneous checks: decorator registration and transaction listing."""

    def test_methods_parse_schema_table(self):
        # Every method that accepts a schema-qualified table name must be
        # wrapped by the parse_schema_table decorator -- keep this list in
        # sync with PostgreSQLManager.
        methods = ['analyse', 'add_columns', 'alter_columns', 'drop_columns', 'rename_column', 'create_index',
                   'drop_primary_key', 'get_primary_key', 'get_primary_key_columns', 'set_primary_key',
                   'append_to_table', 'delete_rows', 'copy_table', 'create_empty_table', 'get_table',
                   'rename_table', 'update_table', 'upload_table', 'upsert_table', 'get_columns', 'get_constraints',
                   'get_dtypes', 'get_na', 'get_nunique', 'get_shape', 'get_summary', '_commit_table',
                   '_update_table_schema']
        decorators = get_decorators(PostgreSQLManager)
        methods_registered = [k for k, v in decorators.items() if 'parse_schema_table' in v]
        self.assertEqual(sorted(methods), sorted(methods_registered))

    def test_get_transactions(self):
        # At least our own connection shows up as a transaction.
        self.assertTrue(not self.db.get_transactions().empty)

    def test_get_transactions_state(self):
        # Filtering by state returns only transactions in that state.
        states = set(self.db.get_transactions('active')['state'].to_list())
        self.assertEqual({'active'}, states)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
'''
Created on 05-Feb-2015
@author: Asawari.Vaidya
'''
from PythonNetBanxSDK.common.DomainObject import DomainObject
class CardExpiry(DomainObject):
    """Domain object holding a card expiry as a month/year pair."""

    def __init__(self, obj):
        """Initialize from *obj*, routing known properties through handlers.

        A None obj leaves the instance empty.
        """
        # Map each incoming property name to its setter method.
        handlers = {
            'month': self.month,
            'year': self.year,
        }
        if obj is not None:
            self.setProperties(obj, handler=handlers)

    def month(self, month):
        """Setter for the expiry month."""
        self.__dict__['month'] = month

    def year(self, year):
        """Setter for the expiry year."""
        self.__dict__['year'] = year
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import sys
from sentry_sdk import init as SentryInit
from sentry_sdk import push_scope as SentryPushScope
from sentry_sdk import capture_message as SentryCaptureMessage
from sentry_sdk import capture_exception as SentryCaptureException
from sentry_sdk import configure_scope
import platform
import traceback
import copy
class CrashReport():
    """
    Crash report class

    Thin wrapper around the Sentry SDK: initializes the client, tags the
    global scope with platform/product metadata, and exposes helpers to
    report exceptions or manual messages (gated by an enabled flag).
    """

    def __init__(self, token, product, product_version, libs_version=None, debug=False, disabled_by_core=False):
        """
        Constructor

        Args:
            token (string): crash report service token (like sentry dsn)
            product (string): product name
            product_version (string): product version
            libs_version (dict): important libraries versions (optional)
            debug (bool): debug flag
            disabled_by_core (bool): used by core to force crash report deactivation
        """
        # logger
        self.logger = logging.getLogger(self.__class__.__name__)
        if debug:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.WARN)

        # members
        self.__disabled_by_core = disabled_by_core
        self.__enabled = False
        self.__token = token
        self.__libs_version = libs_version or {}
        self.__product = product
        self.__product_version = product_version

        # disable crash report if necessary (reporting is gated in the
        # report_* methods; the SDK itself is still initialized below)
        if self.__disabled_by_core or not token:
            self.disable()

        # create and configure sentry client
        SentryInit(
            dsn=self.__token,
            release=product_version,
            attach_stacktrace=True,
            before_send=self.__filter_exception,
            default_integrations=False
        )

        # fill current scope
        with configure_scope() as scope:
            scope.set_tag('platform', platform.platform())
            scope.set_tag('product', product)
            scope.set_tag('product_version', product_version)
            # Bugfix: iterate the normalized dict instead of the raw
            # parameter so the documented libs_version=None default no
            # longer crashes with "NoneType has no attribute items".
            for key, value in self.__libs_version.items():
                scope.set_tag(key, value)
            try:
                # append more metadata for raspberry
                import core.libs.tools as Tools
                infos = Tools.raspberry_pi_infos()
                scope.set_tag('raspberrypi_model', infos[u'model'])
                scope.set_tag('raspberrypi_revision', infos[u'revision'])
                scope.set_tag('raspberrypi_pcbrevision', infos[u'pcbrevision'])
            except Exception as e:  # pragma: no cover
                self.logger.debug('Application is not running on a raspberry pi: %s' % str(e))

    def __filter_exception(self, event, hint):  # pragma: no cover
        """
        Callback used to filter sent exception

        Returns None (dropping the event) for exception types that are
        expected during normal shutdown/operation; otherwise the event.
        """
        if 'exc_info' in hint:
            _, exc_value, _ = hint["exc_info"]
            if type(exc_value).__name__ in (u'KeyboardInterrupt', u'zmq.error.ZMQError', u'AssertionError', u'ForcedException', u'NotReady'):
                self.logger.debug('Exception "%s" filtered' % type(exc_value).__name__)
                return None
        return event

    def is_enabled(self):
        """
        Returns True if crash report is enabled

        Returns:
            bool: True if enabled
        """
        return self.__enabled

    def enable(self):
        """
        Enable crash report
        """
        self.logger.debug('Crash report is enabled')
        self.__enabled = True

    def disable(self):
        """
        Disable crash report
        """
        self.logger.debug('Crash report is disabled')
        self.__enabled = False

    def report_exception(self, extra=None):
        """
        Exception handler that report crashes. It automatically include stack trace

        Args:
            extra (dict): extra metadata to post with the report
        """
        self.logger.debug('Send crash report')
        if self.__enabled:
            with SentryPushScope() as scope:
                self.__set_extra(scope, extra)
                SentryCaptureException()

    def manual_report(self, message, extra=None):
        """
        Report manually a crash report dumping current stack trace to report error

        Args:
            message (string): message to attach to crash report
            extra (dict): extra metadata to post with the report
        """
        self.logger.debug('Send manual report "%s": %s' % (message, extra))
        if self.__enabled:
            with SentryPushScope() as scope:
                self.__set_extra(scope, extra)
                SentryCaptureMessage(message)

    def __set_extra(self, scope, more_extra=None):
        """
        Set extra data to specified Sentry scope

        Args:
            scope: Sentry scope
            more_extra (dict): extra key/values to attach (optional)
        """
        # Bugfix: default changed from a shared mutable {} to None; the
        # isinstance guard below already treats None as "nothing to add".
        if isinstance(more_extra, dict) and more_extra:
            for key, value in more_extra.items():
                scope.set_extra(key, value)

    def get_infos(self):
        """
        Return infos from crash report instance

        Returns:
            dict: crash report infos::

                {
                    libsversion (dict): libs version (lib: version),
                    product (string): product name,
                    productversion (string): product version
                }
        """
        return {
            'libsversion': copy.deepcopy(self.__libs_version),
            'product': self.__product,
            'productversion': self.__product_version,
        }

    def add_module_version(self, module_name, module_version):
        """
        Add module version to libs version

        Args:
            module_name (string): module name
            module_version (string): module version
        """
        self.__libs_version[module_name.lower()] = module_version
        with configure_scope() as scope:
            scope.set_tag(module_name, module_version)
|
import graphene
from dagster import check
from dagster.core.snap import ConfigSchemaSnapshot, ModeDefSnap
from ..util import non_null_list
from .logger import GrapheneLogger
from .resource import GrapheneResource
class GrapheneMode(graphene.ObjectType):
    """GraphQL object exposing a pipeline mode: id, name, resources, loggers."""

    id = graphene.NonNull(graphene.String)
    name = graphene.NonNull(graphene.String)
    description = graphene.String()
    resources = non_null_list(GrapheneResource)
    loggers = non_null_list(GrapheneLogger)

    class Meta:
        name = "Mode"

    def __init__(self, config_schema_snapshot, pipeline_snapshot_id, mode_def_snap):
        super().__init__()
        # Validate the snapshot arguments up front; the resolvers rely on them.
        self._config_schema_snapshot = check.inst_param(
            config_schema_snapshot, "config_schema_snapshot", ConfigSchemaSnapshot
        )
        self._mode_def_snap = check.inst_param(mode_def_snap, "mode_def_snap", ModeDefSnap)
        self._pipeline_snapshot_id = pipeline_snapshot_id

    def resolve_id(self, _graphene_info):
        # Stable identifier: "<pipeline snapshot id>-<mode name>".
        return f"{self._pipeline_snapshot_id}-{self._mode_def_snap.name}"

    def resolve_name(self, _graphene_info):
        return self._mode_def_snap.name

    def resolve_description(self, _graphene_info):
        return self._mode_def_snap.description

    def resolve_resources(self, _graphene_info):
        resource_snaps = sorted(self._mode_def_snap.resource_def_snaps)
        return [GrapheneResource(self._config_schema_snapshot, snap) for snap in resource_snaps]

    def resolve_loggers(self, _graphene_info):
        logger_snaps = sorted(self._mode_def_snap.logger_def_snaps)
        return [GrapheneLogger(self._config_schema_snapshot, snap) for snap in logger_snaps]
|
"""
template loading helper
"""
from jinja2 import FileSystemLoader, Environment
# Module-level Jinja2 environment shared by all callers; templates are
# resolved relative to the web/templates directory.
template_engine = Environment(loader=FileSystemLoader("web/templates"))

def get_template(name):
    """Return the compiled template named *name* from the shared environment."""
    # The Environment caches compiled templates internally.
    return template_engine.get_template(name)
|
from audiobonsai import settings
from django.http import HttpResponse, HttpResponseRedirect
import json
from spotify_helper.helpers import get_spotipy_oauth
from spotify_helper.models import SpotifyUser
import spotipy
from spotipy import oauth2
# Create your views here.
def spotify_ask_user(request):
    """Landing page asking the user to grant Audio Bonsai access to Spotify."""
    host = request.get_host()
    html = (
        '<HTML><BODY>The requested operation requires permission to access your Spotify account. Click '
        f'<a href="http://{host}/spotify/request_token/">here</a> to give Audio Bonsai Access. '
        'Press the back button if you choose not to grant access.</BODY></HTML>'
    )
    return HttpResponse(html)
def spotify_request_token(request):
    """Start (or resume) the Spotify OAuth flow for the logged-in user.

    Creates the SpotifyUser row on first visit, redirects to Spotify's
    authorize URL when no usable token is stored, refreshes an expired
    token, then sends the user back to their saved return path.
    """
    sp_oauth = get_spotipy_oauth(request.get_host())
    return_path = None
    try:
        auth_user = request.user.spotifyuser
        return_path = auth_user.return_path
    except Exception:
        # Bugfix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. The expected failure is the related
        # SpotifyUser row not existing yet -- create it.
        auth_user = SpotifyUser(user=request.user)
        auth_user.save()
    if auth_user.spotify_token is None or len(auth_user.spotify_token) == 0:
        return HttpResponseRedirect(sp_oauth.get_authorize_url())
    # Token is persisted as str(dict); swap quotes so json can parse it.
    # NOTE(review): this breaks if any token value contains a quote -- a
    # proper serialization would be safer; verify against the writers.
    token_info = json.loads(auth_user.spotify_token.replace('\'', '"'))
    if token_info is None or len(token_info) == 0:
        return HttpResponseRedirect(sp_oauth.get_authorize_url())
    elif sp_oauth._is_token_expired(token_info):
        token_info = sp_oauth.refresh_access_token(token_info['refresh_token'])
        print('Saving spotify_token as type {} from spotify_request_token'.format(type(token_info)))
        auth_user.spotify_token = token_info
        auth_user.save()
    if return_path is None or len(return_path) == 0:
        # Not sure how we got here, but follow through on login
        return HttpResponseRedirect('http://' + request.get_host() + '/spotify/login')
    return HttpResponseRedirect(return_path)
def spotify_login(request):
    """OAuth callback: exchange the auth code for a token, store it on the
    user's SpotifyUser row (creating the row if needed), then redirect to
    the confirmation page."""
    sp_oauth = get_spotipy_oauth(request.get_host())
    token_info = sp_oauth.get_access_token(request.GET.dict()['code'])
    try:
        auth_user = request.user.spotifyuser
        print('Saving spotify_token as type {} from spotify_login'.format(type(token_info)))
        auth_user.spotify_token = token_info
        auth_user.save()
    except Exception:
        # Bugfix: was a bare `except:` (also catches SystemExit /
        # KeyboardInterrupt). Expected case: no SpotifyUser row linked to
        # this user yet -- create one carrying the fresh token.
        auth_user = SpotifyUser(user=request.user, spotify_token=token_info)
        auth_user.save()
    return HttpResponseRedirect('http://' + request.get_host() + '/spotify/confirm_access')
def spotify_confirm_access(request):
    """Confirmation page shown after access is granted; auto-redirects the
    user back to the path they came from."""
    try:
        auth_user = request.user.spotifyuser
        return_path = auth_user.return_path
    except Exception:
        # Bugfix: was a bare `except:`. No linked SpotifyUser means the
        # flow was never started -- send the user to the site root.
        return HttpResponseRedirect('http://' + request.get_host())
    html = '<HTML><BODY>Access has been granted (or refreshed). <meta http-equiv="refresh" content="3;url=' \
        + return_path + '"> Click <a href="' + return_path + '">here</a> to return and try your operation again ' \
        'if you are not redirected shortly. Thanks for using Audio Bonsai!</BODY></HTML>'
    return HttpResponse(html)
def expire_token(request):
    """Debug view: force the stored Spotify token to look expired, then
    report whether spotipy agrees."""
    auth_user = request.user.spotifyuser
    # Token is stored as str(dict); swap quotes so json can parse it.
    token_info = json.loads(auth_user.spotify_token.replace('\'', '"'))
    # Pull the expiry 15000 s into the past so the token reads as stale.
    token_info['expires_at'] = token_info['expires_at'] - 15000
    print('Saving spotify_token as type {} from expire_token'.format(type(token_info)))
    auth_user.spotify_token = token_info
    auth_user.save()
    sp_oauth = oauth2.SpotifyOAuth(settings.SPOTIPY_CLIENT_ID,
                                   settings.SPOTIPY_CLIENT_SECRET,
                                   'http://' + request.get_host() + '/spotify/ask_user')
    html='<HTML><BODY>_is_token_expired:' + str(sp_oauth._is_token_expired(token_info)) + '</BODY></HTML>'
    return HttpResponse(html)
def test_conn(request):
    """Debug view: verify the stored token works by fetching the current
    Spotify user's profile and rendering it as an HTML list."""
    auth_user = request.user.spotifyuser
    print(auth_user.spotify_token)
    # Token is stored as str(dict); swap quotes so json can parse it.
    token_info = json.loads(auth_user.spotify_token.replace('\'', '"'))
    sp = spotipy.Spotify(auth=token_info['access_token'])
    html = '<HTML><BODY>Current User:' + sp.current_user()[u'id'] + '<br/><ul>'
    user_info = sp.user(sp.current_user()[u'id'])
    for key in user_info.keys():
        html += '<li>' + key + ': ' + str(user_info[key]) + '</li>'
    html += '</ul></BODY></HTML>'
    return HttpResponse(html)
|
#
# MIT 6.01 (Week 1)
# Design Lab 1. Problem 1.3.4: 2D vector arithmetic
# Define a Python class V2, which represents two-dimensional vectors and
# supports the following operations:
# - Create a new vector out of two real numbers: v = V2(1.1, 2.2)
# - Convert a vector to a string (with the __str__ method)
# - Access the components (with the getX and getY methods)
# - Add two V2s to get a new V2 (with add and __add__ methods)
# - Multiply a V2 by a scalar (real or int) and return a new V2
# (with the mul and __mul__ methods)
#
|
# *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
import json
import os
from tastypie.test import ResourceTestCaseMixin
from unittest import TestCase # have to use unittest.TestCase to get tests to store to database, django.test.TestCase flushes db
from reo.nested_to_flat_output import nested_to_flat
from reo.models import ModelManager
from reo.utilities import check_common_outputs
load_list_2 = [50]*35040
fts_post_2 = {"Scenario": {"webtool_uuid": None, "description": "", "timeout_seconds": 295, "Site": {"PV": {"pbi_years": 1.0, "macrs_bonus_pct": 0.0, "max_kw": 0.0, "pbi_max_us_dollars": 1000000000.0, "radius": 0.0, "state_ibi_pct": 0.0, "utility_rebate_max_us_dollars": 10000000000.0, "installed_cost_us_dollars_per_kw": 2000.0, "utility_ibi_max_us_dollars": 10000000000.0, "tilt": 0.537, "federal_rebate_us_dollars_per_kw": 0.0, "gcr": 0.4, "pbi_system_max_kw": 1000000000.0, "utility_ibi_pct": 0.0, "state_ibi_max_us_dollars": 10000000000.0, "state_rebate_us_dollars_per_kw": 0.0, "macrs_option_years": 5, "state_rebate_max_us_dollars": 10000000000.0, "dc_ac_ratio": 1.1, "federal_itc_pct": 0.3, "pbi_us_dollars_per_kwh": 0.0, "module_type": 0, "array_type": 1, "existing_kw": 0.0, "om_cost_us_dollars_per_kw": 16.0, "utility_rebate_us_dollars_per_kw": 0.0, "min_kw": 0.0, "losses": 0.14, "macrs_itc_reduction": 0.5, "degradation_pct": 0.005, "inv_eff": 0.96, "azimuth": 180.0}, "Generator": {"pbi_years": 0.0, "macrs_bonus_pct": 0.0, "om_cost_us_dollars_per_kwh": 0.01, "max_kw": 1000000000.0, "pbi_max_us_dollars": 0.0, "state_ibi_pct": 0.0, "fuel_intercept_gal_per_hr": 0.0125, "generator_only_runs_during_grid_outage": True, "state_rebate_us_dollars_per_kw": 0.0, "installed_cost_us_dollars_per_kw": 600.0, "utility_ibi_max_us_dollars": 0.0, "fuel_avail_gal": 1000000000.0, "min_turn_down_pct": 0.0, "pbi_system_max_kw": 0.0, "utility_ibi_pct": 0.0, "state_ibi_max_us_dollars": 0.0, "diesel_fuel_cost_us_dollars_per_gallon": 3.0, "fuel_slope_gal_per_kwh": 0.068, "utility_rebate_max_us_dollars": 0.0, "macrs_option_years": 0, "state_rebate_max_us_dollars": 0.0, "federal_itc_pct": 0.0, "existing_kw": 0.0, "pbi_us_dollars_per_kwh": 0.0, "om_cost_us_dollars_per_kw": 10.0, "utility_rebate_us_dollars_per_kw": 0.0, "min_kw": 0.0, "macrs_itc_reduction": 0.0, "federal_rebate_us_dollars_per_kw": 0.0, "generator_sells_energy_back_to_grid": False}, "LoadProfile": {"loads_kw": 
load_list_2,"critical_loads_kw_is_net": False, "critical_load_pct": 0.5, "loads_kw_is_net": True, "outage_end_hour": None, "monthly_totals_kwh": [], "year": 2018, "outage_start_hour": None, "outage_is_major_event": True, "critical_loads_kw": [], "annual_kwh": None}, "roof_squarefeet": None, "Storage": {"max_kwh": 0.0, "rectifier_efficiency_pct": 0.96, "total_itc_pct": 0.0, "min_kw": 0.0, "max_kw": 0.0, "replace_cost_us_dollars_per_kw": 460.0, "replace_cost_us_dollars_per_kwh": 230.0, "min_kwh": 0.0, "installed_cost_us_dollars_per_kw": 1000.0, "total_rebate_us_dollars_per_kw": 0, "installed_cost_us_dollars_per_kwh": 500.0, "inverter_efficiency_pct": 0.96, "macrs_itc_reduction": 0.5, "canGridCharge": True, "macrs_bonus_pct": 0.0, "battery_replacement_year": 10, "macrs_option_years": 7, "internal_efficiency_pct": 0.975, "soc_min_pct": 0.2, "soc_init_pct": 0.5, "inverter_replacement_year": 10}, "land_acres": None, "ElectricTariff": {"add_blended_rates_to_urdb_rate": False, "wholesale_rate_us_dollars_per_kwh": 0.0, "net_metering_limit_kw": 0.0, "interconnection_limit_kw": 100000000.0, "blended_monthly_demand_charges_us_dollars_per_kw": [20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0], "urdb_utility_name": "", "urdb_label": "", "wholesale_rate_above_site_load_us_dollars_per_kwh": 0.0, "urdb_rate_name": "custom", "urdb_response": None, "blended_annual_demand_charges_us_dollars_per_kw": 0.0, "blended_annual_rates_us_dollars_per_kwh": 0.0, "blended_monthly_rates_us_dollars_per_kwh": [0.29, 0.29, 0.29, 0.29, 0.29, 0.29, 0.29, 0.29, 0.29, 0.29, 0.29, 0.29]}, "longitude": -91.7337, "address": "", "latitude": 35.2468, "Financial": {"escalation_pct": 0.026, "offtaker_discount_pct": 0.081, "value_of_lost_load_us_dollars_per_kwh": 100.0, "analysis_years": 20, "microgrid_upgrade_cost_pct": 0.3, "offtaker_tax_pct": 0.26, "om_cost_escalation_pct": 0.025}, "Wind": {"pbi_years": 1.0, "macrs_bonus_pct": 0.0, "max_kw": 0.0, "pbi_max_us_dollars": 1000000000.0, 
"wind_meters_per_sec": None, "state_ibi_pct": 0.0, "state_rebate_us_dollars_per_kw": 0.0, "installed_cost_us_dollars_per_kw": 3013.0, "utility_ibi_max_us_dollars": 10000000000.0, "pressure_atmospheres": None, "pbi_system_max_kw": 1000000000.0, "utility_ibi_pct": 0.0, "state_ibi_max_us_dollars": 10000000000.0, "wind_direction_degrees": None, "size_class": "", "utility_rebate_max_us_dollars": 10000000000.0, "macrs_option_years": 5, "state_rebate_max_us_dollars": 10000000000.0, "federal_itc_pct": 0.3, "temperature_celsius": None, "pbi_us_dollars_per_kwh": 0.0, "om_cost_us_dollars_per_kw": 35.0, "utility_rebate_us_dollars_per_kw": 0.0, "min_kw": 0.0, "macrs_itc_reduction": 0.5, "federal_rebate_us_dollars_per_kw": 0.0}}, "time_steps_per_hour": 4, "user_uuid": None}}
class TestFlexibleTimeSteps(ResourceTestCaseMixin, TestCase):
    """Compare REopt outputs between 1 and 4 time steps per hour."""

    def setUp(self):
        super(TestFlexibleTimeSteps, self).setUp()
        self.reopt_base = '/v1/job/'
        self.REopt_tol = 1e-2

    def get_response(self, data):
        # POST the scenario payload to the REopt job endpoint.
        return self.api_client.post(self.reopt_base, format='json', data=data)

    def test_flexible_time_steps(self):
        """
        - Validation to ensure that upon entering time_steps_per_hour=1 or 4, the results of the analysis
          are as expected (keeping pv and storage off to test wind module's performance)
        - the output csv files dimensions (8760, 35040 etc) must also match time_steps_per_hour given as input
        :return:
        """
        # results for time_steps_per_hour = 1 (pre-computed fixture on disk)
        d1 = json.load(open(os.path.join("reo", "tests", "outputs_test_flexible_time_steps_one_per_hour.json"), "r"))
        c1 = nested_to_flat(d1)
        # results for time_steps_per_hour = 4 (run through the live API)
        response2 = self.get_response(data=fts_post_2)
        self.assertHttpCreated(response2)
        r2 = json.loads(response2.content)
        run_uuid2 = r2.get('run_uuid')
        d2 = ModelManager.make_response(run_uuid=run_uuid2)
        c2 = nested_to_flat(d2['outputs'])
        # Seems reasonable that the exact resiliency average will be different due to a great granularity of survival
        # information in a quarter-hourly simulation vs hourly.
        del c1['avoided_outage_costs_us_dollars']
        del c2['avoided_outage_costs_us_dollars']
        try:
            check_common_outputs(self, c1, c2)
        except:
            # Print diagnostics before re-raising so the failure is actionable.
            print("Run {} expected outputs may have changed.".format(run_uuid2))
            print("Error message with ts=1: {}".format(d1['messages']))
            print("Error message with ts=4: {}".format(d2['messages']))
            raise
|
import numpy as np
import matplotlib.pyplot as plt
def V(x):
    """Harmonic-oscillator potential, V(x) = x**2 / 2."""
    return x * x / 2.0
def numerov(E,plot=0,normalize=0,returnfi=0):
    """Integrate the 1D Schroedinger equation at trial energy E with the
    Numerov method on a fixed grid.

    Args:
        E: trial energy.
        plot: if truthy, add the wavefunction to the current matplotlib figure.
        normalize: if truthy, scale the wavefunction to unit L2 norm first.
        returnfi: if truthy, also return the wavefunction value at the last
            grid point (used by the shooting-method bisection below).

    Returns:
        nn, or (nn, f[-1]) when returnfi is truthy, where nn is the number
        of sign changes (nodes) of the wavefunction on the grid.
    """
    #lim=5*np.sqrt(2*E)
    xmin = -8.0
    xmax = 8.0
    ndivide = 1000
    # grid spacing
    s=(xmax-xmin)/(ndivide-1)
    # f16 = extended precision, to limit roundoff in the recurrence
    G = np.zeros(ndivide,dtype=np.dtype('f16'))
    f = np.zeros(ndivide,dtype=np.dtype('f16'))
    x = np.linspace(xmin, xmax, num=ndivide,dtype=np.dtype('f16'))
    nn = 0 #number of nodes
    #assign initial values of phi
    f[0] = 0
    f[1] = 0.0001
    G[0] = 2*V(x[0]) - 2*E
    G[1] = 2*V(x[1]) - 2*E
    for i in range(2, ndivide):
        G[i] = 2*V(x[i]) - 2*E
        # Numerov three-point recurrence for f'' = G*f
        f[i] = (-f[i-2] + 2*f[i-1] + 5.0*G[i-1]*f[i-1]*s*s/6.0 + G[i-2]*f[i-2]*s*s/12.0)/(1-G[i]*s*s/12.0)
        if (f[i]*f[i-1]) < 0.0:
            # sign change => a node was crossed
            nn+=1
    #print('E = %.10f Phimax = %.5f nn = %d'%(E,f[i],nn))
    if normalize:
        # normalize to unit probability via the trapezoidal rule
        I=np.trapz(x=x,y=f**2)
        f=f/np.sqrt(I)
    if plot:
        plt.plot(x,f,label='%.10f'%E)
        #plt.savefig('%.10f.png'%E)
        #plt.clf()
    if returnfi:
        return nn,f[i]
    else:
        return nn
#uniform sampling, found limits of nn changes
# Scan a coarse energy grid; a change in node count between neighbouring
# energies brackets an eigenvalue.
Es=np.linspace(0,7,num=200,dtype=np.dtype('f16'))
NNs=[numerov(i) for i in Es]
#print(NNs)
Pairs=[]
for i in range(1,len(NNs)):
    if NNs[i]!=NNs[i-1]:
        Pairs.append((Es[i-1],Es[i],NNs[i-1],NNs[i]))
epsilon=0.0001
print('With accuracy of %.10f'%epsilon)
# Bisection on each bracketing pair: shrink the interval by node count
# until the wavefunction's far-boundary value is small enough.
for a,b,nn0,nn1 in Pairs:
    #print(a,b,nn0,nn1)
    x0=a
    x1=b
    _,fxnew=numerov(x0,normalize=1,returnfi=1)
    while(abs(fxnew) > epsilon):
        xnew=0.5*(x0+x1)
        nn_new,fxnew=numerov(xnew,normalize=1,returnfi=1)
        print(xnew,nn_new,fxnew)
        if (nn_new == nn0):
            x0 = xnew
        elif (nn_new == nn1):
            x1 = xnew
    print('Found eigenenergy at %.10f, nu = %d'%(x0,nn0))
    # re-run at the converged energy just to add it to the plot
    numerov(x0,plot=1,normalize=1)
plt.legend()
plt.xlabel(r'$x_r$')
plt.title('First several wavefunctions')
plt.savefig('sum.png')
|
#
# Uncomplicated VM Builder
# Copyright (C) 2010-2015 Canonical Ltd.
#
# See AUTHORS for list of contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
from VMBuilder.plugins.ubuntu.focal import Focal
from VMBuilder.util import run_cmd
import tempfile
import re
import logging
class Hirsute(Focal):
    """VMBuilder distro plugin for Ubuntu 21.04 (Hirsute Hippo).

    Inherits from Focal and overrides GRUB installation, kernel
    installation and package handling for this release.
    """

    # Kernel flavours accepted per architecture.
    valid_flavours = { 'i386' : ['386', 'generic', 'generic-pae', 'virtual'],
                       'amd64' : ['generic', 'server', 'virtual'],
                       'lpia' : ['lpia'] }
    preferred_filesystem = 'ext4'

    def install_grub(self, chroot_dir, devmapfile, root_dev, kclfile):
        """Install an optional custom kernel and GRUB inside the chroot.

        :param chroot_dir: path of the guest root filesystem on the host
        :param devmapfile: GRUB device.map path (echoed for debugging only)
        :param root_dev: guest root device (echoed for debugging only)
        :param kclfile: optional file whose first line is appended to the
            kernel command line in /etc/default/grub
        """
        logging.info("BEG of install_grub =====================================================")
        self.install_from_template('/etc/kernel-img.conf', 'kernelimg', { 'updategrub' : self.updategrub })
        # Install the kernel found in tmplinux if lkif (linux kernel image
        # file) was supplied; lkmf/lh1f/lh2f are the matching modules/header
        # packages.
        lkif = self.context.get_setting('lkif')
        lkmf = self.context.get_setting('lkmf')
        lh1f = self.context.get_setting('lh1f')
        lh2f = self.context.get_setting('lh2f')
        logging.info('lkif=%s' % lkif)
        logging.info('lkmf=%s' % lkmf)
        logging.info('lh1f=%s' % lh1f)
        logging.info('lh2f=%s' % lh2f)
        if lkif:
            self.run_in_target('apt-get', '-y', 'install', 'linux-base', 'initramfs-tools', env={ 'DEBIAN_FRONTEND' : 'noninteractive' })
            # NOTE(review): brace expansion is a shell feature; confirm that
            # run_in_target goes through a shell, otherwise this is literal.
            self.run_in_target('rm', '-f', '/boot/{config*,initrd*,System-map*,vmlinuz*}')
            self.run_in_target('mkdir', '/linux')
            self.run_in_target('chmod', '+rx', '/linux')
            tmplinux = self.context.get_setting('tmplinux')
            # Copy the supplied .deb packages into the chroot.
            if lkif:
                run_cmd('rsync', '-a', lkif, '%s/linux' % chroot_dir)
            if lkmf:
                run_cmd('rsync', '-a', lkmf, '%s/linux' % chroot_dir)
            if lh1f:
                run_cmd('rsync', '-a', lh1f, '%s/linux' % chroot_dir)
            if lh2f:
                run_cmd('rsync', '-a', lh2f, '%s/linux' % chroot_dir)
            kll = run_cmd('ls', '%s/linux/' % chroot_dir).split('\n')
            r = re.compile('linux-image.*')
            kfnl = list(filter(r.match, kll))
            if len(kfnl) > 0:
                # Extract the linux version number from the package filename.
                kfn = kfnl[0]
                kvn = re.search(r'[0-9][^-]*-[0-9]*', kfn).group()
                self.run_in_target('ls', '-la', '/linux/')
                self.run_in_target('bash', '-c', 'dpkg -i --force-all /linux/*')
                self.run_in_target('apt', '--fix-broken', 'install')
                self.run_in_target('update-initramfs', '-c', '-k', kvn)
                # Keep apt from replacing the hand-installed kernel.
                self.run_in_target('apt-mark', 'hold', 'linux-image-generic', 'linux-headers-generic')
            run_cmd('rm', '-rf', tmplinux)
        # Select grub architecture-dependent files.
        arch = self.context.get_setting('arch')
        arch = 'i386' # forcing an i386 target for grub
        if arch == 'amd64':
            target = 'x86_64-efi'
            grubpkg = 'grub-efi-amd64'
            grubpk2 = 'efibootmgr'
        else:
            target = 'i386-pc'
            grubpkg = 'grub-pc'
            grubpk2 = ''
        self.run_in_target('apt-get', '-y', 'install', 'grub2-common', grubpkg, 'fdisk', grubpk2, env={ 'DEBIAN_FRONTEND' : 'noninteractive' })
        self.run_in_target('dpkg-reconfigure', grubpkg, env={ 'UCF_FORCE_CONFFMISS' : 'Yes', 'DEBIAN_FRONTEND' : 'noninteractive' })
        run_cmd('rsync', '-a', '%s%s/%s/' % (chroot_dir, self.grubroot, target), '%s/boot/grub/' % chroot_dir)
        # Debug output only.
        self.run_in_target('echo', '\"%s\"' % devmapfile)
        self.run_in_target('cat', '/tmp/vmbuilder-grub/device.map')
        self.run_in_target('ls', '-l', '/tmp/vmbuilder-grub/')
        self.run_in_target('echo', '\"---\"')
        self.run_in_target('echo', '\"%s\"' % root_dev)
        # Find the loop device that backs the chroot filesystem.
        dfoutput = run_cmd('df')
        run_cmd('losetup', '-a')
        run_cmd('ls', '-l', '/dev/mapper/')
        for line in dfoutput.split('\n'):
            if line.endswith(chroot_dir):
                myloopdev = line.split(' ')[0]
        self.run_in_target('ls', '-l', '/dev')
        self.run_in_target('ls', '-l', '/boot')
        self.run_in_target('ls', '-l', '/etc/default')
        self.run_in_target('cat', '/etc/fstab')
        # NOTE(review): raises NameError/AttributeError if df reported no
        # mount ending in chroot_dir, or the device is not a loop device.
        mydrv = re.search(r'loop[0-9]+', myloopdev).group()
        run_cmd('grub-install', '--boot-directory=%s/boot' % chroot_dir, '--root-directory=%s' % chroot_dir, '--target=%s' % target, '/dev/%s' % mydrv, '--no-uefi-secure-boot', '--efi-directory=%s/boot/efi' % chroot_dir)
        run_cmd('mount', '--bind', '/dev', '%s/dev' % chroot_dir)
        self.run_in_target('touch', '/boot/grub/menu.lst')
        self.run_in_target('grub-editenv', '-', 'unset', 'recordfail')
        # Append console/text/timeout defaults when absent ...
        self.run_in_target('bash', '-c', 'grep -qxF \"GRUB_TERMINAL\" /etc/default/grub || echo \"GRUB_TERMINAL=console\" >> /etc/default/grub')
        self.run_in_target('bash', '-c', 'grep -qxF \"GRUB_GFXMODE\" /etc/default/grub || echo \"GRUB_GFXMODE=text\" >> /etc/default/grub')
        self.run_in_target('bash', '-c', 'grep -qxF \"GRUB_RECORDFAIL_TIMEOUT\" /etc/default/grub || echo \"GRUB_RECORDFAIL_TIMEOUT=0\" >> /etc/default/grub')
        self.run_in_target('bash', '-c', 'grep -qxF \"GRUB_HIDDEN_TIMEOUT\" /etc/default/grub || echo \"GRUB_HIDDEN_TIMEOUT=0\" >> /etc/default/grub')
        # ... then normalise their values in place.
        self.run_in_target('sed', '-ie', 's/\(GRUB_TERMINAL=\).*/\\1\"console\"/', '/etc/default/grub')
        self.run_in_target('sed', '-ie', 's/\(GRUB_GFXMODE=\).*/\\1\"text\"/', '/etc/default/grub')
        self.run_in_target('sed', '-ie', 's/\(GRUB_RECORDFAIL_TIMEOUT=\).*/\\1\"5\"/', '/etc/default/grub')
        # BUGFIX: the pattern was 'GRUB_HIDDEN_TIMEOUT=0=', which can never
        # match 'GRUB_HIDDEN_TIMEOUT=0', so the value was never normalised.
        self.run_in_target('sed', '-ie', 's/\(GRUB_HIDDEN_TIMEOUT=\).*/\\1\"0\"/', '/etc/default/grub')
        # Shut down a couple of failing daemons at boot time.
        self.run_in_target('systemctl', 'disable', 'systemd-timesyncd.service')
        self.run_in_target(*'systemctl disable systemd-resolved.service'.split(' '))
        # Read the kernel command line string to be added from the kclfile.
        mycl = ""
        if (kclfile):
            myfh = open(kclfile, "r")
            if myfh:
                mycl = myfh.readline()
                myfh.close()
            logging.debug('mycl=%s' % mycl)
            mycl = mycl.rstrip("\n")
        # Update /etc/default/grub.
        self.run_in_target('sed', '-ie', '/GRUB_CMDLINE_LINUX_DEFAULT/s/quiet\(.*\)/%s \\1/' % mycl, '/etc/default/grub')
        self.run_in_target('sed', '-ie', '/GRUB_TIMEOUT=/s/=.*/=\"2\"/', '/etc/default/grub')
        self.run_in_target('sed', '-ie', '/GRUB_TIMEOUT_STYLE=/s/=.*/=\"menu\"/', '/etc/default/grub')
        self.run_in_target('sed', '-ie', 's/splash//', '/etc/default/grub')
        self.run_in_target('sed', '-ie', 's/vt.handoff=[0-9]//', '/etc/default/grub')
        self.run_in_target('cat', '/etc/default/grub')
        self.run_in_target('update-grub') # same as self.run_in_target(self.updategrub)
        self.run_in_target('sync')
        self.run_in_target('sync')
        logging.info("END of install_grub =====================================================")

    def install_menu_lst(self, disks):
        """Generate and mangle the GRUB menu, then select entry 0 as default."""
        self.run_in_target(self.updategrub)
        self.mangle_grub_menu_lst(disks)
        self.run_in_target(self.updategrub)
        self.run_in_target('grub-set-default', '0')

    def install_kernel(self, destdir):
        """Install the distro kernel unless a custom one (lkif) is in use."""
        try:
            self.run_in_target('mount', '-t', 'proc', 'proc', '/proc')
            lkif = self.context.get_setting('lkif')
            if not lkif: # if an image was not installed before
                run_cmd('chroot', destdir, 'apt-get', '-y', 'install', self.kernel_name(), env={ 'DEBIAN_FRONTEND' : 'noninteractive' })
        finally:
            self.run_in_target('umount', '/proc')
            run_cmd('umount', '%s/dev' % destdir)

    def uses_grub2(self):
        """This release boots with GRUB 2."""
        return True

    def install_extras(self):
        """Apply the seed file and the addpkg/removepkg package lists."""
        seedfile = self.context.get_setting('seedfile')
        if seedfile:
            self.seed(seedfile)
        addpkg = self.context.get_setting('addpkg')
        removepkg = self.context.get_setting('removepkg')
        if not addpkg and not removepkg:
            return
        cmd = ['apt-get', 'install', '-y']
        cmd += addpkg or []
        # A trailing '-' asks apt-get to remove the package.
        cmd += ['%s-' % pkg for pkg in removepkg or []]
        self.run_in_target(env={ 'DEBIAN_FRONTEND' : 'noninteractive' }, *cmd)

    def update(self):
        """Refresh the apt package index inside the target."""
        self.run_in_target('apt-get', '-y', 'update',
                           env={ 'DEBIAN_FRONTEND' : 'noninteractive' })
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016,ๅฐๅฟๆบๅจไบบ
All rights reserved.
ๆ ่ฆ๏ผ
ๅ ๅปบ ่
๏ผไฝ่ฒ
ๅๅปบๆฅๆ๏ผ16/12/17
"""
import json
import redis
scene_redis = redis.Redis('127.0.0.1', '6579', 0, socket_timeout=2)
class RobotScene(object):
    """
    Manage a robot's scene state, stored as a redis hash keyed by robot code.
    """
    def __init__(self):
        pass

    @staticmethod
    def get_scene(robot_code):
        """
        Get the robot's current scene and its details (all hash fields).
        :param robot_code: robot code
        :return: dict of every field stored for this robot
        """
        scene = scene_redis.hgetall(robot_code)
        return scene

    @staticmethod
    def set_scene_name(robot_code, scene_name):
        """
        Set the robot's scene name.
        :param robot_code: robot code
        :param scene_name: scene name
        :return:
        """
        scene_redis.hset(robot_code, 'name', scene_name)

    @staticmethod
    def get_scene_name(robot_code):
        """
        Get the robot's current scene name.
        :param robot_code: robot code
        :return: the stored 'name' field, or None when unset
        """
        return scene_redis.hget(robot_code, 'name')

    @staticmethod
    def clear_scene_name(robot_code):
        """
        Clear the scene name.
        Note: this deletes the robot's entire redis hash, i.e. every scene
        field, not only 'name'.
        :param robot_code: robot code
        :return:
        """
        scene_redis.delete(robot_code)

    @staticmethod
    def set_scene_kv(robot_code, key, value):
        """
        Set one key/value pair in the robot's scene.
        :param robot_code: robot code
        :param key: key
        :param value: value
        :return:
        """
        scene_redis.hset(robot_code, key, value)

    @staticmethod
    def get_scene_kv(robot_code, key):
        """
        Get one value from the robot's scene by key.
        :param robot_code: robot code
        :param key: key
        :return:
        """
        return scene_redis.hget(robot_code, key)
|
import scrapy
from scrapy.loader import ItemLoader
from FiScrape.items import ZhArtItem, \
parse_to_os_tz
from FiScrape.search import query, start_date
from scrapy_splash import SplashRequest, SplashFormRequest
from itertools import count
from ln_meta_template import zh_user, zh_pass
class ZhSpider(scrapy.Spider):
    '''
    Spider for Zero Hedge.
    name : 'zh'
    '''
    name = "zh"
    allowed_domains = ['zerohedge.com']
    query = query
    zh_user = zh_user
    zh_pass = zh_pass
    # HTTP auth credentials for the local Splash instance.
    http_user = 'user'
    http_pass = 'userpass'
    pages_to_check = 10 # This variable sets the depth of pages to crawl, if not logged in. ZH does not sort seach results by date, unless logged in.
    url = f"https://www.zerohedge.com/search-content?qTitleBody={query}&page=0"
    # Lua for the first search page: abort CSS requests, disable images,
    # then click the paginator once before returning the rendered HTML.
    script = """
    function main(splash, args)
        splash:on_request(function(request)
            if request.url:find('css') then
                request.abort()
            end
        end)
        splash.images_enabled = false
        assert(splash:go(args.url))
        splash:wait((args.wait))
        splash:select('button.SimplePaginator_next__15okP'):mouse_click()
        splash:wait((args.wait))
        return splash:html()
    end
    """
    # Lua for subsequent pages: same pagination click without the filters.
    next_script = """
    function main(splash, args)
        assert(splash:go(args.url))
        splash:wait((args.wait))
        splash:select('button.SimplePaginator_next__15okP'):mouse_click()
        splash:wait((args.wait))
        return splash:html()
    end
    """
    # Lua that fills in and submits the login form.
    # BUGFIX: send_text() calls had a doubled closing quote ("" -> Lua syntax
    # error) and the script referenced the undefined global `args` — main()
    # here only receives `splash`, so the wait value must come from
    # splash.args.wait.
    login_script = f"""
    function main(splash)
        local url = splash.args.url
        assert(splash:go(url))
        splash:wait(splash.args.wait)
        splash:set_viewport_full()
        local search_input = splash:select('input[name=username]')
        search_input:send_text("{zh_user}")
        local search_input = splash:select('input[name=password]')
        search_input:send_text("{zh_pass}")
        splash:wait(splash.args.wait)
        local submit_button = splash:select('input[class^=BlockLogin_formSubmit__2kXfE]')
        submit_button:click()
        splash:wait(splash.args.wait)
        return splash:html()
    end
    """

    def start_requests(self):
        """Kick off the crawl with a Splash-rendered first results page."""
        yield SplashRequest(self.url, callback=self.parse, args={'wait': 5, 'lua_source': self.script})

    def after_login(self, response):
        """Test callback to check whether the Splash login succeeded."""
        # BUGFIX: response.body is bytes; concatenating it with str and
        # testing `"Username" in response.body` both raised TypeError.
        # response.text is the decoded body.
        body = response.text
        print('BODY START: ' + body + ' BODY END.')
        if "Username" in body:
            self.logger.error("##Successful Login##")

    def parse(self, response):
        """Parse a search-results page: one item per recent-enough snippet,
        then fan out over the remaining result pages."""
        self.logger.info('Parse function called on {}'.format(response.url))
        article_snippets = response.xpath('//*/div[@class="SearchResult_container__BnK-I"]')
        for snippet in article_snippets:
            snippet_date = snippet.xpath('.//div[@class="SearchResult_authorInfo__33M2f"]/span[2]/text()').get()
            if snippet_date:
                snippet_date = parse_to_os_tz(snippet_date)
                if snippet_date >= start_date:
                    loader = ItemLoader(item=ZhArtItem(), selector=snippet)
                    loader.add_xpath('headline', './/a/text()')
                    # The standfirst is div[3] unless div[3] is the author
                    # block, in which case it is div[4].
                    div_a = snippet.xpath('.//div[contains(@class,"SearchResult_authorInfo__33M2f")]//text()').getall()
                    div_3 = snippet.xpath('.//div[3]//text()').getall()
                    if div_a == div_3:
                        standfirst = snippet.xpath('.//div[4]//text()').getall()
                    else:
                        standfirst = div_3
                    if standfirst:
                        loader.add_value('standfirst', standfirst)
                    tags = snippet.css('div.SearchResult_category__3FL2h::text, div.tout-tag.d-lg-flex > a::text').getall()
                    if tags:
                        loader.add_value('tags', tags)
                    article_url = snippet.xpath('.//a/@href').get()
                    loader.add_value('article_link', article_url)
                    article_item = loader.load_item()
                    # Follow into the article page to fill in the full item.
                    request = response.follow(article_url, self.parse_article, meta={'article_item': article_item})
                    request.meta['article_item'] = article_item
                    if request:
                        yield request
                    else:
                        yield article_item
        # Crawl a fixed number of further result pages (ZH does not sort
        # search results by date unless logged in).
        for a in count(1):
            if a <= self.pages_to_check:
                if response:
                    url = f"https://www.zerohedge.com/search-content?qTitleBody={self.query}&page={a}"
                    yield SplashRequest(url=url, callback=self.parse)
                else:
                    break
            else:
                break

    def parse_article(self, response):
        """Extract author, summary, body, captions and footnote from an
        article page and yield the completed item."""
        article_item = response.meta['article_item']
        loader = ItemLoader(item=article_item, response=response)
        loader.add_xpath('published_date', '//header/footer/div[2]/text()')
        article_item['authors'] = {}
        # Author can appear in three different markups; try each in turn.
        authors = response.xpath('//article/div[3]/div[1]/p[1]/a/em/text()').get()
        if authors:
            authors = authors.replace('Authored by ', '')
            loader.add_xpath('origin_link', '//*[@id="__next"]/div/div[5]/main/article/div[3]/div[1]/p[1]/a/@href')
        elif not authors:
            authors = response.xpath('//div[@class="ContributorArticleFull_headerFooter__author__2NXEq"]/text()[2]').get()
            if not authors:
                authors = response.xpath('//*[@id="__next"]/div/div[5]/main/article/header/footer/div[1]/div/text()').get().replace('by ', '')
            if authors:
                authors = authors.replace('by ', '')
        if authors:
            # Prefer the Twitter handle as the author key when available.
            author_twitter = response.xpath('//p[last()]/em/a[contains(@href,"twitter")]/@href').get()
            if author_twitter:
                twitter_handle = response.xpath('//p[last()]/em/a[contains(@href,"twitter")]/text()').get()
                auth = twitter_handle
                article_item['authors'][f'{auth}'] = {}
                article_item['authors'][f'{auth}']['author_twitter'] = author_twitter
            else:
                auth = authors
                article_item['authors'][f'{auth}'] = {}
                article_item['authors'][f'{auth}']['author_twitter'] = None
            article_item['authors'][f'{auth}']['bio_link'] = None
            article_item['authors'][f'{auth}']['author_position'] = None
            article_item['authors'][f'{auth}']['author_bio'] = None
            article_item['authors'][f'{auth}']['author_email'] = None
        article_summary = response.xpath(
            '//article/div[3]/div[1]/ul[1]/li/p/text() | //article/div[3]/div[1]/ul[1]/li/p/a/text() | //article/div[3]/div[1]/ul[1]/li/p/strong | //article/div[3]/div[1]/ul[1]/li/p/em').getall()
        if article_summary:
            loader.add_value('article_summary', article_summary)
        body = response.css('div.NodeContent_body__2clki.NodeBody_container__1M6aJ')
        if body:
            article_content = response.css('div.NodeContent_body__2clki.NodeBody_container__1M6aJ').getall()
            if article_content:
                loader.add_value('article_content', article_content)
        if body:
            image_caption = body.xpath('.//figcaption/text()').getall()
        else:
            image_caption = response.xpath('//figcaption/text()').getall()
        if image_caption:
            loader.add_value('image_caption', image_caption)
        article_footnote = response.css('div.read-original ::text').getall()
        if article_footnote:
            loader.add_value('article_footnote', article_footnote)
        yield loader.load_item()
|
from flask import (abort, jsonify, g, session, render_template, redirect,
request, url_for)
from manage import app#, client
from . import main
# import self written modules from modules dir
# from ..modules import ...
@main.route('/')
def index():
    """Serve the site's landing page."""
    return render_template('index.html')
import subprocess
import sys
import click
def _is_clean():
    """Return True when ``git status --porcelain`` reports nothing to commit."""
    status = subprocess.run(
        ["git", "status", "--porcelain"],
        capture_output=True,
        encoding="utf-8",
    )
    return not status.stdout.strip()
@click.command()
def cmd():
    """Check that the repository is in a clean state."""
    if _is_clean():
        return
    # Dirty working tree or index: report on stderr and fail the command.
    click.echo("git working directory/index is not clean.", err=True)
    sys.exit(1)
|
import numpy as np
from AbstractNeuralNetwork import AbstractNeuralNetwork as ANN
class NNReluSigmoid(ANN):
    """Neural network using ReLU activations with a sigmoid output layer."""

    def __init__(self):
        super().__init__()

    def findActivationImplementation(self, activation_type):
        """Resolve an activation name to its forward function (ReLU fallback)."""
        forward = {"sigmoid": self.sigmoid, "relu": self.relu}
        return forward.get(activation_type, self.relu)

    def findDerivativeOfActivationImplementation(self, activation_type):
        """Resolve an activation name to its backward function (ReLU fallback)."""
        backward = {"sigmoid": self.backwardSigmoid, "relu": self.backwardRelu}
        return backward.get(activation_type, self.backwardRelu)

    def sigmoid(self, Z):
        """Element-wise logistic function."""
        return 1 / (1 + np.exp(-Z))

    def relu(self, Z):
        """Element-wise rectified linear unit."""
        return np.maximum(0, Z)

    def backwardSigmoid(self, dA, Z):
        """Gradient of the sigmoid: dA * s(Z) * (1 - s(Z))."""
        sig = self.sigmoid(Z)
        return dA * sig * (1 - sig)

    def backwardRelu(self, dA, Z):
        """Gradient of the ReLU: pass dA through, zeroed where Z < 0."""
        grad = np.array(dA, copy=True)
        grad[Z < 0] = 0
        return grad

    def setDefaultActivation(self, activations=None):
        """Install *activations*, or default to ReLU layers + sigmoid output."""
        if activations:
            self.activations = activations
        else:
            size = len(self.layerdims)
            self.activations = [(size - 2, 'relu'), (size - 1, 'sigmoid')]
import pickle as pkl
import numpy as np
def parse_index_file(filename):
    """Read an index from *filename*: one integer per line, whitespace stripped.

    :param filename: path of the text file to read
    :return: list of ints in file order

    BUGFIX: the file handle was previously leaked (`for line in open(...)`);
    it is now closed deterministically via a context manager.
    """
    with open(filename) as fh:
        return [int(line.strip()) for line in fh]
def load_data(dataset):
    """Load all stored artifacts for *dataset* from ./data/.

    :return: (adj, features, adj_train, val_edges, val_edges_false,
              test_edges, test_edges_false)
    """
    # Feature matrix.
    with open('./data/{}_feats.pkl'.format(dataset), 'rb') as feats_fh:
        features = pkl.load(feats_fh)
    # Full adjacency matrix (pickled with a latin1-compatible protocol).
    with open('./data/{}_new_adj.pkl'.format(dataset), 'rb') as adj_fh:
        adj = pkl.load(adj_fh, encoding='latin1')
    # Training adjacency matrix.
    with open('./data/{}_adj_train.pkl'.format(dataset), 'rb') as train_fh:
        adj_train = pkl.load(train_fh)
    # Positive/negative edge sets for validation and test.
    val_edges = np.load('./data/{}_val_edges.npy'.format(dataset))
    val_edges_false = np.load('./data/{}_val_edges_false.npy'.format(dataset))
    test_edges = np.load('./data/{}_test_edges.npy'.format(dataset))
    test_edges_false = np.load('./data/{}_test_edges_false.npy'.format(dataset))
    return adj, features, adj_train, val_edges, val_edges_false, test_edges, test_edges_false
|
# Backwards-compatibility shim: re-export the public API of the renamed
# package so existing `actfw` imports keep working.
from actfw_raspberrypi.edid import *
from warnings import warn

# Emitted at import time so downstream users migrate to the new package name.
warn(DeprecationWarning("actfw is DEPRECATED. Use actfw-raspberrypi instead"))
|
'''input
6
-679 -2409 -3258 3095 -3291 -4462
21630
21630
19932
8924
21630
19288
3
3 5 -1
12
8
10
5
1 1 1 2 0
4
4
4
2
4
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem C
if __name__ == '__main__':
    # Trip visits spots in order, starting and ending at the origin; for each
    # j print the trip length when spot j is skipped.
    # See:
    # https://www.youtube.com/watch?v=WFg2yJGZ2Cw
    # https://img.atcoder.jp/arc093/editorial.pdf
    spot_count = int(input())
    spots = [0] + list(map(int, input().split())) + [0]
    # Full round-trip length: sum of consecutive-pair distances.
    baseline = sum(abs(nxt - cur) for cur, nxt in zip(spots, spots[1:]))
    for j in range(1, spot_count + 1):
        # Replace the two legs through spot j by the direct hop around it.
        detour = abs(spots[j - 1] - spots[j]) + abs(spots[j] - spots[j + 1])
        direct = abs(spots[j - 1] - spots[j + 1])
        print(baseline - detour + direct)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import time
from threading import Thread
try:
    from Queue import Queue, Empty
    from BeautifulSoup import BeautifulSoup
except ImportError:
    from queue import Queue, Empty
    from bs4 import BeautifulSoup
from lib import END
class Wiki(Thread):
    """Background worker that looks *query* up on Wikipedia and pages the
    rendered article text into the chat window.

    User replies come back through *wiki_q*, a queue fed by the UI thread.
    """

    def __init__(self, client, chat_window, query, wiki_q, prefix=''):
        """
        :param client: owning client; its ``search`` flag drives paging
        :param chat_window: object exposing ``_insert(sender, text)``
        :param query: article title to look up
        :param wiki_q: queue carrying the user's selections/replies
        :param prefix: optional line prefix (BUGFIX: this previously read an
            undefined global ``prefix``, raising NameError on construction)
        """
        super(Wiki, self).__init__()
        self.wiki_url = 'http://en.wikipedia.org/w/index.php?action=render&title='
        self.client = client
        self.chat_window = chat_window
        self.prefix_line = prefix
        self.query = query
        self.wiki_q = wiki_q

    def display_alternate_articles(self, content):
        """List a disambiguation page's links and let the user pick one."""
        a_tags = content.findAll('a')
        # BUGFIX: was a_tags.index(tag) + 1, which returns the first
        # occurrence and is wrong when the same link appears twice.
        for pos, tag in enumerate(a_tags, start=1):
            tag_line = '{} -- {}\n'.format(pos, tag['href'])
            self.chat_window._insert('Server', tag_line)
        self.chat_window._insert('Server', 'Do you want to do a look-up on any of these?\n')
        self.chat_window._insert('Server', "Enter '/WHATIS #from_above' or '/WHATIS n'\n")
        return self.select_alternate_article(a_tags)

    def select_alternate_article(self, links):
        """Resolve the user's numeric choice to a link and fetch it."""
        choice = self.make_selection()
        if choice == 'n' or not choice.isdigit():
            return
        query = links[int(choice) - 1]['href'].split('/')[-1]
        return self.make_request(query)

    def make_selection(self):
        """Poll wiki_q for the user's reply; give up with 'n' after 5 minutes."""
        start_timeout = time.time()
        selection = None
        while selection is None:
            try:
                selection = self.wiki_q.get_nowait()
            except Empty:
                # BUGFIX: was ``except Queue.Empty`` — the Queue *class* has
                # no Empty attribute; the exception is module-level.
                current = time.time()
                if (current - start_timeout > 300):
                    selection = 'n'
        return selection

    def make_request(self, query):
        """Fetch the rendered article; divert to disambiguation handling when
        the page looks like a disambiguation/stub page."""
        req = requests.get(self.wiki_url + query)
        soup = BeautifulSoup(req.content)
        p_tags = soup.findAll('p')
        if any(i.text == 'It may also refer to:' for i in p_tags) or len(p_tags) <= 3:
            return self.display_alternate_articles(soup)
        else:
            return p_tags

    def display_eof(self):
        """Tell the user the article is exhausted and stop the search."""
        self.chat_window._insert('Server', 'End of File\n')
        self.client.search = False

    def run(self):
        """Thread entry point: fetch the article and page it paragraph-wise."""
        page = 0
        page_content = self.make_request(self.query)
        if page_content is None:
            self.client.search = False
        while self.client.search:
            self.chat_window._insert('Server', '{}\n'.format(page_content[page].text))
            if page == len(page_content) - 1:
                self.display_eof()
                self.client.search = False
            else:
                self.chat_window._insert('Server', 'More? (y or n)\n')
                expand = self.make_selection()
                if expand == 'y':
                    page += 1
                else:
                    self.client.search = False
|
from itertools import product
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import seaborn as sns
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.layers import TimeDistributed, Input, LSTM, Lambda, Dense, Reshape, Concatenate, Dropout
from keras.models import Model
from keras.regularizers import l2
from keras.utils import plot_model
from sklearn.model_selection import train_test_split
from spektral.layers import GraphConv, GlobalAttentionPool
from spektral.utils import localpooling_filter, init_logging, log
from spektral.utils.plotting import plot_numpy
from data_generator import get_peter_graphs, get_rotation_graphs, get_input_sequences, get_targets
from graph_distances import NX_GED
def get_recerr(gl_test, gl_pred, gl_var_pred, reduced=None, nxged=None, methods=None):
    """Compute and plot reconstruction-error residuals for several predictors.

    :param gl_test: list of test graphs
    :param gl_pred: NGAR model predictions, paired with gl_test
    :param gl_var_pred: VAR baseline predictions (aligned to the tail of gl_test)
    :param reduced: optionally evaluate only the first *reduced* timesteps
    :param nxged: graph-edit-distance helper (NX_GED instance)
    :param methods: subset of {'iid', 'mart', 'mavg', 'var', 'ngar'}
    :return: (list of residual arrays, full distance matrix or None)
    """
    if methods is None:
        # BUGFIX: a None default crashed on the first `in methods` test.
        methods = []
    if reduced is None:
        n = len(gl_test)
    else:
        n = reduced
    data = []
    labels = []
    # Compute the full distance matrix needed for the moving average.
    if 'mavg' in methods:
        dm = nxged.distmat(gl_test, gl_test, symmetric=True, n_jobs=20)
    else:
        dm = None
    if 'iid' in methods:
        # i.i.d. process: distance of each graph from the best stationary
        # guess (the mean/medoid graph).
        print('i.i.d. baseline')
        if dm is None:
            _, residuals_mean = nxged.get_mean(gl_test, n_jobs=20)
        else:
            # BUGFIX: np.sum(dm**2) without axis= collapses to a scalar, so
            # argmin always returned index 0; the medoid needs per-row sums.
            g_medoid = np.argmin(np.sum(dm ** 2, axis=1))
            residuals_mean = dm[g_medoid]
        data.append(residuals_mean)
        labels.append('Mean')
    if 'mart' in methods:
        # Martingale process: distance of g(t) from g(t-1).
        print('Martingale baseline')
        residuals_mart = nxged.distmat(gl_test[1:], gl_test[:-1], paired=True, n_jobs=20)
        data.append(residuals_mart)
        labels.append('Mart.')
    if 'mavg' in methods:
        # Moving average: medoid of a sliding window of p graphs predicts
        # the graph right after the window.
        print('moving average baseline')
        residuals_mavg = []
        p = 20
        for t in range(n - p - 1):
            # BUGFIX: per-row sums here as well (see 'iid' above).
            g_medoid = np.argmin(np.sum(dm[:, t:t + p] ** 2, axis=1))
            residuals_mavg.append(dm[g_medoid, t + p])
        residuals_mavg = np.array(residuals_mavg)
        data.append(residuals_mavg)
        labels.append('Mov.avg.')
    if 'var' in methods:
        # Multivariate AR baseline.
        print('VAR baseline')
        residuals_var = nxged.distmat(gl_test[-len(gl_var_pred):], gl_var_pred, paired=True)
        data.append(residuals_var)
        labels.append('VAR')
    if 'ngar' in methods:
        # AR process: distance of each graph from the NGAR prediction.
        print('NGAR')
        residuals_ngar = nxged.distmat(gl_test, gl_pred, paired=True)
        data.append(residuals_ngar)
        labels.append('NGAR')
    if 'iid' in methods and 'ngar' in methods:
        # BUGFIX: these checks tested 'mean', a key never present in
        # *methods* (the residuals are computed under 'iid'), so the H0/H1
        # plots and the Wilcoxon test never ran — and would have raised
        # NameError had 'mean' been passed without 'iid'.
        plt.figure()
        plt.subplot(1, 2, 1)
        plt.plot([0, n], [np.mean(residuals_mean)] * 2, label='E[e]')
        plt.scatter(list(range(n)), residuals_mean, label='e')
        plt.title('Graphical check of model assumption H0')
        plt.legend()
        plt.subplot(1, 2, 2)
        plt.plot([0, n], [np.mean(residuals_ngar)] * 2, label='E[e\']')
        plt.scatter(list(range(n)), residuals_ngar, label='e\'')
        plt.title('Graphical check of model assumption H1')
        plt.legend()
    # Compare models: violin plot of all residual distributions.
    plt.figure().add_subplot(111)
    ax = sns.violinplot(data=data)
    plt.ylabel('GED')
    ax.set_xticklabels(labels)
    plt.xticks(rotation=45)
    ax.set(ylim=(0, 35))
    ax.yaxis.grid()
    # Residuals over time plus a shared-axis violin summary.
    plt.figure(figsize=[10, 4])
    grid = plt.GridSpec(1, 3, wspace=0.4, hspace=0.)
    sax1 = plt.subplot(grid[0, :2])
    if 'iid' in methods:
        plt.plot([0, n], [np.mean(residuals_mean)] * 2, c='C0')
        plt.scatter(list(range(n)), residuals_mean, label='Mean', marker='.', c='C0')
    if 'mart' in methods:
        plt.plot([0, n], [np.mean(residuals_mart)] * 2, c='C1')
        plt.scatter(list(range(1, n)), residuals_mart, label='Martingale', marker='.', c='C1')
    if 'ngar' in methods:
        plt.plot([0, n], [np.mean(residuals_ngar)] * 2, c='C2')
        plt.scatter(list(range(n)), residuals_ngar, label='NGAR', marker='.', c='C2')
    if 'mavg' in methods:
        plt.plot([0, n], [np.mean(residuals_mavg)] * 2, c='C3')
        plt.scatter(list(range(len(residuals_mavg))), residuals_mavg, label='M.AVG.', marker='.', c='C3')
    if 'var' in methods:
        plt.plot([0, n], [np.mean(residuals_var)] * 2, c='C4')
        plt.scatter(list(range(len(residuals_var))), residuals_var, label='VAR', marker='.', c='C4')
    plt.ylabel('GED')
    plt.xlabel('Timestep')
    plt.legend()
    sax2 = plt.subplot(grid[0, 2], sharey=sax1)
    ax = sns.violinplot(data=data, cut=0)
    plt.ylabel('GED')
    ax.set_xticklabels(labels)
    plt.xticks(rotation=45)
    if 'iid' in methods and 'ngar' in methods:
        a = scipy.stats.wilcoxon(residuals_mean - residuals_ngar)
        log('p-value wicoxon Mean - NGAR = {}'.format(a.pvalue))
    if 'mart' in methods and 'ngar' in methods:
        b = scipy.stats.wilcoxon(residuals_mart - residuals_ngar[1:])
        log('p-value wicoxon Martingale - NGAR = {}'.format(b.pvalue))
    return data, dm
# Parameters
np.random.seed(20180114) # seed for replicability
problem = 'rotation' # 'peter' or 'rotation'
rotation_type = 'simple' # 'simple' or 'dynamic'
T_main = 100000 # Length of main sequence to split randomly
T_seq = int(T_main * 0.1) # Length of sequence for testing in time
T = T_main + T_seq # Length of full sequence
distortion = .01 # Distortion of the rotation system
N = 5 # Number of nodes
F = 2 # Dimension of the node attributes
ts = 20 # Length of a single sequence (must be proportional to 2 * (complexity - N*F))
l2_reg = 5e-4 # Weight for l2 regularization
batch_size = 256 # Size of the minibatches
dropout_rate = 0.0 # Dropout rate for whole network
epochs = 2000 # Training epochs
es_patience = 20 # Patience for early stopping
log_dir = init_logging() # Directory for logging
reduced = 1000
methods = []
methods += ['iid']
methods += ['mart']
methods += ['mavg']
methods += ['var']
methods += ['ngar']
# Tuneables
complexity = [10, 15, 20, 30, 60, 110] # Complexity of the peter system
memory_order = [1, 5, 10, 20, 50, 100] # Memory order for rotation system
tuneables = [complexity if problem == 'peter' else memory_order]
for c_m_o_, in product(*tuneables):
log('Problem: {}; Complexity/Memory order: {}'.format(problem, c_m_o_))
# Create all graphs
if problem == 'peter':
nfeat, adjacency = get_peter_graphs(N, F, T, c_m_o_, distortion)
elif problem == 'rotation':
nfeat, adjacency = get_rotation_graphs(N, F, T, c_m_o_, distortion, rot_type=rotation_type)
else:
raise ValueError('Problem can be: peter, rotation')
np.savez(log_dir + '{}_{}_original_graph'.format(problem, c_m_o_),
nfeat=nfeat,
adjacency=adjacency)
# Create filters (Laplacian)
fltr = localpooling_filter(adjacency.copy())
# Create regressors and targets
adj_target = get_targets(adjacency, T, ts)
nf_target = get_targets(nfeat, T, ts)
fltr = get_input_sequences(fltr, T, ts)
node_features = get_input_sequences(nfeat, T, ts)
# Split data for sequential tests
adj_target_seq = adj_target[T_main:]
adj_target = adj_target[:T_main]
nf_target_seq = nf_target[T_main:]
nf_target = nf_target[:T_main]
adj_seq = fltr[T_main:]
fltr = fltr[:T_main]
nf_seq = node_features[T_main:]
node_features = node_features[:T_main]
# Train, test, val split (randomized)
adj_train, adj_test, \
nf_train, nf_test, \
adj_target_train, adj_target_test, \
nf_target_train, nf_target_test = train_test_split(fltr, node_features,
adj_target, nf_target,
test_size=int(T_main * 0.1))
adj_train, adj_val, \
nf_train, nf_val, \
adj_target_train, adj_target_val, \
nf_target_train, nf_target_val = train_test_split(adj_train, nf_train,
adj_target_train, nf_target_train,
test_size=int(T_main * 0.1))
matplotlib.rcParams.update({'font.size': 14})
sns.set(style='whitegrid', palette="pastel", color_codes=True)
if 'ngar' in methods:
# Model definition
# Note: TimeDistributed does not work for multiple inputs, so we need to
# concatenate X and A before feeding the layers
X_in = Input(shape=(ts, N, F))
filter_in = Input(shape=(ts, N, N))
# Convolutional block
conc1 = Concatenate()([X_in, filter_in])
gc1 = TimeDistributed(
Lambda(lambda x_:
GraphConv(128, activation='relu', kernel_regularizer=l2(l2_reg), use_bias=True)([x_[..., :-N], x_[..., -N:]])
)
)(conc1)
gc1 = Dropout(dropout_rate)(gc1)
conc2 = Concatenate()([gc1, filter_in])
gc2 = TimeDistributed(
Lambda(lambda x_:
GraphConv(128, activation='relu', kernel_regularizer=l2(l2_reg), use_bias=True)([x_[..., :-N], x_[..., -N:]])
)
)(conc2)
# pool = TimeDistributed(NodeAttentionPool())(gc2)
pool = TimeDistributed(GlobalAttentionPool(128))(gc2)
# pool = Lambda(lambda x_: K.reshape(x_, (-1, ts, N * 128)))(gc2)
# Recurrent block
lstm = LSTM(256, return_sequences=True)(pool)
lstm = LSTM(256)(lstm)
# Dense block
# dense1 = BatchNormalization()(lstm)
# dense1 = Dropout(dropout_rate)(dense1)
dense1 = Dense(256, activation='relu', kernel_regularizer=l2(l2_reg))(lstm)
# dense2 = BatchNormalization()(dense1)
# dense2 = Dropout(dropout_rate)(dense2)
dense2 = Dense(512, activation='relu', kernel_regularizer=l2(l2_reg))(dense1)
adj_out = Dense(N * N, activation='sigmoid')(dense2)
adj_out = Reshape((N, N), name='ADJ')(adj_out)
nf_out = Dense(N * F, activation='linear')(dense2)
nf_out = Reshape((N, F), name='NF')(nf_out)
# Callbacks
es_callback = EarlyStopping(monitor='val_loss', patience=es_patience, verbose=1)
mc_callback = ModelCheckpoint(log_dir + '{}_{}_best_model.h5'.format(problem, c_m_o_),
save_best_only=True, save_weights_only=True,
verbose=1)
tb_callback = TensorBoard(log_dir)
# Build model
model = Model(inputs=[X_in, filter_in],
outputs=[nf_out, adj_out])
model.compile('adam',
['mse', 'binary_crossentropy'],
metrics=['acc'])
plot_model(model,
to_file=log_dir + '{}_{}_model.png'.format(problem, c_m_o_),
show_shapes=True)
# Train model
validation_data = [[nf_val, adj_val], [nf_target_val, adj_target_val]]
model.fit([nf_train, adj_train],
[nf_target_train, adj_target_train],
epochs=epochs,
batch_size=batch_size,
validation_data=validation_data,
callbacks=[es_callback, mc_callback, tb_callback])
# Evaluate model
eval_results = model.evaluate([nf_test, adj_test],
[nf_target_test, adj_target_test],
batch_size=batch_size,
verbose=True)
log('Done\nLoss: {}\nNF loss: {}\nADJ loss: {}\nNF acc: {}\nADJ acc: {}\n'
.format(*eval_results))
log('Problem: {} Order: {} Loss: {} NF loss: {} ADJ loss: {} NF acc: {} ADJ acc: {}\n'
.format(*[problem, c_m_o_] + eval_results))
nf_pred, adj_pred = model.predict([nf_test, adj_test],
batch_size=batch_size)
adj_pred = np.round(adj_pred + 1e-6)
nf_pred_seq, adj_pred_seq = model.predict([nf_seq, adj_seq],
batch_size=batch_size)
adj_pred_seq = np.round(adj_pred_seq + 1e-6)
# Save data for later
np.savez(log_dir + '{}_{}_predicted_and_target_graphs'.format(problem, c_m_o_),
nf_target_test=nf_target_test,
adj_target_test=adj_target_test,
nf_pred=nf_pred,
adj_pred=adj_pred)
np.savez(log_dir + '{}_{}_predicted_and_target_graphs_seq'.format(problem, c_m_o_),
nf_seq=nf_seq,
adj_seq=adj_seq,
nf_target_seq=nf_target_seq,
adj_target_seq=adj_target_seq,
nf_pred_seq=nf_pred_seq,
adj_pred_seq=adj_pred_seq)
############################################################################
# PLOTS
############################################################################
# Plotting params
n_plots = 10
wspace = 0.2
hspace = 0.7
# Plot target-prediction pairs
plt.figure(figsize=(20, 3.5))
# losses = [model.evaluate([nf_test[i:i+1], adj_test[i:i+1]],
# [nf_target_test[i:i+1], adj_target_test[i:i+1]],
# verbose=False)[0]
# for i in range(nf_test.shape[0])]
idxs = np.random.permutation(nf_test.shape[0])[:n_plots]
for idx, i in enumerate(idxs):
plt.subplot(2, n_plots, idx + 1)
plot_numpy(adj_target_test[i], nf_target_test[i],
labels=False, node_size=20, layout='delaunay',
node_color=sns.color_palette('bright')[:N])
plt.subplot(2, n_plots, idx + 1 + n_plots)
plot_numpy(adj_pred[i], nf_pred[i],
labels=False, node_size=20, layout='delaunay',
node_color=sns.color_palette('bright')[:N])
plt.tight_layout()
plt.subplots_adjust(wspace=wspace, hspace=hspace)
plt.savefig(log_dir + '{}_{}_target_prediction_pairs.pdf'.format(problem, c_m_o_),
dpi=500, bbox_inches='tight')
# Plot sequence + target/prediction
plt.figure(figsize=(24, 2.5))
idx = np.random.randint(0, nf_test.shape[0])
for i in range(n_plots):
plt.subplot(1, n_plots + 2, i + 1)
plt.title('t - {}'.format(n_plots - i))
plot_numpy(adj_test[idx, i - n_plots], nf_test[idx, i - n_plots],
labels=False, node_size=20, layout='delaunay',
node_color=sns.color_palette('bright')[:N])
plt.subplot(1, n_plots + 2, n_plots + 1)
plt.title('True')
plot_numpy(adj_target_test[idx], nf_target_test[idx],
labels=False, node_size=20, layout='delaunay',
node_color=sns.color_palette('bright')[:N])
plt.subplot(1, n_plots + 2, n_plots + 2)
plt.title('Pred.')
plot_numpy(adj_pred[idx], nf_pred[idx],
labels=False, node_size=20, layout='delaunay',
node_color=sns.color_palette('bright')[:N])
plt.tight_layout()
plt.subplots_adjust(wspace=wspace, hspace=hspace)
plt.savefig(log_dir + '{}_{}_seq+target+prediction.pdf'.format(problem, c_m_o_),
dpi=500, bbox_inches='tight')
# Plot true sequence vs. purely AR prediction
plt.figure(figsize=(20, 3.5))
idx = np.random.randint(0, nf_seq.shape[0] - n_plots)
nf_seed = nf_seq[idx][None, ...]
adj_seed = adj_seq[idx][None, ...]
for i in range(n_plots):
nf_pred_, adj_pred_ = model.predict([nf_seed, adj_seed])
nf_seed = np.concatenate((nf_seed[:, 1:, ...], nf_pred_[:, None, ...]), axis=1)
adj_seed = np.concatenate((adj_seed[:, 1:, ...], adj_pred_[:, None, ...]), axis=1)
plt.subplot(2, n_plots, i + 1)
plt.title('t + {}'.format(i + 1))
plot_numpy(adj_target_seq[idx + i], nf_target_seq[idx + i],
labels=False, node_size=20, layout='delaunay',
node_color=sns.color_palette('bright')[:N])
plt.subplot(2, n_plots, i + 1 + n_plots)
plot_numpy(adj_pred_[0], nf_pred_[0],
labels=False, node_size=20, layout='delaunay',
node_color=sns.color_palette('bright')[:N])
plt.tight_layout()
plt.subplots_adjust(wspace=wspace, hspace=hspace)
plt.savefig(log_dir + '{}_{}_true_seq_v_AR_seq.pdf'.format(problem, c_m_o_),
dpi=500, bbox_inches='tight')
if 'var' in methods:
from statsmodels.tsa.vector_ar.var_model import VAR
# unroll the nodefeatures and adjacency
nfeat_unrolled = nfeat.reshape(T, -1)
adj_unrolled = adjacency.reshape(T, -1)
# deal with zero variance components
zero_var_method = 'reduced'
if zero_var_method == 'noise':
adj_tmp = adj_unrolled + np.random.randn(*adj_unrolled.shape)*.0001
elif zero_var_method == 'reduced':
variances = np.var(adj_unrolled , axis=0)
variances_null = np.where(variances==0)
variances_not_null = np.where(variances!=0)
adj_tmp = adj_unrolled[:, variances_not_null[0]]
# create sequence of vectors
x = np.concatenate((nfeat_unrolled, adj_tmp), axis=1)
# train the VAR model
T_train = T_main
x_train = x[:T_train]
model = VAR(x_train)
model_fit = model.fit(maxlags=ts)
# test the model
len_test = nf_target_seq.shape[0]
x_test = np.concatenate((nf_target_seq.reshape(len_test, -1),
adj_target_seq.reshape(len_test, -1)[:, variances_not_null[0]]),
axis=1)
x_pred = np.empty((len_test-ts, x_train.shape[1])) # the first ts vectors are the regressors
for t in range(ts, len_test):
x_pred[t-ts] = model_fit.params[0]
x_pred[t-ts] += model_fit.params[1:].transpose().dot(x_test[t-model_fit.k_ar:t][::-1].ravel())
# clip the adjacency value in {0,1}
x_pred[:, N*F:] = np.round(x_pred[:, N*F:])
# deal with zero variance components
if zero_var_method == 'noise':
x_adj = x_pred[:, N * F:]
elif zero_var_method == 'reduced':
x_adj = np.empty((x_pred.shape[0], N*N))
x_adj[:, variances_not_null[0]] = x_pred[:, N * F:]
x_adj[:, variances_null[0]] = adj_unrolled[:1, variances_null[0]].repeat(x_adj.shape[0], axis=0)
# re-assemble node features and adjacency matrices
nf_var_pred = x_pred[:, :N*F].reshape(-1, N, F)
adj_var_pred = x_adj.reshape(-1, N, N)
# Plot residual GED
nxged = NX_GED()
gl_t = NX_GED.npy_to_nx(adj_target_seq, nf_target_seq)[:reduced]
if 'ngar' in methods:
gl_p = NX_GED.npy_to_nx(adj_pred_seq, nf_pred_seq)[:reduced]
else:
gl_p = None
if 'var' in methods:
gl_var = NX_GED.npy_to_nx(adj_var_pred, nf_var_pred)[:reduced]
else:
gl_var = None
_, dissimilarity_matrix = get_recerr(gl_test=gl_t, gl_pred=gl_p, gl_var_pred=gl_var, nxged=nxged, reduced=reduced, methods=methods)
np.savez(log_dir + '{}_{}_dissimilarity_matrix_seq'.format(problem, c_m_o_),
dissimilarity_matrix=dissimilarity_matrix)
plt.tight_layout()
figs = [plt.figure(n) for n in plt.get_fignums()]
for i, fig in enumerate(figs):
fig.savefig(log_dir + '{}_{}_{}_{}.pdf'.format(problem, c_m_o_, fig, i),
dpi=500, bbox_inches='tight')
plt.close('all')
K.clear_session()
|
# Find the contiguous sub-array(containing at least one number) which has the maximum sum
"""
EXAMPLE:
INPUT : [1, 2, 3, -2, 5]
OUTPUT : 9
EXPLANATION :
Because sub-array (1,2,3,-2,5) has maximum sum among all sub-array.
For example : sub-array (1,2,3) has sum 6
sub-array (1,2,3,-2) has sum 4
sub-array (3,-2,5) has sum 6
and so on..................
Final max sum will be 9 and hence we return it
"""
"""
------------------------------IMPORTANT NOTE---------------------------------
This algorithm update the given array so if you are restricted to update the
given array see _08_i_)_Kadane's_Algorithm
"""
"""
------------------------------EXPLANATION----------------------------------------
The main idea in this algorithm is:
Each time we take 2 elements (array[n] and array[n + 1]) from the given array and add them.
If their summation is greater than array[n + 1], then we replace the value of array[n + 1]
with the summation.
After we finish updating the array, we simply return the maximum of the
array.
For example:
arr = [-2, 2, 3, -2]
Now the loop begins:
--------------1st iteration--------------------
summation = arr[0] + arr[1] = 0
is summation > arr[1]
no so we continue without replacing
--------------2nd iteration--------------------
summation = arr[1] + arr[2] = 5
is summation > arr[2]
yes so we replace arr[2] with summation
updated arr = [-2, 2, 5, -2]
--------------3rd iteration--------------------
summation = arr[2] + arr[3] = 3
is summation > arr[3]
yes so we replace arr[3] with summation
updated arr = [-2, 2, 5, 3]
loop ends
we return max among the updated array
which is 5
"""
def max_sub_array(arr):
    """Return the maximum contiguous sub-array sum of *arr* (Kadane's algorithm).

    NOTE: mutates *arr* in place — after the call, each cell holds the best
    sum of a sub-array that ends at that position.

    TIME COMPLEXITY : O(n)
    SPACE COMPLEXITY : O(1)
    """
    for i in range(1, len(arr)):
        # Extend the best run ending at i-1 only when that helps position i.
        running = arr[i - 1] + arr[i]
        if running > arr[i]:
            arr[i] = running
    return max(arr)
print(max_sub_array([-2, 2, 3, -2]))
|
import os
import base64
from datetime import datetime, timedelta
from operator import attrgetter, itemgetter
from flask import current_app
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
import sqlalchemy as sa
from sqlalchemy_continuum.plugins import FlaskPlugin
from sqlalchemy_continuum import make_versioned
from app import login #, db
#This line critical, it's required for sqlalchemy_continuum versioning:
make_versioned(plugins=[FlaskPlugin()])
#Extending a class from this class enables get_dict automatically, returning column names and field values:
class BaseModel(object):
    """Mixin that gives every model a generic ``get_dict`` serializer.

    Extending a model from this class enables ``get_dict`` automatically,
    returning a mapping of column names to field values.
    """

    @classmethod
    def _get_keys(cls):
        """Return the mapped column names for this model class."""
        # Bug fix: this previously called ``db.class_mapper(cls)``, but ``db``
        # is not imported in this module (it is commented out in the
        # ``from app import login`` line above), so any call raised NameError.
        # ``sqlalchemy.orm.class_mapper`` is the same function, reached through
        # the ``sa`` import that IS available (see ``sa.orm.configure_mappers()``
        # at the bottom of this file).
        return sa.orm.class_mapper(cls).c.keys()

    def get_dict(self):
        """Return ``{column_name: value}`` for every mapped column."""
        return {key: getattr(self, key) for key in self._get_keys()}
#Suggestion to use, and explaination of, CRUDMixin comes from https://realpython.com/python-web-applications-with-flask-part-ii/
#Customisations from original CRUDMixin:
# - added fill_with_formdata method (so it's easy to pass the class back to a form that failed validation).
# class CRUDMixin(object):
# __table_args__ = {'extend_existing': True}
# id = db.Column(db.Integer, primary_key=True)
# @classmethod
# def get_by_id(cls, id):
# if any(
# (
# isinstance(id, str) and id.isdigit(),
# isinstance(id, (int, float))
# ),
# ):
# return cls.query.get(int(id))
# return None
# @classmethod
# def create(cls, **kwargs):
# classdict = {k: v for k, v in kwargs.items() if k in cls._get_keys()}
# instance = cls(**classdict)
# return instance.save()
# def update(self, commit=True, **kwargs):
# for attr, value in kwargs.items():
# setattr(self, attr, value)
# return commit and self.save() or self
# def save(self, commit=True):
# db.session.add(self)
# if commit:
# db.session.commit()
# return self
# def delete(self, commit=True):
# db.session.delete(self)
# return commit and db.session.commit()
# @classmethod
# def fill_with_formdata(cls, formdata):
# classdict = {k: v for k, v in formdata.items() if k in cls._get_keys()}
# return cls(**classdict)
# class User(BaseModel, UserMixin, db.Model):
# __tablename__ = 'user'
# __versioned__ = {}
# id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# email = db.Column(db.String(120), index=True, unique=True)
# firstname = db.Column(db.String(64), index=True, )
# surname = db.Column(db.String(64), index=True, )
# password_hash = db.Column(db.String(128))
# password_expired = db.Column(db.Boolean, default=False)
# is_admin = db.Column(db.Boolean ,default=False)
# is_active = db.Column(db.Boolean, default=True)
# token = db.Column(db.String(32), index=True, unique=True)
# token_expiration = db.Column(db.DateTime)
# # roles = db.relationship("UserRole")
# # currentorganisation_id = db.Column(db.Integer, db.ForeignKey("organisation.id"), index=True)
# # currentorganisation = db.relationship("app.models.Organisation", backref=db.backref("currentorganisation"))
# def set_password(self, password):
# self.password_hash = generate_password_hash(password)
# def check_password(self, password):
# return check_password_hash(self.password_hash, password)
# def get_token(self, expires_in=3600):
# now = datetime.utcnow()
# if self.token and self.token_expiration > now + timedelta(seconds=60):
# return self.token
# self.token = base64.b64encode(os.urandom(24)).decode('utf-8')
# self.token_expiration = now + timedelta(seconds=expires_in)
# db.session.add(self)
# return self.token
# def revoke_token(self):
# self.token_expiration = datetime.utcnow() - timedelta(seconds=1)
# @staticmethod
# def check_token(token):
# user = User.query.filter_by(token=token).first()
# if user is None or user.token_expiration < datetime.utcnow():
# return None
# return user
# def rolelist(self):
# return [r.role_id for r in self.roles]
# def can_delete(self):
# #If user is in Admin (2) role, then they can delete:
# if set(self.rolelist()).intersection(set([2])):
# return True
# else:
# return False
@login.user_loader
def load_user(id):
    """Flask-Login user loader.

    The User model is commented out above, so no lookup is possible yet;
    always returns None (anonymous).
    """
    return None #User.query.get(int(id))
# class Announcement(BaseModel, db.Model):
# __tablename__ = 'announcement'
# __versioned__ = {}
# id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# subject = db.Column(db.String(120))
# announcement = db.Column(db.Text())
# announcementdate = db.Column(db.Date)
# type = db.Column(db.Integer,default=1)
# status = db.Column(db.Integer,default=1)
# class Role(BaseModel, db.Model):
# __tablename__ = 'role'
# __versioned__ = {}
# id = db.Column(db.Integer, primary_key=True)
# rolename = db.Column(db.String(120))
# class UserRole(db.Model):
# __versioned__ = {}
# user_id = db.Column(db.Integer, db.ForeignKey(User.id), primary_key=True)
# role_id = db.Column(db.Integer, db.ForeignKey(Role.id), primary_key=True)
# # role = db.relationship(Role, backref=db.backref("role_assoc"))
# # user = db.relationship(User, backref=db.backref("user_assoc"))
# class UserOrganisation(BaseModel, db.Model):
# __tablename__ = 'user_organisation'
# __versioned__ = {}
# user_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
# organisation_id = db.Column(db.Integer, db.ForeignKey("organisation.id"), primary_key=True)
# # user = db.relationship("app.models.User", backref=db.backref("user2_assoc"))
# # organisation = db.relationship("app.models.Organisation", backref=db.backref("userorg_assoc"))
# class Organisation(BaseModel, db.Model, CRUDMixin):
# __tablename__ = 'organisation'
# __versioned__ = {}
# id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# name = db.Column(db.String(255), nullable=False)
# type = db.Column(db.SmallInteger)
# # users = db.relationship(UserOrganisation, backref=db.backref("users"))
#Lookups:
# class lkup_example(BaseModel, db.Model):
# id = db.Column(db.SmallInteger, primary_key=True, autoincrement=False)
# example = db.Column(db.String(150))
#This line critical. It configures the sqlalchemy_continuum versioning:
sa.orm.configure_mappers()
|
# Form implementation generated from reading ui file 'ic.ui'
#
# Created by: PyQt6 UI code generator 6.2.3
#
# WARNING: Any manual changes made to this file will be lost when pyuic6 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_ic(object):
    """pyuic6-generated layout class for the 'ic' form (from ic.ui).

    WARNING: this class is regenerated by pyuic6; any manual edits here are
    lost when the generator is run again.
    """

    def setupUi(self, ic):
        """Build the widget tree, layouts, and size policies for *ic*."""
        ic.setObjectName("ic")
        ic.resize(692, 467)
        # Top-level vertical layout holding the two main rows.
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(ic)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        # Row 1: input tabs (left) + statistics box (right).
        self.horizontal_layout_1 = QtWidgets.QHBoxLayout()
        self.horizontal_layout_1.setObjectName("horizontal_layout_1")
        self.tab_widget = QtWidgets.QTabWidget(ic)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.tab_widget.sizePolicy().hasHeightForWidth())
        self.tab_widget.setSizePolicy(sizePolicy)
        self.tab_widget.setMaximumSize(QtCore.QSize(16777215, 400))
        self.tab_widget.setObjectName("tab_widget")
        # "Text" tab: free-text input area.
        self.tab_text = QtWidgets.QWidget()
        self.tab_text.setObjectName("tab_text")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.tab_text)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.group_box_input = QtWidgets.QGroupBox(self.tab_text)
        self.group_box_input.setObjectName("group_box_input")
        self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.group_box_input)
        self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.text_edit_input = QtWidgets.QTextEdit(self.group_box_input)
        self.text_edit_input.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.text_edit_input.setObjectName("text_edit_input")
        self.verticalLayout_6.addWidget(self.text_edit_input)
        self.verticalLayout_2.addWidget(self.group_box_input)
        self.tab_widget.addTab(self.tab_text, "")
        # "Document" tab: empty placeholder page.
        self.tab_document = QtWidgets.QWidget()
        self.tab_document.setObjectName("tab_document")
        self.tab_widget.addTab(self.tab_document, "")
        self.horizontal_layout_1.addWidget(self.tab_widget)
        self.group_box_stats = QtWidgets.QGroupBox(ic)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.group_box_stats.sizePolicy().hasHeightForWidth())
        self.group_box_stats.setSizePolicy(sizePolicy)
        self.group_box_stats.setMaximumSize(QtCore.QSize(16777215, 400))
        self.group_box_stats.setObjectName("group_box_stats")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.group_box_stats)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.text_edit_stats = QtWidgets.QTextEdit(self.group_box_stats)
        self.text_edit_stats.setObjectName("text_edit_stats")
        self.verticalLayout.addWidget(self.text_edit_stats)
        self.horizontal_layout_1.addWidget(self.group_box_stats)
        self.verticalLayout_5.addLayout(self.horizontal_layout_1)
        # Row 2: options form (left) + analysis button (right).
        self.horizontal_layout_2 = QtWidgets.QHBoxLayout()
        self.horizontal_layout_2.setObjectName("horizontal_layout_2")
        self.vertical_layout_2 = QtWidgets.QVBoxLayout()
        self.vertical_layout_2.setObjectName("vertical_layout_2")
        self.group_box_options = QtWidgets.QGroupBox(ic)
        self.group_box_options.setObjectName("group_box_options")
        self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.group_box_options)
        self.verticalLayout_8.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_8.setObjectName("verticalLayout_8")
        self.form_layout_options = QtWidgets.QFormLayout()
        self.form_layout_options.setLabelAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
        self.form_layout_options.setFormAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter|QtCore.Qt.AlignmentFlag.AlignTop)
        self.form_layout_options.setObjectName("form_layout_options")
        # Row 0: language selector.
        self.label_lang = QtWidgets.QLabel(self.group_box_options)
        self.label_lang.setObjectName("label_lang")
        self.form_layout_options.setWidget(0, QtWidgets.QFormLayout.ItemRole.LabelRole, self.label_lang)
        self.combo_box_lang = QtWidgets.QComboBox(self.group_box_options)
        self.combo_box_lang.setObjectName("combo_box_lang")
        self.form_layout_options.setWidget(0, QtWidgets.QFormLayout.ItemRole.FieldRole, self.combo_box_lang)
        # Row 1: max key length spin box (1..100, default 20).
        self.label_max_key_length = QtWidgets.QLabel(self.group_box_options)
        self.label_max_key_length.setObjectName("label_max_key_length")
        self.form_layout_options.setWidget(1, QtWidgets.QFormLayout.ItemRole.LabelRole, self.label_max_key_length)
        self.spin_box_max_key_length = QtWidgets.QSpinBox(self.group_box_options)
        self.spin_box_max_key_length.setMinimum(1)
        self.spin_box_max_key_length.setMaximum(100)
        self.spin_box_max_key_length.setSingleStep(1)
        self.spin_box_max_key_length.setProperty("value", 20)
        self.spin_box_max_key_length.setObjectName("spin_box_max_key_length")
        self.form_layout_options.setWidget(1, QtWidgets.QFormLayout.ItemRole.FieldRole, self.spin_box_max_key_length)
        # Row 2: delta-error double spin box (0..0.1, default 0.001).
        self.label_delta = QtWidgets.QLabel(self.group_box_options)
        self.label_delta.setObjectName("label_delta")
        self.form_layout_options.setWidget(2, QtWidgets.QFormLayout.ItemRole.LabelRole, self.label_delta)
        self.double_spin_box_delta = QtWidgets.QDoubleSpinBox(self.group_box_options)
        self.double_spin_box_delta.setDecimals(3)
        self.double_spin_box_delta.setMaximum(0.1)
        self.double_spin_box_delta.setSingleStep(0.001)
        self.double_spin_box_delta.setProperty("value", 0.001)
        self.double_spin_box_delta.setObjectName("double_spin_box_delta")
        self.form_layout_options.setWidget(2, QtWidgets.QFormLayout.ItemRole.FieldRole, self.double_spin_box_delta)
        # Row 3: optional custom key length (checkbox + spin box).
        self.check_box_custom_key_length = QtWidgets.QCheckBox(self.group_box_options)
        self.check_box_custom_key_length.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.check_box_custom_key_length.sizePolicy().hasHeightForWidth())
        self.check_box_custom_key_length.setSizePolicy(sizePolicy)
        self.check_box_custom_key_length.setMinimumSize(QtCore.QSize(0, 0))
        self.check_box_custom_key_length.setBaseSize(QtCore.QSize(0, 0))
        self.check_box_custom_key_length.setObjectName("check_box_custom_key_length")
        self.form_layout_options.setWidget(3, QtWidgets.QFormLayout.ItemRole.LabelRole, self.check_box_custom_key_length)
        self.spin_box_custom_key_length = QtWidgets.QSpinBox(self.group_box_options)
        self.spin_box_custom_key_length.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.spin_box_custom_key_length.sizePolicy().hasHeightForWidth())
        self.spin_box_custom_key_length.setSizePolicy(sizePolicy)
        self.spin_box_custom_key_length.setMinimumSize(QtCore.QSize(0, 0))
        self.spin_box_custom_key_length.setMinimum(1)
        self.spin_box_custom_key_length.setMaximum(1000000)
        self.spin_box_custom_key_length.setProperty("value", 4)
        self.spin_box_custom_key_length.setObjectName("spin_box_custom_key_length")
        self.form_layout_options.setWidget(3, QtWidgets.QFormLayout.ItemRole.FieldRole, self.spin_box_custom_key_length)
        self.verticalLayout_8.addLayout(self.form_layout_options)
        self.vertical_layout_2.addWidget(self.group_box_options)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
        self.vertical_layout_2.addItem(spacerItem)
        self.horizontal_layout_2.addLayout(self.vertical_layout_2)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
        self.horizontal_layout_2.addItem(spacerItem1)
        self.vertical_layout_1 = QtWidgets.QVBoxLayout()
        self.vertical_layout_1.setObjectName("vertical_layout_1")
        self.horizontal_layout_3 = QtWidgets.QHBoxLayout()
        self.horizontal_layout_3.setObjectName("horizontal_layout_3")
        self.button_analysis = QtWidgets.QPushButton(ic)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.button_analysis.sizePolicy().hasHeightForWidth())
        self.button_analysis.setSizePolicy(sizePolicy)
        self.button_analysis.setMinimumSize(QtCore.QSize(100, 30))
        self.button_analysis.setObjectName("button_analysis")
        self.horizontal_layout_3.addWidget(self.button_analysis)
        self.vertical_layout_1.addLayout(self.horizontal_layout_3)
        spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
        self.vertical_layout_1.addItem(spacerItem2)
        self.horizontal_layout_2.addLayout(self.vertical_layout_1)
        self.verticalLayout_5.addLayout(self.horizontal_layout_2)
        spacerItem3 = QtWidgets.QSpacerItem(20, 57, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
        self.verticalLayout_5.addItem(spacerItem3)

        self.retranslateUi(ic)
        self.tab_widget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(ic)

    def retranslateUi(self, ic):
        """Assign all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        ic.setWindowTitle(_translate("ic", "Form"))
        self.group_box_input.setTitle(_translate("ic", "Input text"))
        self.tab_widget.setTabText(self.tab_widget.indexOf(self.tab_text), _translate("ic", "Text"))
        self.tab_widget.setTabText(self.tab_widget.indexOf(self.tab_document), _translate("ic", "Document"))
        self.group_box_stats.setTitle(_translate("ic", "Statistics"))
        self.group_box_options.setTitle(_translate("ic", "Options"))
        self.label_lang.setText(_translate("ic", "Language"))
        self.label_max_key_length.setText(_translate("ic", "Max key length"))
        self.label_delta.setText(_translate("ic", "Delta error"))
        self.check_box_custom_key_length.setText(_translate("ic", "Custom key length"))
        self.button_analysis.setText(_translate("ic", "Analysis"))
|
#!/usr/bin/env python
# coding=utf-8
import json
from logging import getLogger
import redis
logger = getLogger(__name__)
def get_pending_task_list():
    """Read queued Celery messages from the default local Redis instance.

    Returns a list of ``{"id": ..., "info": ...}`` dicts, one per message in
    the "celery" list (indices -100 through 100, matching the original range).
    """
    client = redis.Redis()
    pending = []
    for raw_message in client.lrange("celery", -100, 100):
        headers = json.loads(raw_message)["headers"]
        pending.append({
            "id": headers["id"],
            "info": headers["argsrepr"],
        })
    return pending
|
# -*- coding: utf-8 -*-
import requests , os , time , datetime , re
from bs4 import BeautifulSoup
# ANSI colour escape codes used by the banner / prompt output below.
W = '\033[0m' # white (default)
R = '\033[1;31m' # red
G = '\033[1;32m' # green bold
O = '\033[1;33m' # orange
B = '\033[1;34m' # blue
P = '\033[1;35m' # purple
C = '\033[1;36m' # cyan
GR = '\033[1;37m' # gray
# Clear the terminal on startup (POSIX 'clear'; will fail silently on Windows).
os.system('clear')
def banner():
    """Print the coloured startup banner.

    NOTE(review): the glyphs look like an encoding corruption of the original
    box-drawing art ('โ' runs); preserved byte-for-byte since they are
    runtime output strings.
    """
    print (R+"โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"+C+"โโ")
    print ("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"+R+"โโ"+C+" โ"+R)
    print (" โโ"+C+" โ"+W)
    print (W+" โโโ โโโ โโโโโโโโโโ โโโ "+R+"โโ"+C+" โ"+W)
    print (W+" โโโ โโโ โโโโโโโโโโโโโโโโ "+R+"โโ"+C+" โ"+W)
    print (W+" โโโ โโโโโโโ โโโโโโโโโโโ "+R+"โโ"+C+" โ"+W)
    print (W+" โโโ โโโโโโโ โโโโโโโ โโโ "+R+"โโ"+C+" โ"+W)
    print (W+" โโโโโโโโโโโ โโโโโโโโโโโ โโโ "+R+"โโ"+C+" โ"+W)
    print (W+" โโโโโโโโโโโ โโโโโโโโโโโ โโโ "+R+"โโ"+C+" โ"+R)
    print (" โโ"+C+" โ"+R)
    print ("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"+C+" โ")
    print ("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
    print (W+"")
def banner_dl():
    """Print the download-page banner with the current time and title.

    NOTE(review): reads the module-global `judul` (set in the script body
    below), so it must only be called after a title has been resolved;
    calling it earlier raises NameError.  Glyphs preserved byte-for-byte.
    """
    print (R+"โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"+C+"โโ")
    print ("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"+R+"โโ"+C+" โ"+R)
    print (" โโ"+C+" โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
    print (W+" โโโ โโโ โโโโโโโโโโ โโโ "+R+"โโ"+C+" โ"+W)
    print (W+" โโโ โโโ โโโโโโโโโโโโโโโโ "+R+"โโ"+C+" โ"+W+" ", time.ctime())
    print (W+" โโโ โโโโโโโ โโโโโโโโโโโ "+R+"โโ"+C+" โ"+W+" ", judul)
    print (W+" โโโ โโโโโโโ โโโโโโโ โโโ "+R+"โโ"+C+" โ"+W+" http://149.56.24.226")
    print (W+" โโโโโโโโโโโ โโโโโโโโโโโ โโโ "+R+"โโ"+C+" โ"+W)
    print (W+" โโโโโโโโโโโ โโโโโโโโโโโ โโโ "+R+"โโ"+C+" โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"+R)
    print (" โโ"+C+" โ"+W+" https://github.com/N1ght420"+R)
    print ("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"+C+" โ")
    print ("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
    print (W+"")
# --- interactive entry point -------------------------------------------------
banner()
a = input(C+" Judul "+R+"> "+W)  # movie title to search for
os.system('clear')

# Query the index site for the requested title.
payload = {"s":a}
req = requests.get("http://149.56.24.226/", params=payload).text
soup = BeautifulSoup(req, "html.parser")
linknya = soup.find_all('h2')
# NOTE(review): assumes the 3rd <h2> holds the first search result — confirm
# against the site's current markup.
link = linknya[2]
try:
    judul = re.search(r'<a href="http://149.56.24.226/(.*)/" rel="bookmark"', str(link)).group(1)
except AttributeError:
    # No match: re.search returned None, so .group(1) raised.  `judul` is set
    # to None here, which makes the string concatenation below raise and fall
    # through to the 404 message.
    judul = re.search(r'<a href="http://149.56.24.226/(.*)/" rel="bookmark"', str(link))
try:
    banner_dl()
    print (C+" ["+W+" JUDUL "+C+"]"+R+" >"+W+" ",str(judul))
    print ("")
    dload = "http://dl.sharemydrive.xyz/get/" + judul
    bpass = "http://dl.sharemydrive.xyz/verifying.php"
    data = {"Content-Type":"application/x-www-form-urlencoded; charset=UTF-8",
            "Accept":"*/*",
            "X-Requested-With":"XMLHttpRequest"}
    payload2 = {"slug":judul}
    req2 = requests.post(bpass, headers=data, params=payload2).text
    soup2 = BeautifulSoup(req2, "html.parser")
    linkdownload = soup2.find_all('a')
    # Scrape each quality's download link, when present.
    p360 = re.findall(r'btn-360" href="(.*)" rel=', str(linkdownload))
    if len(p360) > 0:
        for laz1 in p360:
            print(C+" ["+W+" 360 P "+C+"]"+R+" >"+W+" ",laz1)
    p480 = re.findall(r'btn-480" href="(.*)" rel=', str(linkdownload))
    if len(p480) > 0:
        for laz2 in p480:
            print(C+" ["+W+" 480 P "+C+"]"+R+" >"+W+" ",laz2)
    p720 = re.findall(r'btn-720" href="(.*)" rel=', str(linkdownload))
    if len(p720) > 0:
        for laz3 in p720:
            print(C+" ["+W+" 720 P "+C+"]"+R+" >"+W+" ",laz3)
    p1080 = re.findall(r'btn-1080" href="(.*)" rel=', str(linkdownload))
    if len(p1080) > 0:
        for laz4 in p1080:
            print(C+" ["+W+" 1080P "+C+"]"+R+" >"+W+" ",laz4)
except Exception:
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt (Ctrl-C).  `Exception` keeps the best-effort
    # "not found" fallback without trapping interpreter-exit signals.
    print (C+" ["+W+" 404 "+C+"]"+R+" >"+W+" Film tidak ditemukan")
print ("")
|
'''
Problem Link - https://www.codechef.com/JULY20B/problems/ADAKING
'''
from sys import *
from math import *
from collections import *
# 1e9 + 7, the usual competitive-programming modulus (unused in this solution).
mod = 1000000007

# Fast stdin readers; `stdin` comes from the `from sys import *` above.
def get_array(): return list(map(int, stdin.readline().split()))
def get_ints(): return map(int, stdin.readline().split())
def get_int(): return int(stdin.readline())
def get_input(): return stdin.readline().strip()
def main():
    """Solve ADAKING: print one 8x8 chess board per test case.

    The board is an unrolled 64-character string read row-wise:
    K-1 free squares ('.'), then the king ('O'), then 64-K captured
    squares ('X').
    """
    remaining = get_int()
    while remaining:
        k = get_int()
        board = '.' * (k - 1) + 'O' + 'X' * (64 - k)
        # Emit the 64-char string as eight rows of eight squares.
        for start in range(0, 64, 8):
            print(board[start:start + 8])
        remaining -= 1
if __name__ == "__main__":
main()
|
from urllib import request
import lxml
from lxml import html
import Utils as utils
import re
from data_amount import *
import data_amount as data_amount
def prune(url, browse_boolean):
    """Decide whether the crawler should keep following *url*.

    Returns True to continue searching and False to stop.  Known dead-end
    hosts/paths are rejected outright; in browse mode, only non-browse pages
    or paginated browse pages are followed.
    """
    url = url.lower().strip()
    # Hosts / path fragments that never lead to datasets.
    for blocked in ('linkedin', 'twitter', 'socrata', 'login', 'mail'):
        if blocked in url:
            return False
    if browse_boolean:
        return 'browse' not in url or 'page' in url
    return True
def get_all_dataset_pages(url, add_to_url, max_ds_num):
    """Build the paginated browse URLs for a portal.

    Produces ``url + add_to_url + '<page>'`` for pages 1 through
    ``(max_ds_num + 20) // 10 - 1`` — the same heuristic page count as
    before (roughly one page per 10 datasets, plus one page of slack).
    """
    last_page = (max_ds_num + 20) // 10
    return [url + add_to_url + str(page) for page in range(1, last_page)]
def search(url, add_to_url = '/browse?limitTo=datasets&utf8%2Fbrowse%3FlimitTo=datasets&utf8=&page=', browse_boolean = False, data_set_list = None):
    """Search through the web graph to find dataset links.

    Fetches every paginated browse page of the portal at *url*, extracts
    anchor hrefs, and collects links whose last 10 characters look like a
    Socrata dataset id (xxxx-xxxx).  Returns the accumulated list.
    """
    # Bug fix: the default used to be a mutable `[]`, which Python shares
    # across calls — results from one crawl leaked into every later crawl.
    if data_set_list is None:
        data_set_list = []
    ds_len = data_amount.main(url)
    # NOTE(review): add_to_url is appended here AND again inside
    # get_all_dataset_pages(), doubling the suffix in every page URL.
    # Preserved as-is because the per-page try/except tolerates failed
    # fetches — confirm the intended URL shape before changing.
    url = url + add_to_url
    dataset_pages = get_all_dataset_pages(url, add_to_url, ds_len)
    # Socrata dataset ids look like 'abcd-1234' at the end of the path.
    pattern = re.compile(r'/([a-z]|[0-9]){4}-([a-z]|[0-9]){4}')
    for url in dataset_pages:
        try:
            http_response = request.urlopen(url)
            open_response = http_response.read().decode('utf-8')
            raw_html = lxml.html.fromstring(open_response)
        except Exception:
            # Bug fix: was a bare `except:` that also caught SystemExit and
            # KeyboardInterrupt; skip pages that fail to fetch or parse.
            continue
        for link in raw_html.xpath('//a/@href'):
            if '#' in link:
                continue
            if ('limit' in link and add_to_url == ''):
                continue
            # NOTE(review): unreachable — any '#' already hit `continue` above.
            if link[-1:] == '#':
                continue
            if link in url:
                continue
            if "http" not in link:
                # NOTE(review): the absolutized link is never used afterwards;
                # preserved from the original — possibly meant to be queued.
                link = url + link
            else:
                if pattern.match(link[-10:]) is not None and 'socrata' not in link.lower():
                    print("Made it:{}".format(link))
                    if link not in data_set_list:
                        data_set_list += [link]
    return data_set_list
def main(url = 'https://data.sfgov.org'):
    """Crawl *url* (a Socrata data portal) and return the dataset links found."""
    return search(url)
if __name__ == '__main__':
main()
|
import kaldi_io
import numpy as np
import argparse
import sys
import os
import math
if sys.version > '3':
import pickle
else:
import cPickle as pickle
from tqdm import tqdm
def ctc_len(label):
    """Minimum number of frames CTC needs to emit *label*: its length plus
    one extra (blank) frame for every pair of identical adjacent symbols."""
    repeats = sum(1 for a, b in zip(label, label[1:]) if a == b)
    return len(label) + repeats
if __name__ == "__main__":
    # Bucket Kaldi utterances by (rounded-up) number of chunks and pickle
    # each bucket into its own file under pickle_path.
    parser = argparse.ArgumentParser(description="convert to pickle")
    parser.add_argument("scp", type=str)
    parser.add_argument("label", type=str)
    parser.add_argument("weight", type=str)
    parser.add_argument("chunk_size", type=int, default=40)
    parser.add_argument("pickle_path", type=str)
    args = parser.parse_args()

    # utterance id -> integer label sequence
    label_dict = {}
    with open(args.label, encoding="utf-8") as f:
        for line in f:
            sp = line.split()
            label_dict[sp[0]] = np.asarray([int(x) for x in sp[1:]])

    # utterance id -> single-element float weight array
    weight_dict = {}
    with open(args.weight, encoding="utf-8") as f:
        for line in f:
            sp = line.split()
            weight_dict[sp[0]] = np.asarray([float(sp[1])])

    dataset_dict = {}
    with open(args.scp, encoding="utf-8") as f:
        lines = f.readlines()
    for line in tqdm(lines):
        key, value = line.split()
        label = label_dict[key]
        weight = weight_dict[key]
        feature = np.asarray(kaldi_io.read_mat(value))
        # Skip utterances too short for CTC to emit their label sequence.
        if feature.shape[0] < ctc_len(label):
            #print('{} is too short'.format(key))
            continue
        cate = str(math.ceil(feature.shape[0]/args.chunk_size))+'.pkl'
        dataset_dict.setdefault(cate, []).append([key, value, label, weight])
    for k, v in dataset_dict.items():
        # Fix: close each pickle file deterministically (the original left
        # every handle open, leaking descriptors and risking unflushed data).
        with open(os.path.join(args.pickle_path, k), 'wb') as pkl_file:
            pickle.dump(v, pkl_file, 2)
|
# imports
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from src.visualization_description.descriptive_tool import DescribeData
# Toggle to also save figures into the thesis-report directory below.
update_report_figs = False
fig_path_report = '../Thesis-report/00_figures/describe_data/'
# reading data
df = pd.read_csv('data/processed/taiwanese_credit.csv')
# Descriptive-statistics helper: y = default flag, a = sensitive attribute (sex).
desc = DescribeData(data = df,
                    y_name = "default_next_month",
                    a_name = 'sex',
                    id_name = 'id',
                    data_name = 'Taiwanese Credit Score')
# LaTeX summary table, positive-rate plot, and count-by-sex plot.
desc.descriptive_table_to_tex(target_tex_name="Defaulted")
desc.plot_positive_rate(title = 'Percentage of Defaulters', orientation='v')
desc.plot_n_target_across_sens_var(
    orientation='v',
    return_ax=True,
    **{"class_0_label":'Not Defaulted', "class_1_label":'Defaulted'}
)
if update_report_figs:
    # Saves the current matplotlib figure -- presumably the counts plot above.
    plt.savefig(fig_path_report+'taiwanese_N_by_sex.pdf', bbox_inches='tight')
|
import hashlib
import json
import requests
import shutil
from judge_pics import judge_pics
from dateutil import parser
from lxml import html
# CourtListener API token; fill in to raise the anonymous rate limit.
token = ""
if token:
    headers = {"Authorization": "Token %s" % token}
else:
    print(
        "Warning: No CourtListener token used. You'll run out of free queries to the API quickly."
    )
    headers = {}
def granular_date(d, granularity):
    """Format date string *d* at the requested granularity, prefixed with '-'.

    :param d: date string (any format dateutil can parse); falsy -> ''
    :param granularity: one of '%Y', '%Y-%m', '%Y-%m-%d'
        (CourtListener date_granularity_* values)
    :return: '-YYYY', '-YYYY-MM' or '-YYYY-MM-DD'; '' when d is empty or
        the granularity is unrecognized
    """
    if not d:
        return ""
    d = parser.parse(d).date()
    GRANULARITY_YEAR = "%Y"
    GRANULARITY_MONTH = "%Y-%m"
    GRANULARITY_DAY = "%Y-%m-%d"
    if granularity == GRANULARITY_DAY:
        return "-" + d.strftime("%Y-%m-%d")
    elif granularity == GRANULARITY_MONTH:
        return "-" + d.strftime("%Y-%m")
    elif granularity == GRANULARITY_YEAR:
        return "-" + d.strftime("%Y")
    # Fix: the original fell through and returned None for an unknown
    # granularity, which rendered as the literal 'None' inside slug strings
    # built with '%s' formatting in make_slug.
    return ""
def make_slug(name):
    """Hit our search engine and get back a good result. Look that up in the
    People endpoint to get glorius metadata.
    We start with a full name, so we plug that in.

    Returns a 'last-first[-date]' slug string, or None when the lookup is
    ambiguous or (for names that are not exactly two words) empty.
    """
    # Drop middle initials
    name = name.lower()
    name = " ".join(
        [n.replace(",", "") for n in name.split() if not n.endswith(".")]
    )
    # Query the people search index, restricted to the N.D. Cal. court.
    result_json = requests.get(
        "https://www.courtlistener.com/api/rest/v3/search/?type=p&name=%s&court=cand"
        % name,
        headers=headers,
    ).json()
    if result_json["count"] > 1:
        # Ambiguous name: bail out rather than guess the wrong judge.
        print(
            "Warning: Got back %s results for %s"
            % (
                result_json["count"],
                name,
            )
        )
        return None
    if result_json["count"] < 1:
        print("Warning: Got back no results for %s" % name)
        name_parts = name.split()
        if len(name_parts) == 2:
            # Fall back to a 'last-first' slug built from the raw name.
            return "%s-%s" % (name_parts[1].lower(), name_parts[0].lower())
        return None
    # Exactly one hit: fetch the full person record for name and birth date.
    id = result_json["results"][0]["id"]
    result_json = requests.get(
        "https://www.courtlistener.com/api/rest/v3/people/?id=%s" % id,
        headers=headers,
    ).json()
    judge = result_json["results"][0]
    return "%s-%s%s" % (
        judge["name_last"].lower(),
        judge["name_first"].lower(),
        granular_date(judge["date_dob"], judge["date_granularity_dob"]),
    )
def get_hash_from_file(image):
    """Return the SHA-256 hex digest of the file's contents."""
    # Fix: must open in binary mode -- hashlib.sha256() requires bytes, so
    # the original text-mode read raised TypeError (and text mode would
    # corrupt image bytes anyway).
    with open(image, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()
def run_things():
    """Scrape the N.D. Cal. judges page, download each judge's portrait,
    and record its metadata in judges.json.

    NOTE(review): `os` is never imported in this file and `judge_root` is
    not defined anywhere visible, so the json.dump call at the bottom would
    raise a NameError -- confirm against the full module before running.
    """
    base_href = "http://www.cand.uscourts.gov"
    start_path = "/judges"
    start_url = base_href + start_path
    r = requests.get(start_url)
    html_tree = html.fromstring(r.text)
    html_tree.make_links_absolute(base_href)
    # Each judge is an <li> containing a link: collect (display name, URL).
    judge_nodes = html_tree.xpath('//section[@id="main-content"]//li')
    judge_info = []
    for node in judge_nodes:
        try:
            name = node.xpath("a/text()")[0]
            url = node.xpath("a/@href")[0]
        except IndexError:
            # <li> without a link: not a judge entry.
            continue
        else:
            judge_info.append((name, url))
    for judge_name, judge_link in judge_info:
        judge_r = requests.get(judge_link)
        judge_html = html.fromstring(judge_r.text)
        judge_html.make_links_absolute(base_href)
        try:
            img_path = judge_html.xpath(
                '//div[@class = "judge_portrait"]//img/@src'
            )[0]
        except IndexError:
            print("Failed to find image for %s" % judge_link)
            continue
        # Stream the portrait straight to disk under a slug-based filename.
        img_r = requests.get(img_path, stream=True)
        if img_r.status_code == 200:
            slug = make_slug(judge_name)
            if not slug:
                continue
            with open(slug + ".jpeg", "wb") as f_img:
                img_r.raw.decode_content = True
                shutil.copyfileobj(img_r.raw, f_img)
            img_hash = get_hash_from_file(slug + ".jpeg")
            # Update judges.json
            judge_pics[slug] = {
                "artist": None,
                "date_created": None,
                "license": "Work of Federal Government",
                "source": judge_link,
                "hash": img_hash,
            }
            json.dump(
                judge_pics,
                open(os.path.join(judge_root, "judges.json"), "w"),
                sort_keys=True,
                indent=2,
            )
|
import argparse
import numpy as np
import pandas as pd
import re
import sys
import urllib.request
import yaml
from urllib.parse import urljoin, urlencode
from bs4 import BeautifulSoup
from transliterate import translit
"""
Check for version of Python and exit with error when version is older than 3
"""
def check_version():
    """Exit with an error message when running under Python older than 3."""
    # sys.version_info is the supported way to read the interpreter version;
    # the original regexed sys.version with an unescaped '\\d' pattern
    # (a DeprecationWarning and needlessly fragile).
    if sys.version_info.major < 3:
        sys.exit('Use version 3 at least\n')
"""
Get content from specified URL
"""
def get_content(url, language):
    """Download *url* and return its body decoded as UTF-8.

    The request carries a desktop-browser User-Agent and a 'lang' cookie so
    the site serves the page in the requested interface language.
    """
    req = urllib.request.Request(
        url,
        data=None,
        headers={
            'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' +
            '(KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36',
            'Cookie': 'lang={}'.format(language)
        }
    )
    # Fix: close the HTTP response when done (the original leaked it).
    with urllib.request.urlopen(req) as f:
        return f.read().decode('utf-8')
"""
Parse command line arguments or print usage information
"""
def parse_arguments():
    """Build the command-line parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Gather and process public transportation data from transphoto.org')
    # (flags, add_argument keyword arguments), in display order.
    options = [
        (('-c', '--city'),
         dict(help='number or name (default city is Moscow)', default='')),
        (('-t', '--type'),
         dict(help='transportation type (1-9 or name, default value is tram)',
              default='')),
        (('-l', '--language'),
         dict(dest='code',
              help='ISO 639-1 language code (default is ru for Russian)',
              default='')),
        (('-o', '--output'),
         dict(dest='file', help='output frequency table to file')),
    ]
    for flags, kwargs in options:
        arg_parser.add_argument(*flags, **kwargs)
    return arg_parser.parse_args()
"""
Main function
"""
def main():
    """Entry point: scrape rolling-stock build years and report a frequency table.

    Reads defaults from config.yml, resolves city/type/language from the CLI
    arguments, pages through the wagon listing, then either writes the year
    counts to a file (CSV/HTML/JSON/XLSX) or prints a text histogram.
    """
    check_version()
    years = []   # build years of every matching vehicle
    title = ''
    with open('config.yml') as file:
        config = yaml.full_load(file)
    """
    Parse arguments
    and search for appropriate codes of city and transportation type
    """
    args = parse_arguments()
    language = args.code
    lang = re.search(r'^([a-z]{2})', language)
    language = lang.group(0) if lang else config.get('language')
    city = args.city
    if not re.match(r'^\d+$', city):
        # try to guess id by name
        cities = config.get('cities')
        # append dictionary of cities, read data from web
        soup = BeautifulSoup(get_content(config.get('url')['cities'], language), 'lxml')
        table = soup.find('div', attrs={'class': 'p20w'})
        links = table.find_all('a')
        for link in links:
            city_name = link.text.lower()
            id = re.search(r'\d+', link.get('href'))
            if id:
                city_id = id.group(0)
                cities[city_name] = city_id
                # Cyrillic names also get transliterated aliases so users can
                # type them in Latin letters. (Fix: the character class was
                # mojibake-garbled; restored to uppercase Cyrillic A-Ya + Yo.)
                if re.match(r'[А-ЯЁ]', city_name, re.I):
                    transliterated = translit(city_name, reversed=True)
                    cities[transliterated] = city_id
                    if 'j' in transliterated:
                        cities[transliterated.replace('j', 'y')] = city_id
        city = cities.get(city.lower(), cities['default'])
    kind = args.type
    if not re.match(r'^\d+$', kind):
        kinds = config.get('types')
        kind = kinds.get(kind.lower(), kinds['default'])
    output = args.file
    if output and not re.search(r'\.(csv|html?|js(on)?|xlsx?)$', output, re.I):
        # Fix: .format() was previously applied to the second literal only
        # (the two strings were joined with '+'), so the {} placeholder was
        # never substituted; the message also misspelled XLSX and omitted JSON.
        sys.exit('Output file {} has unknown type. '
                 'Only CSV, HTML, JSON, and XLSX are available.\n'.format(output))
    service = 0  # only for passengers
    url = urljoin(
        config.get('url')['wagons'],
        '?' + urlencode({'t': kind, 'cid': city, 'serv': service}))
    while url:
        soup = BeautifulSoup(get_content(url, language), 'lxml')
        next_link = soup.find('a', attrs={'id': 'NextLink'})
        url = urljoin(url, next_link.get('href')) if next_link else None
        if not title:
            title = soup.find('h2').text
        # get tables wrapped by div.rtable
        tables = soup.find_all('div', attrs={'class': 'rtable'})
        for table in tables:
            rows = table.find_all('tr')
            for row in rows:
                # suitable rows have class s1 or s11; rows without a class
                # attribute yield None, so default to an empty list instead
                # of crashing on the membership test.
                row_classes = row.get('class') or []
                if not ('s1' in row_classes or 's11' in row_classes):
                    continue
                cells = row.find_all('td')
                if not cells:
                    continue
                # header row doesn't contain any <td> cells
                built = cells[3].text  # YYYY, mm.YYYY or YYYY-mm
                if built:
                    matched = re.search(r'\d{4}', built)
                    if matched:
                        years.append(int(matched.group(0)))
    if years:
        series = pd.Series(years)
        counts = pd.DataFrame({'count': series.value_counts()}).sort_index()
        if output:
            if re.search(r'\.csv$', output, re.I):
                counts.to_csv(output)
            elif re.search(r'\.html?$', output, re.I):
                counts.to_html(output)
            elif re.search(r'\.js(on)?$', output, re.I):
                counts.to_json(output)
            elif re.search(r'\.xlsx?$', output, re.I):
                counts.to_excel(output)
        else:
            print(title, '-' * len(title), sep='\n')
            for year, count in counts['count'].items():
                print('{:<4} {:>6} {}'.format(year, count, '#' * count))
            print('-' * 12)  # year + gap + count
            print('Total {:>6}'.format(len(years)))
            print(
                'Mean: {:.5}, median: {:.5}, modes: {}'.format(
                    series.mean(),
                    series.quantile(),  # median
                    series.mode().to_list(),
                )
            )
    else:
        print('No data')
if __name__ == '__main__':
    main()
|
# Read name/weight pairs until the user stops, then report the heaviest
# and lightest people.
geral = list()        # list of [name, weight] records
nomepeso = list()     # scratch pair being built for the current person
maiorpeso = list()    # all weights, used for max()/min() lookups
while True:
    nomepeso.append(str(input('Nome: ')))
    nomepeso.append(int(input('Peso: ')))
    geral.append(nomepeso[:])
    nomepeso.clear()
    sn = str(input('Deseja continuar? [S/N] '))
    if sn in 'Nn':
        break
for c in geral:
    maiorpeso.append(c[1])
print('O maior peso foi {}Kg de '.format(max(maiorpeso)),end=' ')
for p in geral:
    if p[1] == max(maiorpeso):
        print(p[0],end=', ')
print('\nOs mais leves foram {}'.format(min(maiorpeso)),end=' ')
for p in geral:
    # Fix: the original compared the whole [name, weight] record to the
    # minimum weight (p == min(...)), so the lightest names never printed.
    if p[1] == min(maiorpeso):
        print(p[0])
|
import db
def GetResourceTypeMap(vars):
    """Return a resource_type_content -> resource_type_id map from the core DB."""
    core = vars['core']
    selector = db.Selector(core['host'], core['user'], core['password'], core['port'], core['db'])
    rows = selector.query("SELECT * FROM resource_types")
    return {row['resource_type_content']: row['resource_type_id'] for row in rows}
def GetCollaborationTypeMap(vars):
    """Return a name -> id map of collaboration types from the core DB."""
    core = vars['core']
    selector = db.Selector(core['host'], core['user'], core['password'], core['port'], core['db'])
    rows = selector.query("SELECT * FROM collaboration_types")
    return {row['name']: row['id'] for row in rows}
def GetProblemTypeMap(vars):
    """Return a name -> id map of problem types from the core DB."""
    core = vars['core']
    selector = db.Selector(core['host'], core['user'], core['password'], core['port'], core['db'])
    rows = selector.query("SELECT * FROM problem_types")
    return {row['name']: row['id'] for row in rows}
def GetObservedEventTypeMap(vars):
    """Return a name -> id map of observed event types from the core DB."""
    core = vars['core']
    selector = db.Selector(core['host'], core['user'], core['password'], core['port'], core['db'])
    rows = selector.query("SELECT * FROM observed_event_types")
    return {row['name']: row['id'] for row in rows}
def GetUserTypeMap(vars):
    """Return a name -> id map of user types from the core DB."""
    core = vars['core']
    selector = db.Selector(core['host'], core['user'], core['password'], core['port'], core['db'])
    rows = selector.query("SELECT * FROM user_types")
    return {row['name']: row['id'] for row in rows}
#!/usr/bin/python
"""
ZetCode PyQt6 tutorial
This example shows a QCalendarWidget widget.
Author: Jan Bodnar
Website: zetcode.com
"""
from PyQt6.QtWidgets import (QWidget, QCalendarWidget,
QLabel, QApplication, QVBoxLayout)
from PyQt6.QtCore import QDate
import sys
class Example(QWidget):
    """Window showing a QCalendarWidget; a label echoes the selected date."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build the layout: a grid calendar on top, the date label below."""
        layout = QVBoxLayout(self)
        calendar = QCalendarWidget(self)
        calendar.setGridVisible(True)
        calendar.clicked[QDate].connect(self.showDate)
        layout.addWidget(calendar)
        self.lbl = QLabel(self)
        self.lbl.setText(calendar.selectedDate().toString())
        layout.addWidget(self.lbl)
        self.setLayout(layout)
        self.setGeometry(300, 300, 350, 300)
        self.setWindowTitle('Calendar')
        self.show()
    def showDate(self, date):
        """Slot: update the label with the clicked date."""
        self.lbl.setText(date.toString())
def main():
    """Create the Qt application, show the Example window, and run the event loop."""
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec())
if __name__ == '__main__':
    main()
|
import random

# DirectInput (DIK) keyboard scan codes.
DIK_1 = 0x02
DIK_2 = 0x03
DIK_3 = 0x04
DIK_4 = 0x05
DIK_5 = 0x06
DIK_6 = 0x07
DIK_7 = 0x08
DIK_8 = 0x09
DIK_9 = 0x0A
DIK_0 = 0x0B
DIK_Q = 0x10
DIK_E = 0x12
DIK_R = 0x13
DIK_T = 0x14
DIK_Y = 0x15
DIK_U = 0x16
DIK_I = 0x17
DIK_O = 0x18
DIK_P = 0x19
DIK_F1 = 0x3B
DIK_F2 = 0x3C
DIK_F3 = 0x3D
DIK_F4 = 0x3E
DIK_F5 = 0x3F
DIK_F6 = 0x40
DIK_F7 = 0x41
DIK_F8 = 0x42
DIK_F9 = 0x43
DIK_F10 = 0x44

# Key groups selectable by randomFirstPanel's verib argument.
_PANEL_GROUPS = {
    1: [DIK_6, DIK_7, DIK_8, DIK_9, DIK_0],
    2: [DIK_1, DIK_2, DIK_3, DIK_4, DIK_5],
    3: [DIK_Q, DIK_E, DIK_R, DIK_T],
    4: [DIK_Y, DIK_U, DIK_I],
    5: [DIK_O, DIK_P],
}

def randomFirstPanel(verib):
    """Return a random DIK scan code from the key group selected by verib (1-5).

    Fix: the original left Typelist unbound for any verib outside 1-5 and
    crashed with UnboundLocalError; an unknown group now raises a clear
    ValueError instead.
    """
    if verib not in _PANEL_GROUPS:
        raise ValueError('verib must be between 1 and 5, got {!r}'.format(verib))
    return random.choice(_PANEL_GROUPS[verib])
__all__ = ['to_dt', 'Config']
import argparse
from dataclasses import dataclass
import datetime as dt
import json
import os
from pathlib import Path
from typing import Tuple, Dict, Optional, TYPE_CHECKING
import jsonschema
import numpy as np
from slim.types.TreatmentTypes import Treatment, TreatmentParams, GeneticMechanism, EMB, Money, Thermolicer
if TYPE_CHECKING:
from slim.simulation.lice_population import LifeStage, GenoDistribDict
def to_dt(string_date) -> dt.datetime:
    """Parse a 'YYYY-MM-DD HH:MM:SS' timestamp string into a datetime.

    :param string_date: date as a string timestamp
    :type string_date: str
    :return: the parsed datetime
    :rtype: dt.datetime
    """
    return dt.datetime.strptime(string_date, "%Y-%m-%d %H:%M:%S")
def override(data, override_options: dict):
    """Overwrite entries of *data* in place with the non-None values from
    *override_options*; keys absent from *data* are ignored."""
    for key, value in override_options.items():
        if value is not None and key in data:
            data[key] = value
class RuntimeConfig:
    """Simulation parameters and constants, loaded from a JSON hyperparameter
    file and validated against the config.schema.json next to it."""
    def __init__(self, hyperparam_file, _override_options):
        with open(hyperparam_file) as f:
            data = json.load(f)
        # CLI/user overrides are applied before schema validation.
        override(data, _override_options)
        hyperparam_dir = Path(hyperparam_file).parent
        with (hyperparam_dir / "config.schema.json").open() as f:
            schema = json.load(f)
        jsonschema.validate(data, schema)
        # Evolution constants
        self.stage_age_evolutions: Dict[LifeStage, float] = data["stage_age_evolutions"]
        self.delta_p: Dict[LifeStage, float] = data["delta_p"]
        self.delta_s: Dict[LifeStage, float] = data["delta_s"]
        self.delta_m10: Dict[LifeStage, float] = data["delta_m10"]
        self.smolt_mass_params = SmoltParams(**data["smolt_mass_params"])
        # Infection constants
        self.infection_main_delta: float = data["infection_main_delta"]
        self.infection_weight_delta: float = data["infection_weight_delta"]
        self.delta_expectation_weight_log: float = data["delta_expectation_weight_log"]
        # Treatment constants (order in the JSON array is significant:
        # index 0 = EMB, index 1 = thermolicer; see Config.get_treatment).
        self.emb = EMB(data["treatments"][0])
        self.thermolicer = Thermolicer(data["treatments"][1])
        # Fish mortality constants
        self.fish_mortality_center: float = data["fish_mortality_center"]
        self.fish_mortality_k: float = data["fish_mortality_k"]
        self.male_detachment_rate: float = data["male_detachment_rate"]
        # Background lice mortality constants
        self.background_lice_mortality_rates: Dict[LifeStage, float] = data["background_lice_mortality_rates"]
        # Reproduction and recruitment constants
        self.reproduction_eggs_first_extruded: int = data["reproduction_eggs_first_extruded"]
        self.reproduction_age_dependence: float = data["reproduction_age_dependence"]
        self.dam_unavailability: int = data["dam_unavailability"]
        self.genetic_mechanism = GeneticMechanism[data["genetic_mechanism"].upper()]
        self.geno_mutation_rate: float = data["geno_mutation_rate"]
        # TODO: take into account processing of non-discrete keys
        self.reservoir_offspring_integration_ratio: float = data["reservoir_offspring_integration_ratio"]
        self.reservoir_offspring_average: int = data["reservoir_offspring_average"]
        # Other reward/payoff constants
        self.gain_per_kg = Money(data["gain_per_kg"])
        # Other constraints
        self.aggregation_rate_threshold: float = data["aggregation_rate_threshold"]
        # load in the seed if provided
        # otherwise don't use a seed
        self.seed = data.get("seed", 0)
        self.rng = np.random.default_rng(seed=self.seed)
class Config(RuntimeConfig):
    """One-stop class to hold constants, farm setup and other settings."""
    def __init__(
        self,
        config_file: str,
        simulation_dir: str,
        override_params: Optional[dict]= None,
        save_rate: Optional[int] = None
    ):
        """Read the configuration from files
        :param config_file: Path to the environment JSON file
        :type config_file: string
        :param simulation_dir: path to the simulator parameters JSON file
        :param override_params: options that override the config
        :param save_rate: if not null it determines how often (in terms of days) the simulator saves the state.
        """
        if override_params is None:
            override_params = dict()
        # RuntimeConfig loads and validates the hyperparameter file.
        super().__init__(config_file, override_params)
        # read and set the params
        with open(os.path.join(simulation_dir, "params.json")) as f:
            data = json.load(f)
        override(data, override_params)
        # The schema lives one level above the simulation directory.
        with open(os.path.join(simulation_dir, "../params.schema.json")) as f:
            schema = json.load(f)
        jsonschema.validate(data, schema)
        # time and dates
        self.start_date = to_dt(data["start_date"])
        self.end_date = to_dt(data["end_date"])
        # Experiment-specific genetic ratios
        self.min_ext_pressure = data["ext_pressure"]
        # Genotype keys like "a,B" are normalized to sorted tuples ("B", "a" sorted).
        self.initial_genetic_ratios: GenoDistribDict = {
            tuple(sorted(key.split(","))): val for key, val in data["genetic_ratios"].items()}
        self.monthly_cost = Money(data["monthly_cost"])
        self.name: str = data["name"]
        # farms
        self.farms = [FarmConfig(farm_data)
                      for farm_data in data["farms"]]
        self.nfarms = len(self.farms)
        # Inter-farm travel times/probabilities and loch temperatures as CSV matrices.
        self.interfarm_times = np.loadtxt(os.path.join(simulation_dir, "interfarm_time.csv"), delimiter=",")
        self.interfarm_probs = np.loadtxt(os.path.join(simulation_dir, "interfarm_prob.csv"), delimiter=",")
        self.loch_temperatures = np.loadtxt(os.path.join(simulation_dir, "temperatures.csv"), delimiter=",")
        # driver-specific settings
        self.save_rate = save_rate
    def get_treatment(self, treatment_type: Treatment) -> TreatmentParams:
        """Return the treatment parameters matching the given Treatment enum value."""
        return [self.emb, self.thermolicer][treatment_type.value]
    @staticmethod
    def generate_argparse_from_config(cfg_schema_path: str, simulation_schema_path: str):  # pragma: no cover
        """Build an ArgumentParser whose options mirror the two JSON schemas."""
        parser = argparse.ArgumentParser(description="Sea lice simulation")
        # TODO: we are parsing the config twice.
        with open(cfg_schema_path) as fp:
            cfg_dict: dict = json.load(fp)
        with open(simulation_schema_path) as fp:
            simulation_dict: dict = json.load(fp)
        def add_to_group(group_name, data):
            # Translate each schema property into an optional --flag.
            group = parser.add_argument_group(group_name)
            schema_types_to_python = {
                "string": str,
                "number": float,
                "integer": int
            }
            for k, v in data.items():
                choices = None
                nargs = None
                type_ = None
                if type(v) != dict:
                    continue
                if "type" not in v:
                    if "enum" in v:
                        choices = v["enum"]
                        type_ = "string"
                    else:
                        # skip property
                        continue
                else:
                    type_ = v["type"]
                if type_ == "array":
                    nargs = v.get("minLength", "*")
                    if "items" in v:
                        type_ = v["items"]["type"]  # this breaks with object arrays
                if type_ == "object":
                    continue  # TODO: deal with them later, e.g. prop_a.prop_b for dicts?
                description = v["description"]
                value_type = schema_types_to_python.get(type_, type_)
                group.add_argument(f"--{k.replace('_', '-')}",
                                   type=value_type, help=description, choices=choices, nargs=nargs)
        add_to_group("Organisation parameters", simulation_dict["properties"])
        add_to_group("Runtime parameters", cfg_dict["properties"])
        return parser
class FarmConfig:
    """Config for individual farm"""
    def __init__(self, data: dict):
        """Create farm configuration
        :param data: Dictionary with farm data (one entry of the "farms" list
            in params.json)
        """
        # set params
        self.num_fish: int = data["num_fish"]
        self.n_cages: int = data["ncages"]
        self.farm_location: Tuple[int, int] = data["location"]
        self.farm_start = to_dt(data["start_date"])
        # One start date per cage.
        self.cages_start = [to_dt(date)
                            for date in data["cages_start_dates"]]
        self.max_num_treatments: int = data["max_num_treatments"]
        self.sampling_spacing: int = data["sampling_spacing"]
        # TODO: a farm may employ different chemicals
        self.treatment_type = Treatment[data["treatment_type"].upper()]
        # Defection probability
        self.defection_proba: float = data["defection_proba"]
        # fixed treatment schedules
        self.treatment_starts = [to_dt(date) for date in data["treatment_dates"]]
@dataclass
class SmoltParams:
    """Parameters of the smolt mass curve consumed by
    RuntimeConfig.smolt_mass_params."""
    max_mass: float
    skewness: float
    x_shift: float
|
import unittest
from datetime import datetime
from datetime import timedelta
from smtm import MassSimulator
from unittest.mock import *
class MassSimulatorUtilTests(unittest.TestCase):
    """Tests for the MassSimulator.memory_usage utility."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @patch("builtins.print")
    @patch("psutil.Process")
    def test_memory_usage_should_print_correctly(self, mock_process, mock_print):
        # 777000000.123... bytes / 2**20 == 741.00494 MB (5 decimals),
        # which is what the printed message is expected to show.
        dummy_memory_info_return = MagicMock()
        dummy_memory_info_return.rss = 777000000.123456789
        dummy_process_return = MagicMock()
        dummy_process_return.memory_info.return_value = dummy_memory_info_return
        mock_process.return_value = dummy_process_return
        MassSimulator.memory_usage()
        mock_print.assert_called_with("[MainProcess] memory usage: 741.00494 MB")
class MassSimulatorAnalyzeTests(unittest.TestCase):
    """Tests for analyze_result and draw_graph."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @patch("builtins.open", new_callable=mock_open)
    def test_analyze_result_should_call_file_write_correctly(self, mock_file):
        mass = MassSimulator()
        dummy_config = {
            "title": "BnH-2Hour",
            "budget": 50000,
            "strategy": 0,
            "interval": 1,
            "currency": "BTC",
            "description": "mass-simluation-unit-test",
            "period_list": [
                {"start": "2020-04-30T17:00:00", "end": "2020-04-30T19:00:00"},
                {"start": "2020-04-30T18:00:00", "end": "2020-04-30T20:00:00"},
            ],
        }
        # Each tuple is one simulation's score row; index 2 is the period's
        # rate of return used by the analysis below.
        dummy_result = [
            (0, 0, 1.12, 0, 0, 0, 2.99, 1.88),
            (0, 0, 2.25, 0, 0, 0, 1.99, -1.88),
            (0, 0, 2.01, 0, 0, 0, 4.99, 2.88),
        ]
        mass.draw_graph = MagicMock()
        mass.analyze_result(dummy_result, dummy_config)
        # Second open() call is the report file: output/<title>.result, "w", utf-8.
        self.assertEqual(mock_file.call_args_list[1][0][0], "output/BnH-2Hour.result")
        self.assertEqual(mock_file.call_args_list[1][0][1], "w")
        self.assertEqual(mock_file.call_args_list[1][1]["encoding"], "utf-8")
        handle = mock_file()
        # Expected report lines, in write order (returns sorted descending).
        expected = [
            "Title: BnH-2Hour\n",
            "Description: mass-simluation-unit-test\n",
            "Strategy: BnH, Budget: 50000, Currency: BTC\n",
            "2020-04-30T17:00:00 ~ 2020-04-30T20:00:00 (3)\n",
            "์์ต๋ฅ ํ๊ท : 1.793\n",
            "์์ต๋ฅ ํธ์ฐจ: 0.595\n",
            "์์ต๋ฅ ์ต๋: 2.25, 1\n",
            "์์ต๋ฅ ์ต์: 1.12, 0\n",
            "์๋ฒ, ์ธ๋ฑ์ค, ๊ตฌ๊ฐ ์์ต๋ฅ , ์ต๋ ์์ต๋ฅ , ์ต์ ์์ต๋ฅ ===\n",
            "    1,     1,     2.25,    -1.88,     1.99\n",
            "    2,     2,     2.01,     2.88,     4.99\n",
            "    3,     0,     1.12,     1.88,     2.99\n",
        ]
        for idx, val in enumerate(expected):
            self.assertEqual(
                handle.write.call_args_list[idx][0][0],
                val,
            )
        # analyzed_result caches (mean, stdev, max, min) of the returns.
        self.assertEqual(mass.analyzed_result[0], 1.793)
        self.assertEqual(mass.analyzed_result[1], 0.595)
        self.assertEqual(mass.analyzed_result[2], 2.25)
        self.assertEqual(mass.analyzed_result[3], 1.12)
        mass.draw_graph.assert_called_with(
            [1.12, 2.25, 2.01], mean=1.793, filename="output/BnH-2Hour.jpg"
        )
    @patch("matplotlib.pyplot.bar")
    @patch("matplotlib.pyplot.plot")
    @patch("matplotlib.pyplot.savefig")
    def test_draw_graph_should_call_plt_correctly(self, mock_savefig, mock_plot, mock_bar):
        MassSimulator.draw_graph([1.12, 2.25, 2.01], mean=1.793, filename="mango.jpg")
        # Bars for each return, a red horizontal mean line, then the save.
        mock_bar.assert_called_once_with([0, 1, 2], [1.12, 2.25, 2.01])
        mock_plot.assert_called_once_with([1.793, 1.793, 1.793], "r")
        mock_savefig.assert_called_once_with("mango.jpg", dpi=300, pad_inches=0.25)
class MassSimulatorInitializeTests(unittest.TestCase):
    """Tests for get_initialized_operator wiring."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @patch("smtm.SimulationDataProvider.initialize_simulation")
    @patch("smtm.SimulationTrader.initialize_simulation")
    @patch("smtm.SimulationOperator.initialize")
    @patch("smtm.SimulationOperator.set_interval")
    def test_get_initialized_operator_should_initialize_correctly(
        self, mock_interval, mock_op_init, mock_tr_init, mock_dp_init
    ):
        budget = 50000
        strategy_num = 1
        interval = 60
        currency = "BTC"
        start = "2020-04-30T17:00:00"
        end = "2020-04-30T18:00:00"
        tag = "mango-test"
        operator = MassSimulator.get_initialized_operator(
            budget, strategy_num, interval, currency, start, end, tag
        )
        # Data provider and trader get the simulation window; the operator
        # gets the budget and interval, and is tagged for identification.
        mock_dp_init.assert_called_once_with(end=end, count=60)
        mock_tr_init.assert_called_once_with(end=end, count=60, budget=budget)
        mock_op_init.assert_called_once_with(ANY, ANY, ANY, ANY, budget=budget)
        mock_interval.assert_called_once_with(60)
        self.assertEqual(operator.tag, tag)
class MassSimulatorRunTests(unittest.TestCase):
    """Tests for run (period partitioning) and run_single (operator lifecycle)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @patch("smtm.LogManager.set_stream_level")
    def test_run_should_call_run_simulation_correctly(self, mock_set_stream_level):
        mass = MassSimulator()
        dummy_config = {
            "title": "BnH-2Hour",
            "budget": 50000,
            "strategy": 0,
            "interval": 1,
            "currency": "BTC",
            "description": "mass-simluation-unit-test",
            "period_list": [
                {"start": "2020-04-30T17:00:00", "end": "2020-04-30T19:00:00"},
                {"start": "2020-04-30T18:00:00", "end": "2020-04-30T20:00:00"},
            ],
        }
        mass._load_config = MagicMock(return_value=dummy_config)
        mass.analyze_result = MagicMock()
        mass.print_state = MagicMock()
        mass._execute_simulation = MagicMock()
        mass.run("mass_config_file_name", 2)
        mass._load_config.assert_called_once_with("mass_config_file_name")
        # With 2 processes, the two periods are split into two partial
        # configs, each carrying its partial_idx and original period index.
        self.assertEqual(
            mass._execute_simulation.call_args[0][0],
            [
                {
                    "title": "BnH-2Hour",
                    "budget": 50000,
                    "strategy": 0,
                    "interval": 1,
                    "currency": "BTC",
                    "partial_idx": 0,
                    "partial_period_list": [
                        {
                            "idx": 0,
                            "period": {
                                "start": "2020-04-30T17:00:00",
                                "end": "2020-04-30T19:00:00",
                            },
                        },
                    ],
                },
                {
                    "title": "BnH-2Hour",
                    "budget": 50000,
                    "strategy": 0,
                    "interval": 1,
                    "currency": "BTC",
                    "partial_idx": 1,
                    "partial_period_list": [
                        {
                            "idx": 1,
                            "period": {
                                "start": "2020-04-30T18:00:00",
                                "end": "2020-04-30T20:00:00",
                            },
                        },
                    ],
                },
            ],
        )
        self.assertEqual(mass._execute_simulation.call_args[0][1], 2)
        mass.analyze_result.assert_called_once_with(mass.result, dummy_config)
        mass.print_state.assert_called()
    def test_run_single_should_start_and_stop_operator(self):
        # run_single drives the full operator lifecycle exactly once each.
        mock_op = MagicMock()
        MassSimulator.run_single(mock_op)
        mock_op.start.assert_called_once()
        mock_op.stop.assert_called_once()
        mock_op.get_score.assert_called_once()
class MassSimulatorTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch("json.dump")
@patch("builtins.open", new_callable=mock_open)
def test_make_config_json_should_make_json_file_correctly(self, mock_file, mock_json):
result = MassSimulator.make_config_json(
title="get_money",
budget=50000000000,
strategy_num=7,
interval=0.777,
currency="USD",
from_dash_to="210804.000000-210804.030000",
offset_min=120,
)
self.assertEqual(result, "output/generated_config.json")
config = mock_json.call_args[0][0]
self.assertEqual(config["title"], "get_money")
self.assertEqual(config["budget"], 50000000000)
self.assertEqual(config["strategy"], 7)
self.assertEqual(config["interval"], 0.777)
self.assertEqual(config["currency"], "USD")
self.assertEqual(len(config["period_list"]), 2)
self.assertEqual(config["period_list"][0]["start"], "2021-08-04T00:00:00")
self.assertEqual(config["period_list"][0]["end"], "2021-08-04T02:00:00")
self.assertEqual(config["period_list"][1]["start"], "2021-08-04T02:00:00")
self.assertEqual(config["period_list"][1]["end"], "2021-08-04T04:00:00")
@patch("builtins.print")
def test_print_state_print_correctly_when_is_start_true(self, mock_print):
mass = MassSimulator()
mass.config = {
"title": "mass simulation test",
"currency": "ETH",
"description": "unit test config",
"budget": 5000000,
"strategy": "show me the money",
"period_list": [{"start": "today", "end": "tomorrow"}],
}
mass.print_state(is_start=True)
self.assertEqual(
mock_print.call_args_list[1][0][0], "Title: mass simulation test, Currency: ETH"
)
self.assertEqual(mock_print.call_args_list[2][0][0], "Description: unit test config")
self.assertEqual(
mock_print.call_args_list[3][0][0], "Budget: 5000000, Strategy: show me the money"
)
self.assertEqual(mock_print.call_args_list[4][0][0], "today ~ tomorrow (1)")
self.assertEqual(
mock_print.call_args_list[6][0][0].find("+0 simulation start!"), 24
)
@patch("builtins.print")
def test_print_state_print_correctly_when_is_end_true(self, mock_print):
mass = MassSimulator()
mass.config = {
"title": "mass simulation test",
"currency": "ETH",
"description": "unit test config",
"budget": 5000000,
"strategy": "show me the money",
"period_list": [{"start": "today", "end": "tomorrow"}],
}
mass.analyzed_result = (123.45, 456, 789, 10)
mass.start = mass.last_print = datetime.now() - timedelta(seconds=4)
mass.print_state(is_end=True)
self.assertEqual(mock_print.call_args_list[0][0][0].find("simulation completed"), 36)
self.assertEqual(mock_print.call_args_list[2][0][0], "์์ต๋ฅ ํ๊ท : 123.45")
self.assertEqual(mock_print.call_args_list[3][0][0], "์์ต๋ฅ ํธ์ฐจ: 456")
self.assertEqual(mock_print.call_args_list[4][0][0], "์์ต๋ฅ ์ต๋: 789")
self.assertEqual(mock_print.call_args_list[5][0][0], "์์ต๋ฅ ์ต์: 10")
@patch("builtins.print")
def test_print_state_print_correctly(self, mock_print):
mass = MassSimulator()
mass.start = mass.last_print = datetime.now() - timedelta(seconds=4)
mass.print_state()
self.assertEqual(mock_print.call_args[0][0].find("simulation is running"), 36)
def test__update_result_should_update_result_correctly(self):
mass = MassSimulator()
mass.result.append(0)
mass._update_result([{"idx": 0, "result": "mango"}])
self.assertEqual(mass.result[0], "mango")
def test__execute_single_process_simulation_should_run_simulation(self):
dummy_config = {
"title": "BnH-2Hour",
"budget": 50000,
"strategy": 0,
"interval": 1,
"currency": "BTC",
"description": "mass-simluation-unit-test",
"partial_idx": 1,
"partial_period_list": [
{
"idx": 7,
"period": {"start": "2020-04-30T17:00:00", "end": "2020-04-30T19:00:00"},
},
{
"idx": 8,
"period": {"start": "2020-04-30T18:00:00", "end": "2020-04-30T20:00:00"},
},
],
}
backup_run_single = MassSimulator.run_single
backup_memory_usage = MassSimulator.memory_usage
MassSimulator.run_single = MagicMock(return_value="mango_result")
MassSimulator.memory_usage = MagicMock()
MassSimulator.get_initialized_operator = MagicMock(return_value="dummy_operator")
result = MassSimulator._execute_single_process_simulation(dummy_config)
MassSimulator.memory_usage.assert_called_once()
MassSimulator.run_single.assert_called_with("dummy_operator")
self.assertEqual(result[0]["idx"], 7)
self.assertEqual(result[0]["result"], "mango_result")
self.assertEqual(result[1]["idx"], 8)
self.assertEqual(result[1]["result"], "mango_result")
self.assertEqual(MassSimulator.get_initialized_operator.call_args_list[0][0][0], 50000)
self.assertEqual(MassSimulator.get_initialized_operator.call_args_list[0][0][1], 0)
self.assertEqual(MassSimulator.get_initialized_operator.call_args_list[0][0][2], 1)
self.assertEqual(MassSimulator.get_initialized_operator.call_args_list[0][0][3], "BTC")
self.assertEqual(
MassSimulator.get_initialized_operator.call_args_list[0][0][4], "2020-04-30T17:00:00"
)
self.assertEqual(
MassSimulator.get_initialized_operator.call_args_list[0][0][5], "2020-04-30T19:00:00"
)
self.assertEqual(MassSimulator.get_initialized_operator.call_args_list[1][0][0], 50000)
self.assertEqual(MassSimulator.get_initialized_operator.call_args_list[1][0][1], 0)
self.assertEqual(MassSimulator.get_initialized_operator.call_args_list[1][0][2], 1)
self.assertEqual(MassSimulator.get_initialized_operator.call_args_list[1][0][3], "BTC")
self.assertEqual(
MassSimulator.get_initialized_operator.call_args_list[1][0][4], "2020-04-30T18:00:00"
)
self.assertEqual(
MassSimulator.get_initialized_operator.call_args_list[1][0][5], "2020-04-30T20:00:00"
)
MassSimulator.run_single = backup_run_single
MassSimulator.memory_usage = backup_memory_usage
def test_make_chunk_should_make_chunk_list_from_original_list(self):
    """make_chunk should split a list into the requested number of sub-lists,
    distributing any remainder across the leading chunks."""
    numbers = [1, 2, 3, 4, 5, 6, 7]
    number_cases = (
        (3, [[1, 2, 3], [4, 5], [6, 7]]),
        (4, [[1, 2], [3, 4], [5, 6], [7]]),
        (2, [[1, 2, 3, 4], [5, 6, 7]]),
    )
    for chunk_count, expected_chunks in number_cases:
        chunks = MassSimulator.make_chunk(numbers, chunk_count)
        for position, expected in enumerate(expected_chunks):
            self.assertEqual(chunks[position], expected)

    letters = ["a", "b", "c", "d", "e"]
    letter_cases = (
        (2, [["a", "b", "c"], ["d", "e"]]),
        (4, [["a", "b"], ["c"], ["d"], ["e"]]),
    )
    for chunk_count, expected_chunks in letter_cases:
        chunks = MassSimulator.make_chunk(letters, chunk_count)
        for position, expected in enumerate(expected_chunks):
            self.assertEqual(chunks[position], expected)
|
from SnakeGame import SnakeGame
import time
class Test(object):
    """Driver that trains a SnakeGame agent and then evaluates it."""

    def __init__(self):
        # The game is created lazily in run(); nothing exists until then.
        self.game = None

    def run(self):
        # NOTE(review): reinforcement_learning=False presumably selects the
        # population-based (evolutionary) agent — confirm in SnakeGame.
        self.game = SnakeGame(reinforcement_learning=False, population_size=10)
        self.game.train_agent(30)
        self.game.test_agent(100)
        # self.game.game_loop()
|
from .pconv2d_layer import myPConv2D
from .pconv2d_loss import total_loss
from keras.layers import BatchNormalization, Input
from keras.layers import ReLU, LeakyReLU, UpSampling2D, Concatenate
from keras.models import Model
from keras.optimizers import Adam
def encoder_block(input_img, input_mask, filters, kernel_size, batch_norm=True, freeze_bn=False, activation=None, count=''):
    """
    Encoder block of layers.

    Applies a stride-2 partial convolution (halving spatial size), then
    optionally BatchNorm and a ReLU activation.

    Parameters
    input_img: tensor, input image, output of an Input layer or a previous
        encoder block.
    input_mask: tensor, input binary mask, output of an Input layer or a
        previous encoder block.
    filters: integer, number of output channels.
    kernel_size: integer, width and height of the kernel.
    batch_norm: boolean, whether to apply BatchNorm to the feature map
        (before activation if applied).
    freeze_bn: boolean, whether to freeze the BatchNorm (fine-tuning stage).
    activation: whether to apply a ReLU activation at the end. The default
        (None) behaves like True, preserving the historical behavior; pass
        False explicitly to skip the activation.
    count: string, block count to append to the end of layers' names.
    """
    if count != '':
        count = '_' + count
    pconv, mask = myPConv2D(filters,
                            kernel_size,
                            strides=2,
                            padding='same',
                            use_bias=True,
                            kernel_initializer='he_uniform',
                            name='pconv2d_enc'+count
                            )([input_img, input_mask])
    if batch_norm:
        # training=not freeze_bn pins the BN batch statistics when fine-tuning.
        pconv = BatchNormalization(name='bn_enc'+count)(pconv, training=not freeze_bn)
    # Fix: `activation` was previously accepted but ignored — ReLU was always
    # applied. Honor an explicit activation=False while keeping the default
    # (apply ReLU) backward compatible for all existing callers.
    if activation is not False:
        pconv = ReLU(name='relu'+count)(pconv)
    return pconv, mask
def decoder_block(prev_up_img, prev_up_mask, enc_img, enc_mask, filters, last_layer=False, count=''):
    """
    Decoder block of layers.

    Up-samples the previous image/mask pair, concatenates each with the
    matching encoder feature, and applies a partial convolution.

    Parameters
    prev_up_img: previous image layer to up-sample.
    prev_up_mask: previous mask layer to up-sample.
    enc_img: image from encoder stage to concatenate with up-sampled image.
    enc_mask: mask from encoder stage to concatenate with up-sampled mask.
    filters: integer, number of output channels in the PConv2D layer.
    last_layer: boolean, whether this is the last decoder block (no mask will
        be returned, no BatchNorm and no activation will be applied).
    count: string, block count to append to the end of layers' names.
    """
    suffix = '' if count == '' else '_' + count
    upsampled_img = UpSampling2D(size=2, name='img_upsamp_dec' + suffix)(prev_up_img)
    upsampled_mask = UpSampling2D(size=2, name='mask_upsamp_dec' + suffix)(prev_up_mask)
    merged_img = Concatenate(name='img_concat_dec' + suffix)([upsampled_img, enc_img])
    merged_mask = Concatenate(name='mask_concat_dec' + suffix)([upsampled_mask, enc_mask])
    if last_layer:
        # Final block: returns the output image directly (no mask/BN/activation).
        return myPConv2D(filters, 3, strides=1, padding='same', use_bias=True,
                         kernel_initializer='he_uniform', last_layer=last_layer,
                         name='pconv2d_dec' + suffix)([merged_img, merged_mask])
    pconv, mask = myPConv2D(filters, 3, strides=1, padding='same', use_bias=True,
                            kernel_initializer='he_uniform',
                            name='pconv2d_dec' + suffix)([merged_img, merged_mask])
    pconv = BatchNormalization(name='bn_dec' + suffix)(pconv)
    pconv = LeakyReLU(alpha=0.2, name='leaky_dec' + suffix)(pconv)
    return pconv, mask
def pconv_model(fine_tuning=False, lr=0.0002, predict_only=False, image_size=(512, 512), vgg16_weights='imagenet'):
    """Inpainting model.

    Builds a U-Net-shaped partial-convolution network: 8 stride-2 encoder
    blocks followed by 8 decoder blocks with skip connections back to the
    matching encoder outputs.

    Parameters
    fine_tuning: boolean, freezes encoder BatchNorm layers (statistics and,
        via `trainable = False`, the beta/gamma parameters).
    lr: float, Adam learning rate (ignored when predict_only is True).
    predict_only: boolean, return the uncompiled model (for inference).
    image_size: (height, width) of the input image and mask.
    vgg16_weights: weights spec forwarded to the VGG16 used inside total_loss.
    """
    img_input = Input(shape=(image_size[0], image_size[1], 3), name='input_img')
    # NOTE(review): the mask is 3-channel, same shape as the image — presumably
    # one binary channel per color channel; confirm against myPConv2D.
    mask_input = Input(shape=(image_size[0], image_size[1], 3), name='input_mask')
    # Encoder:
    # --------
    # First block skips BatchNorm; the rest freeze BN when fine-tuning.
    e_img_1, e_mask_1 = encoder_block(img_input, mask_input, 64, 7, batch_norm=False, count='1')
    e_img_2, e_mask_2 = encoder_block(e_img_1, e_mask_1, 128, 5, freeze_bn=fine_tuning, count='2')
    e_img_3, e_mask_3 = encoder_block(e_img_2, e_mask_2, 256, 5, freeze_bn=fine_tuning, count='3')
    e_img_4, e_mask_4 = encoder_block(e_img_3, e_mask_3, 512, 3, freeze_bn=fine_tuning, count='4')
    e_img_5, e_mask_5 = encoder_block(e_img_4, e_mask_4, 512, 3, freeze_bn=fine_tuning, count='5')
    e_img_6, e_mask_6 = encoder_block(e_img_5, e_mask_5, 512, 3, freeze_bn=fine_tuning, count='6')
    e_img_7, e_mask_7 = encoder_block(e_img_6, e_mask_6, 512, 3, freeze_bn=fine_tuning, count='7')
    e_img_8, e_mask_8 = encoder_block(e_img_7, e_mask_7, 512, 3, freeze_bn=fine_tuning, count='8')
    # Decoder:
    # --------
    # Each block up-samples and concatenates with the mirrored encoder output.
    d_img_9, d_mask_9 = decoder_block(e_img_8, e_mask_8, e_img_7, e_mask_7, 512, count='9')
    d_img_10, d_mask_10 = decoder_block(d_img_9, d_mask_9, e_img_6, e_mask_6, 512, count='10')
    d_img_11, d_mask_11 = decoder_block(d_img_10, d_mask_10, e_img_5, e_mask_5, 512, count='11')
    d_img_12, d_mask_12 = decoder_block(d_img_11, d_mask_11, e_img_4, e_mask_4, 512, count='12')
    d_img_13, d_mask_13 = decoder_block(d_img_12, d_mask_12, e_img_3, e_mask_3, 256, count='13')
    d_img_14, d_mask_14 = decoder_block(d_img_13, d_mask_13, e_img_2, e_mask_2, 128, count='14')
    d_img_15, d_mask_15 = decoder_block(d_img_14, d_mask_14, e_img_1, e_mask_1, 64, count='15')
    # Last block concatenates with the raw inputs and returns only the image.
    d_img_16 = decoder_block(d_img_15, d_mask_15, img_input, mask_input, 3, last_layer=True, count='16')
    model = Model(inputs=[img_input, mask_input], outputs=d_img_16)
    # This will also freeze bn parameters `beta` and `gamma`:
    # NOTE(review): only encoder BN layers ('bn_enc*') are frozen; decoder BN
    # ('bn_dec*') stays trainable — presumably intentional, confirm against the
    # partial-convolution fine-tuning scheme.
    if fine_tuning:
        for l in model.layers:
            if 'bn_enc' in l.name:
                l.trainable = False
    if predict_only:
        return model
    # The loss closes over mask_input so it can weight hole vs. valid regions.
    model.compile(Adam(lr=lr), loss=total_loss(mask_input, vgg16_weights=vgg16_weights))
    return model
|
import sys
from data_storing.assets.common import Timespan
from utilities.common_methods import Methods as methods
from utilities.common_methods import getDebugInfo
from data_storing.assets.common import Benchmark
import fundamentals.miscellaneous as fund_utils
from utilities.exchange_rates import Exchange
from utilities import log
def get_price_to_cash_flow_ratio(equity, year=None, market_cap=None):
    """
    This ratio can be found by dividing the current price of the stock by its operating cash flow per share,
    Easy way is to get it from the ratios object extracted from investing.

    When ``year`` is None the most recent company-benchmark ratio stored on
    ``equity.fundamentals.ratios`` is returned (falling back to 1000 if none
    is found). Otherwise the ratio is computed as market_cap divided by the
    normalised operating cash flow of that year's cash-flow statement.

    Returns the ratio as a number; returns None (after logging) when an
    exception occurs, or when a yearly computation could not gather all of
    multiplier / exchange rate / operating cash flow.
    """
    try:
        price_to_cash_flow = None
        if year is None:
            # get it from the ratios
            ratios = equity.fundamentals.ratios
            sorted_ratios = sorted(ratios, key=lambda x: x.current_period, reverse=True)  # the newest in front
            # Starting from the first going down the list.
            for ratio in sorted_ratios:
                if ratio.benchmark == Benchmark.company:
                    price_to_cash_flow = ratio.price_to_cash_flow_mrq
                    break
            if price_to_cash_flow is None:
                # Sentinel fallback when no company-benchmark ratio exists —
                # presumably a deliberately "very expensive" marker value;
                # TODO(review): confirm callers expect 1000 here.
                price_to_cash_flow = 1000
        else:
            if market_cap is None:
                # NOTE(review): this raise is caught by the outer except below,
                # so it is logged and the function returns None rather than
                # propagating — confirm this is the intended contract.
                raise Exception(f"Market cap for {equity.exchange}:{equity.symbol_1}:{equity.id} not available!")
            operating_cash_flow = None
            normalised_operating_cash_flow = None
            multiplier = None
            exchange = None
            # The cash flow of interest
            cash_flow = fund_utils.gm.get_annual_financial_statement(equity.fundamentals.cash_flow, year)
            if cash_flow is not None:
                # Normalise to a common scale/currency before dividing.
                multiplier = fund_utils.gm.get_measure_unit_multiplier(cash_flow.measure_unit)
                exchange = fund_utils.gm.get_exchange_rate(methods.validate(cash_flow.currency), equity)
                operating_cash_flow = methods.validate(cash_flow.cash_from_operating_activities)
            if operating_cash_flow is not None and multiplier is not None and exchange is not None:
                normalised_operating_cash_flow = operating_cash_flow * multiplier * exchange
            if normalised_operating_cash_flow is not None:
                # epsilon guards against division by zero for a zero cash flow.
                price_to_cash_flow = market_cap / (normalised_operating_cash_flow + sys.float_info.epsilon)
        return price_to_cash_flow
    except Exception as e:
        # Best-effort contract: log the failure and fall through, returning None.
        log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
|
from scannerpy import Database, Job
import os.path
import glob
import argparse
from os.path import join
import pickle
import cv2
import soccer.depracated.kernels_c.resize_op.build.resize_pb2 as resize_pb2
import numpy as np
# Command-line options for the Scanner custom-resize-op example.
parser = argparse.ArgumentParser(description='Depth estimation using Stacked Hourglass')
parser.add_argument('--path_to_data', default='/home/krematas/Mountpoints/grail/data/barcelona/')
parser.add_argument('--visualize', action='store_true')
parser.add_argument('--cloud', action='store_true')
parser.add_argument('--bucket', default='', type=str)
parser.add_argument('--nframes', type=int, default=5, help='Margin around the pose')
opt, _ = parser.parse_known_args()
dataset = opt.path_to_data
total_files = 5
################################################################################
# This tutorial shows how to write and use your own C++ custom op.             #
################################################################################
# Look at resize_op/resize_op.cpp to start this tutorial.
db = Database()
config = db.config.config['storage']
# Storage parameters forwarded to Scanner's Files source (GCS endpoint).
params = {'bucket': opt.bucket,
          'storage_type': config['type'],
          'endpoint': 'storage.googleapis.com',
          'region': 'US'}
# ======================================================================================================================
# Images
# ======================================================================================================================
image_files = glob.glob(join(dataset, 'images', '*.jpg'))
image_files.sort()
image_files = image_files[:total_files]
# NOTE(review): encoded_image/frame are built but the job below reads from the
# 'test' table instead (see the commented-out op_args entry) — confirm intent.
encoded_image = db.sources.Files(**params)
frame = db.ops.ImageDecoder(img=encoded_image)
# ======================================================================================================================
# Instances
# ======================================================================================================================
# Load previously pickled per-frame results and re-encode each instance image
# as a serialized MyImage protobuf row for a Scanner table.
file_to_save = join(opt.path_to_data, 'tmp.p')
with open(file_to_save, 'rb') as f:
    results = pickle.load(f)
data = []
for i, res in enumerate(results):
    buff = pickle.loads(res)
    for sel in range(len(buff)):
        # h, w are computed but unused below — kept for parity with the original.
        h, w = buff[sel]['img'].shape[:2]
        _img = resize_pb2.MyImage()
        _, buffer = cv2.imencode('.jpg', buff[sel]['img'].astype(np.float32))
        _img.image_data = bytes(buffer)
        data.append([_img.SerializeToString()])
    # NOTE(review): indentation was lost in this copy; the break presumably sits
    # inside the outer loop so only the first result is processed — confirm.
    if i == 0:
        break
data = data[:10]
print(len(data))
db.new_table('test', ['img'], data, force=True)
img = db.sources.FrameColumn()
cwd = os.path.dirname(os.path.abspath(__file__))
if not os.path.isfile(os.path.join(cwd, 'resize_op/build/libresize_op.so')):
    print(
        'You need to build the custom op first: \n'
        '$ pushd {}/resize_op; mkdir build && cd build; cmake ..; make; popd'.
        format(cwd))
    exit()
# To load a custom op into the Scanner runtime, we use db.load_op to open the
# shared library we compiled. If the op takes arguments, it also optionally
# takes a path to the generated python file for the arg protobuf.
db.load_op(
    os.path.join(cwd, 'resize_op/build/libresize_op.so'),
    os.path.join(cwd, 'resize_op/build/resize_pb2.py'))
# Then we use our op just like in the other examples.
resize = db.ops.MyResize(frame=img, width=200, height=300)
output_op = db.sinks.Column(columns={'resized_frame': resize})
job = Job(op_args={
    img: db.table('test').column('img'),
    # encoded_image: {'paths': image_files, **params},
    output_op: 'example_resized',
})
db.run(output_op, [job], force=True)
from .Fields import * |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata for the cabot-alert-twilio plugin.
setup(name='cabot-alert-twilio',
      version='1.3.3',
      description='A twilio alert plugin for Cabot by Arachnys',
      author='Arachnys',
      author_email='info@arachnys.org',
      url='http://cabotapp.com',
      packages=find_packages(),
      # Fix: download_url previously pointed at the stale 1.3.1 archive;
      # keep it in sync with `version` above.
      download_url='https://github.com/cabotapp/cabot-alert-twilio/archive/1.3.3.zip'
      )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.