python – Using only part of tensorflow (keras.models.model_from_json)

I am fairly new to using tensorflow. I am attempting to use tensorflow in Python 3.7 to load an already-created deep learning model file (.json) and associated weights (.h5). I have a model_from_json (tf.keras.models.model_from_json) call that works great, but I would like to keep my virtual environment size down for distribution and use within my research team (tensorflow is comparatively large).

Is it at all possible to use this specific model_from_json call without installing the entire tensorflow package (I am currently using tensorflow==2.2.0)? Selective tensorflow installation or something similar?
Thanks!

Password generator project in Python

A few days ago I posted my password generator project to help me learn and become more comfortable. I got a lot of great replies from that and I’ve since updated it and would love another look at the program.

I’ve made it so that I can import it and use it to generate a password. I’ve also added support for completely custom subsets of characters.

Throw out any suggestions or comments you have! Anything is welcome.

import string
from string import ascii_lowercase
from string import ascii_uppercase
from string import digits as numeric
from string import punctuation
import secrets
import argparse
from argparse import HelpFormatter

def generate_characters(character_set, character_amount):
    """Yield `character_amount` characters, each drawn independently and
    cryptographically at random from `character_set`."""
    produced = 0
    while produced < character_amount:
        yield secrets.choice(character_set)
        produced += 1

def shuffle(input_str):
    """Return a cryptographically-random permutation of `input_str`.

    Bug fix: the original used call syntax (`input_str(index)`,
    `input_str(:index)`) where subscripts/slices were intended, which is a
    syntax error. This version pops random characters from a working list.
    """
    remaining = list(input_str)
    shuffled = []
    while remaining:
        # secrets.randbelow gives an unbiased index into the leftover chars.
        shuffled.append(remaining.pop(secrets.randbelow(len(remaining))))
    return "".join(shuffled)

def generate_password(password_length,
                      subset_lowercase=ascii_lowercase, subset_uppercase=ascii_uppercase,
                      subset_numeric=numeric, subset_special="!@#$%^&*",
                      min_lowercase=1, min_uppercase=1,
                      min_numeric=1, min_special=1):
    """Build a shuffled password of `password_length` characters that meets
    every per-category minimum, filling the remainder from all subsets."""
    quotas = ((subset_lowercase, min_lowercase),
              (subset_uppercase, min_uppercase),
              (subset_numeric, min_numeric),
              (subset_special, min_special))
    # Satisfy each category's minimum first.
    password = "".join(
        "".join(generate_characters(subset, minimum)) for subset, minimum in quotas)
    # Top up to the requested length from the union of all subsets.
    full_pool = "".join(subset for subset, _ in quotas)
    password += "".join(generate_characters(full_pool, password_length - len(password)))
    return shuffle(password)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        formatter_class=HelpFormatter,
        description="Generates a password",
        usage="")

    parser.add_argument(
        "-len",
        "--length",
        type=int,
        default=24,
        dest="password_length",
        help="Length of the generated password")
    parser.add_argument(
        "-lc",
        "--lower",
        type=int,
        default=1,
        dest="min_lowercase",
        help="Minimum number of lowercase alpha characters")
    parser.add_argument(
        "-uc",
        "--upper",
        type=int,
        default=1,
        dest="min_uppercase",
        help="Minimum number of uppercase alpha characters")
    parser.add_argument(
        "-num",
        "--numeric",
        type=int,
        default=1,
        dest="min_numeric",
        help="Minimum number of numeric characters")
    parser.add_argument(
        "-sp",
        "--special",
        type=int,
        default=1,
        dest="min_special",
        help="Minimum number of special characters")
    parser.add_argument(
        "-ext",
        "--extended",
        action="store_const",
        default=False,
        const=True,
        dest="special_extended",
        help="Toggles the extended special character subset. Passwords may not be accepted by all services")
    parser.add_argument(
        "-sl",
        "--subset_lower",
        type=str,
        default=ascii_lowercase,
        dest="subset_lower",
        help="Allows for a custom subset of lowercase characters")
    parser.add_argument(
        "-su",
        "--subset_upper",
        type=str,
        default=ascii_uppercase,
        dest="subset_upper",
        help="Allows for a custom subset of uppercase characters")
    parser.add_argument(
        "-sn",
        "--subset_numeric",
        type=str,
        default=numeric,
        dest="subset_numeric",
        help="Allows for a custom subset of numeric characters")
    parser.add_argument(
        "-ss",
        "--subset_special",
        default="",
        type=str,
        dest="subset_special",
        help="Allows for a custom subset of special characters")

    args = parser.parse_args()

    if args.subset_special:
        special = args.subset_special
    elif args.special_extended:
        special = punctuation
    else:
        special = "!@#$%^&*"

    generated_password = generate_password(
        args.password_length,
        args.subset_lower,
        args.subset_upper,
        args.subset_numeric,
        special,
        args.min_lowercase,
        args.min_uppercase,
        args.min_numeric,
        args.min_special,
    )

    print("Password:", generated_password)

Python: Database efficiency with peewee

I have this code to get a lot of records from different databases of peewee but I feel it could be improved a lot. Any feedback?

from peewee import fn as sqlfunc

def search_records(self):
    """Request a list of records from the database, providing a list of specific conditions.

    Returns a list of 12-item tuples (one per matching row) gathered from
    every table whose "Busca..." checkbox is ticked in the UI.
    """
    # Query the databases to return just the next fields in that exact order
    limited_to = self.spinMaximumRecords.value()
    records = []  # bug fix: was `()` (a tuple), which has no .append()
    # Facturas
    if self.checkBuscaFacturas.isChecked():
        for record in (Facturas.select()
                       .where(*self.build_where_conditions(Facturas))
                       .order_by(Facturas.idunica.desc())
                       .limit(limited_to)):
            records.append(
                (record.idunica, record.fecha, record.codigo, "Factura", record.clienterazonsocial,
                 record.totalimporte, record.pagada, record.contabilizar, record.concepto1,
                 record.cantidad1, record.precio1, record.observaciones))
    # Facturas al contado
    if self.checkBuscaAlcontado.isChecked():
        if self.comboCliente.currentText() == "":  # SPECIAL: cashinvoice doesn't have client
            for record in (Facturasalcontado.select()
                           .where(*self.build_where_conditions(Facturasalcontado))
                           .order_by(Facturasalcontado.idunica.desc())
                           .limit(limited_to)):
                records.append(
                    (record.idunica, record.fecha, record.codigo, "Fact. Contado", "",
                     record.totalimporte, record.pagada, record.contabilizar, record.concepto1,
                     record.cantidad1, record.precio1, record.observaciones))
    # Albaranes
    if self.checkBuscaAlbaranes.isChecked():
        for record in (Albaranes.select()
                       .where(*self.build_where_conditions(Albaranes))
                       .order_by(Albaranes.idunica.desc())
                       .limit(limited_to)):
            records.append(
                (record.idunica, record.fecha, record.codigo, "Albarán", record.clienterazonsocial,
                 record.totalimporte, record.pagada, record.contabilizar, record.concepto1,
                 record.cantidad1, record.precio1, record.observaciones))
    # Presupuestos (budgets have no paid/accounting flags, hence the fillers)
    if self.checkBuscaPresupuestos.isChecked():
        for record in (Presupuestos.select()
                       .where(*self.build_where_conditions(Presupuestos))
                       .order_by(Presupuestos.idunica.desc())
                       .limit(limited_to)):
            records.append(
                (record.idunica, record.fecha, record.codigo, "Presupuesto", record.clienterazonsocial,
                 record.totalimporte, "-IGNORAR-", "", record.concepto1,
                 record.cantidad1, record.precio1, record.observaciones))
    return records

def build_where_conditions(self, database) -> Union[bool, Any]:
    """Get the right prefix to use depending on the type of table.
    Build the Where conditions depending on the check states.

    Returns a non-empty list of peewee expressions suitable for
    unpacking into `query.where(*conditions)`.
    """
    # Get prefixes
    # NOTE(review): BUSINESS_DATA(...) may originally have been a dict
    # lookup BUSINESS_DATA[...] -- confirm against the full source.
    prefix = ""
    if database == Facturasalcontado:
        prefix = self.program.BUSINESS_DATA("PrefixFacturasalcontado")
    elif database == Presupuestos:
        prefix = self.program.BUSINESS_DATA("PrefixPresupuestos")
    elif database == Albaranes:
        prefix = self.program.BUSINESS_DATA("PrefixAlbaranes")
    elif database == Facturas:
        prefix = self.program.BUSINESS_DATA("PrefixFacturas")
    # Every query is scoped to the active business.
    where_condition = [database.idempresa == self.program.BUSINESS_ID]  # bug fix: was a tuple
    # Pagado (budgets have no "paid" column)
    if self.checkPagado.isChecked():
        if database != Presupuestos:
            where_condition.append(database.pagada == self.comboPagada.currentText())
    # Contabilizar (budgets have no accounting column)
    if self.checkContabilizar.isChecked():
        if database != Presupuestos:
            where_condition.append(database.contabilizar == self.comboContabilizar.currentText())
    # Cliente (cash invoices have no client)
    if self.checkCliente.isChecked():
        if database != Facturasalcontado:
            client_id = self.comboCliente.get_selected_id()
            where_condition.append(database.clientecid == client_id)
    # Serie
    if self.checkSerie.isChecked():
        where_condition.append(database.codigo.startswith(f"{prefix}{self.comboSerie.currentText()}"))
    # Codigo
    if self.checkCodigo.isChecked():
        where_condition.append(database.codigo == f"{prefix}{self.Buscadorline_codigo.text()}")
    # Fecha
    if self.checkFecha.isChecked():
        # We need SQL-sortable dates (YYYY/MM/DD) for BETWEEN, so rebuild the
        # DD/MM/YYYY column with "/" separators in reversed order.
        where_condition.append(sqlfunc.Substr(database.fecha, 7).concat("/")
                               .concat(sqlfunc.Substr(database.fecha, 4, 2)).concat("/")
                               .concat(sqlfunc.Substr(database.fecha, 1, 2))
                               .between(reverse_date(self.Buscadorline_fecha.text()),
                                        reverse_date(self.Buscadorline_fecha2.text())))
    # Texto Parcial: match the text in any concepto column or the other
    # free-text fields. Built with a loop instead of a 21-term OR literal.
    if self.checkTextoParcial.isChecked():
        if self.lineBuscador_textoparcialcon.text() != "":
            text_to_find = self.lineBuscador_textoparcialcon.text()
            searchable = [getattr(database, f"concepto{i}") for i in range(1, 18)]
            searchable += [database.observaciones, database.formadepago,
                           database.codigo, database.clienterazonsocial]
            text_condition = searchable[0].contains(text_to_find)
            for field in searchable[1:]:
                text_condition = text_condition | field.contains(text_to_find)
            where_condition.append(text_condition)
    # Forma de pago
    # NOTE(review): comparing `fechadepago` against the payment-method combo
    # looks like it should be `formadepago` -- confirm before changing.
    if self.checkFormadepago.isChecked():
        where_condition.append(database.fechadepago == self.comboFormadepago.currentText())
    # NOTE: Must always return an unpackable sequence.
    if len(where_condition) >= 1:
        return where_condition
    return (None,)

def reverse_date(date: str) -> str:
    """Reverse a DD/MM/YYYY date into YYYY/MM/DD for SQL BETWEEN comparisons.

    Bug fix: the original indexed with call syntax (`split(2)`), which raises
    TypeError; subscripts are required.
    """
    split = date.split("/")
    return f"{split[2]}/{split[1]}/{split[0]}"

pandas – Problema al contar elementos de dataframe python

Hola tengo el siguiente código que cuando hago el filtrado me arroja 5 filas pero al hacer el conteo me devuelve 4, mon es mi dataframe

a = 'palabra'
# Filter rows by year, channel and week; subscripts restored (the scrape had
# turned every [] into ()).
filtro = mon[mon.Año.isin([2020]) & mon.Channel.isin(['CANAL 5']) & mon.Week.isin([11])]
# Keep only rows whose Description contains the word (case-insensitive).
filtro = filtro[filtro['Description'].str.contains(a, case=False)]
f1 = filtro['Description'].value_counts()

Cuando imprimo filtro me arroja en la columna Description:

Description
PALABRA AND OTHER
PALABRA AND OTHER
PALABRA AND OTHER
ESP.PALABRA AND OTHER
PALABRA AND OTHER

Pero el f1 me dice que solo tengo 4 valores, al parecer los separa en PALABRA AND OTHER 4 y
ESP.PALABRA AND OTHER 1 . Hice en lugar de .value_counts()

f1=filtro.shape(0) 

y me da el resultado correcto pero el dato es un int y no se como hacerlo dataframe para manejarlo, quisiera que me ayudaran a corregir el .value_counts() o bien me digan como hacer el dato int a dataframe .

python – Combining data to create a list

I am trying to create a list based on some data, but the code I am using is very slow when I run it on large data. So I suspect I am not using all of the Python power for this task. Is there a more efficient and faster way of doing this in Python?

Here is an explanation of the code:

You can think of this problem as a list of games, each with a list of participating teams and the scores for each team in the game. For each of the pairs in the current game it calculates the sum of the differences in score from the previous competitions, including only the pairs in the current game. Then it updates each pair in the current game with the difference in scores. Then it keeps track of the scores for each pair in each game and updates this score as each game is played.

In the example below, based on some data, there are for-loops used to create a new variable list_zz.

The data and the for-loop code:

from collections import Counter, defaultdict
from itertools import combinations
import math

# test data (lists restored: the scrape converted [] to (), which turned
# single-element games like ['B'] into plain strings/floats and broke len()
# and every subscript below)
games = [['A', 'B'], ['B'], ['A', 'B', 'C', 'D', 'E'], ['B'], ['A', 'B', 'C'],
         ['A'], ['B', 'C'], ['A', 'B'], ['C', 'A', 'B'], ['A'], ['B', 'C']]

gamescores = [[1.0, 5.0], [3.0], [2.0, 7.0, 3.0, 1.0, 6.0], [3.0], [5.0, 2.0, 3.0],
              [1.0], [9.0, 3.0], [2.0, 7.0], [3.0, 6.0, 8.0], [2.0], [7.0, 9.0]]

list_zz = []

# wd[p1][p2] accumulates the score differences p1 - p2 over all past games.
wd = defaultdict(Counter)
past_diffs = defaultdict(float)
this_diff = defaultdict(Counter)

for players, scores in zip(games, gamescores):
    # Solo games contribute no pairwise information.
    if len(players) == 1:
        list_zz.append(math.nan)
        continue

    past_diffs.clear()
    this_diff.clear()

    for (player1, score1), (player2, score2) in combinations(zip(players, scores), 2):
        # Sum of historical differences, restricted to pairs in this game.
        past_diffs[player1] += wd[player1][player2]
        past_diffs[player2] += wd[player2][player1]

        this_diff[player1][player2] = score1 - score2
        this_diff[player2][player1] = score2 - score1

    list_zz.extend(past_diffs[p] for p in players)

    # Fold this game's differences into the running totals.
    for player in players:
        wd[player].update(this_diff[player])

print(list_zz)

Which looks like this:

[0.0,
 0.0,
 nan,
 -4.0,
 4.0,
 0.0,
 0.0,
 0.0,
 nan,
 -10.0,
 13.0,
 -3.0,
 nan,
 3.0,
 -3.0,
 -6.0,
 6.0,
 -10.0,
 -10.0,
 20.0,
 nan,
 14.0,
 -14.0]

If you could elaborate on the code to make it more efficient and execute faster, I would really appreciate it.

python – módulo e biblioteca

EU criei um módulo em python e gostaria de importá -lo para um novo documento pelo comando import, assim como é feito com todas as biblioteca Ex: import math – ok
import meu_módulo -(só importa se eu estiver no mesmo arquivo que eu criei)
Como faço para importar logo que eu abro o IDLE?

python – Leetcode 957. Prison Cells After N Days

The important part

They want something below O(n). Using map is a good idea but actually you should find the cycles and return the right position on the cycle instead of computing line by line.

Spoiler (possible solution)

Change:

            if tuple(cells) in step_map:
                return  step_map(tuple(cells))

to:

            if tuple(cells) in step_map:
                cycle = list()
                head = tuple(cells)
                cycle.append(head)
                previous = head
                while True:
                    next_node = tuple(step_map(previous))
                    if next_node == head:
                        return list(cycle((N - k) % len(cycle)))
                    cycle.append(next_node)
                    previous = next_node

Old edit – Some small improvements

There are some O(m) operations multiple times…

For example:

 tmp = (0) + tmp + (0)

Python operation for that is O(m). Therefore, your solution is O(nm).

step_map(tuple(cells)) = tmp

this is also O(m).

data structures – What is the difference between python list and c++ vector?

Both python list and c++ vector are implemented as dynamic arrays (https://en.wikipedia.org/wiki/Dynamic_array).
(Essentially arrays that get reallocated when they are too small.)
Now the important difference between the python and c++ version don’t come from the data structures themselves, but rather from the language. In c++ you can store structs, primitive data types pointers etc. in a vector. But in python everything is a pointer in the sense that an e.g. an integer is stored in the heap and it has an annotation attached to it that says “i am an integer”. That’s required because python has a dynamic type system. And the elements stored in the list are only pointers to the actual elements. This mechanism also allows python to store different data types in the same list. This is another reason why python is kinda slow.

Python selenium web scrapping – how to refactor the code and use explicit wait?

I have written the following code to extract a table from the portal. The code works fine (except for net connectivity issues). But: 1. I want to make it simpler and flatter. 2. I would like to use explicit waits to avoid stale element errors while still saving time. The URL address is available in the code itself.

import os
import time

import pandas as pd
from bs4 import BeautifulSoup as BS
from selenium import webdriver
from selenium.common.exceptions import (NoSuchElementException,
                                        TimeoutException,
                                        StaleElementReferenceException,
                                        WebDriverException)
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait

from FIR_logging import logger


# base function

def get_url(some_url):
    """Load `some_url`, retrying every 60 s on driver errors, then refresh."""
    loaded = False
    while not loaded:
        try:
            driver.get(some_url)
            loaded = True
        except WebDriverException:
            # Usually a connectivity blip -- wait and try again.
            time.sleep(60)
    driver.refresh()



# Some constants:

# Landing page listing published FIRs for Maharashtra.
URL = r'https://www.mhpolice.maharashtra.gov.in/Citizen/MH/PublishedFIRs.aspx'
# Headless private Firefox so no profile state leaks between runs.
options = FirefoxOptions()
options.add_argument("--headless")
options.add_argument("--private-window")
driver = webdriver.Firefox(options=options)
get_url(URL)
time.sleep(10)

# Root directory where per-district CSVs are written.
Download_Directory = r'/some_directory/raw_footage7'

# Column headers for the DataFrames saved per district/day.
COLUMNS = ('Sr.No.', 'State', 'District', 'Police Station', 'Year', 'FIR No.', 'Registration Date', 'FIR No',
           'Sections')

# Every district option offered by the portal's dropdown.
ALL_Districts = ('AKOLA', 'AMRAVATI CITY', 'AMRAVATI RURAL', 'AURANGABAD CITY',
                 'AURANGABAD RURAL', 'BEED', 'BHANDARA', 'BRIHAN MUMBAI CITY', 'BULDHANA',
                 'CHANDRAPUR', 'DHULE', 'GADCHIROLI', 'GONDIA', 'HINGOLI', 'JALGAON', 'JALNA',
                 'KOLHAPUR', 'LATUR', 'NAGPUR CITY', 'NAGPUR RURAL', 'NANDED', 'NANDURBAR',
                 'NASHIK CITY', 'NASHIK RURAL', 'NAVI MUMBAI', 'OSMANABAD', 'PALGHAR', 'PARBHANI',
                 'PIMPRI-CHINCHWAD', 'PUNE CITY', 'PUNE RURAL', 'RAIGAD', 'RAILWAY AURANGABAD',
                 'RAILWAY MUMBAI', 'RAILWAY NAGPUR', 'RAILWAY PUNE', 'RATNAGIRI', 'SANGLI', 'SATARA',
                 'SINDHUDURG', 'SOLAPUR CITY', 'SOLAPUR RURAL', 'THANE CITY', 'THANE RURAL', 'WARDHA',
                 'WASHIM', 'YAVATMAL')


# other functions


def district_selection(name):
    """Select `name` in the district dropdown; return False if it is absent.

    Bug fixes: removed the unused `dist_list_options` variable, and replaced
    `not in ('Select')` -- which is a substring test against the string
    'Select' -- with an explicit inequality while collecting option labels.
    """
    dist_list = Select(driver.find_element_by_css_selector(
        "#ContentPlaceHolder1_ddlDistrict"))
    names = [o.get_attribute("text")
             for o in dist_list.options
             if o.get_attribute("text") != 'Select']
    if name not in names:
        logger.info(f"{name} is not in list")
        return False
    dist_list.select_by_visible_text(name)
    time.sleep(8)


def enter_date(date):
    """Type `date` into both registration-date fields using action chains."""
    from_selector = '#ContentPlaceHolder1_txtDateOfRegistrationFrom'
    to_selector = '#ContentPlaceHolder1_txtDateOfRegistrationTo'
    # Wait until the "from" field is actually present before touching it.
    WebDriverWait(driver, 160).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, from_selector)))
    start_field = driver.find_element_by_css_selector(from_selector)
    end_field = driver.find_element_by_css_selector(to_selector)

    chain = ActionChains(driver)
    chain.click(start_field).send_keys(date)
    chain.move_to_element(end_field).click().send_keys(date)
    chain.perform()

    logger.info(f'date entered: {date}')


def search():
    """Click the portal's Search button."""
    search_button = driver.find_element_by_css_selector('#ContentPlaceHolder1_btnSearch')
    search_button.click()


def number_of_records():
    """Return the record count shown on the results page, or False.

    Retries roughly 18 times (1 s apart) while the page loads. Returns the
    integer count when non-zero, False when zero or the page never loads.

    Bug fix: the original did not increment the retry counter when the label
    existed but was still empty, so it could loop forever; it also fell off
    the end of the loop returning an implicit None (False here, still falsy).
    """
    attempts = 1
    while attempts < 19:
        try:
            records_text = driver.find_element_by_css_selector(
                '#ContentPlaceHolder1_lbltotalrecord').text
            if records_text == '':
                # Label rendered but not yet populated -- wait and retry.
                time.sleep(1)
                attempts += 1
                continue
            records_number = int(records_text)
            if records_number != 0:
                logger.info(f"{district}: {records_number}")
                return records_number
            logger.info(f"no records @ {district}")
            return False
        except (NoSuchElementException, TimeoutException, StaleElementReferenceException):
            logger.info("page is not loaded")
            attempts += 1
    return False


def extract_table_current(name, single):
    """Append every data row (first nine cells) of the current results table
    to the list `single`.

    Bug fixes: the original retry loop never re-read the page source, so a
    missing table spun forever; slices used call syntax (`cells(0:9)`); and
    generators (not lists) of cell texts were appended. `find_elements`
    returns a list, never None, so the pager check uses truthiness.
    """
    soup = BS(driver.page_source, 'html.parser')
    main_table = soup.find("table", {"id": "ContentPlaceHolder1_gdvDeadBody"})
    attempts = 1
    while main_table is None:
        if attempts < 16:
            logger.info(f"the table did not load @ {name}")
            attempts += 1
            time.sleep(1)
            # Re-parse the page: the table may have appeared since last check.
            soup = BS(driver.page_source, 'html.parser')
            main_table = soup.find("table", {"id": "ContentPlaceHolder1_gdvDeadBody"})
        else:
            logger.info(f"the table did not load @ {name}."
                        f"stopped trying")
            return
    links_for_pages = driver.find_elements_by_css_selector('.gridPager a')
    rows = main_table.find_all("tr")
    if not links_for_pages:
        # Single-page result: skip the pager row containing '...'.
        for row in rows:
            time.sleep(8)
            if '...' not in row.text:
                cells = row.find_all('td')[0:9]  # drop the last column
                single.append([cell.text for cell in cells])
    else:
        # Multi-page result: the last two rows belong to the pager.
        for row in rows[0:len(rows) - 2]:
            time.sleep(8)
            cells = row.find_all('td')[0:9]  # drop the last column
            single.append([cell.text for cell in cells])


def next_page(name, data):
    """Visit every numbered pager link and scrape each page into `data`.

    Returns False when the results have no pager at all. Bug fix: list
    indexing used call syntax (`links_for_pages_new(page)`).
    """
    try:
        driver.find_element_by_css_selector('.gridPager a')
    except NoSuchElementException:
        return False
    links_for_pages = driver.find_elements_by_css_selector('.gridPager a')
    for page in range(len(links_for_pages)):
        # Re-query each iteration to bypass stale element exceptions.
        links_for_pages_new = driver.find_elements_by_css_selector('.gridPager a')
        # Do not click on the '...' link for the next page slot.
        if links_for_pages_new[page].text != '...':
            links_for_pages_new[page].click()
            # TODO: replace with an explicit wait to save time
            time.sleep(8)
            extract_table_current(name, data)


def second_page_slot():
    """Click the '...' link that jumps to the next pager slot (e.g. page 11).

    Returns False when no such link exists on the current page.
    """
    try:
        driver.find_element_by_link_text('...').click()
    except NoSuchElementException:
        return False


# main code

page_data = []  # bug fix: was a tuple, which has no .append()

time.sleep(5)
# Show 50 records per page, then close the bootstrap browser; a fresh one is
# opened for every district/day below.
view = Select(driver.find_element_by_css_selector(
    '#ContentPlaceHolder1_ucRecordView_ddlPageSize'))
view.select_by_value('50')
driver.close()
for district in ALL_Districts:

    b = "06"    # month
    c = "2020"  # year
    district_directory = os.path.join(Download_Directory, f'{district}{b}{c}')
    if not os.path.exists(district_directory):
        os.mkdir(district_directory)
    for i in range(1, 30):
        # Reopen the page with a fresh browser to wipe out the cache.
        options = FirefoxOptions()
        options.add_argument("--headless")
        options.add_argument("--private-window")
        driver = webdriver.Firefox(options=options)
        get_url(URL)
        # Zero-pad the day so 1-9 become 01-09 (also used in the filename).
        day = f'{i:02d}'
        date_from = day + b + c
        enter_date(date_from)
        # select district
        district_selection(district)
        time.sleep(3)
        # start the search
        search()
        time.sleep(7)
        if not number_of_records():
            continue
        extract_table_current(district, page_data)
        time.sleep(3)
        # NOTE(review): page_data is never cleared, so each CSV contains every
        # row scraped so far -- confirm this accumulation is intended.
        if not next_page(district, page_data):
            district_data = pd.DataFrame(page_data, columns=COLUMNS)
            district_data.to_csv(os.path.join(district_directory, f'{district}{day}{b}{c}.csv'))
            continue
        extract_table_current(district, page_data)
        district_data = pd.DataFrame(page_data, columns=COLUMNS)
        district_data.to_csv(os.path.join(district_directory, f'{district}{day}{b}{c}.csv'))
        driver.close()

json – Python arbitrary objects Serialization and Deserialization

I am trying to convert objects inside an object(arbitrary objects) to Json and later retrieve it. I have developed some codes and would like to share if there is any problem in the code.

import json

Here is the first class

class Foo():
    """Serializable container; `bar` is expected to hold the JSON string
    produced by Bar.toJson()."""

    def __init__(self, x, y, bar):
        self.x = x
        self.y = y
        self.bar = bar  # second class object (as its JSON string) is here

    def toJson(self):
        """Serialize this instance's attributes to a JSON string."""
        # No need to construct a throwaway Foo just to reach __dict__.
        return json.dumps(self.__dict__)

    @staticmethod
    def fromJson(jsonData):
        """Rebuild a Foo (including its nested Bar) from toJson() output.

        Bug fix: dict access used call syntax (`data('x')`); subscripts
        are required.
        """
        data = json.loads(jsonData)
        return Foo(
            x=data['x'],
            y=data['y'],
            bar=Bar.fromJson(data['bar'])
        )

Here is the second class

class Bar():
    """Inner serializable object holding a single value `z`."""

    def __init__(self, z):
        self.z = z

    def toJson(self):
        """Serialize this instance's attributes to a JSON string."""
        # No need to construct a throwaway Bar just to reach __dict__.
        return json.dumps(self.__dict__)

    @staticmethod
    def fromJson(jsonData):
        """Rebuild a Bar from toJson() output.

        Bug fix: dict access used call syntax (`data('z')`); subscripts
        are required.
        """
        data = json.loads(jsonData)
        return Bar(
            z=data['z'],
        )

Convert to Json

jsonData = Foo(100,500,Bar(900).toJson()).toJson()

Retrieve the Json and convert to Object

foo = Foo.fromJson(jsonData)

Print the Object attributes

print(foo.bar.z)

It works actually. But is there any memory leakage? Any security issue?