mpy

Reputation: 632

Multiprocess.pool.map() raises ValueError: No objects to concatenate

I have to run a for loop; each iteration fetches data from a database, does some manipulation, runs the Dijkstra algorithm, and appends the result to a final list. The code looks like this:

import math
import pandas as pd
import igraph as ig
import swifter  # provides the .swifter accessor used below

def log_transform(x):
    transformed = math.e ** (-x)
    return transformed

input_region = '1.199'
t1 = '20200101'
t2 = '20200115'
candid_sale_invoices = pd.read_excel('candid_sale_invoices.xlsx')
candid_barcodes = pd.read_excel('candid_barcodes.xlsx')

weights = []
for i in range(int(t1), int(t2) + 1):
    input_date = str(i)
    sql_data = """select trim(cast(p.Barcode as nvarchar(20))) Barcode, cast(s.invoiceid as nvarchar(20)) invoiceid
    from sales s inner join Product_981115 p on s.productid = p.productid
    where s.date = """ + input_date + """ and s.qty != 0 and p.sectionid != 1691.199 and s.RegionID = """ + input_region
    data = []
    # conn is an open database connection created elsewhere
    for chunk in pd.read_sql(sql_data, conn, chunksize=1000000):
        data.append(chunk)
    data = pd.concat(data, ignore_index=True)
    data = data.merge(candid_sale_invoices)
    data = data.merge(candid_barcodes)
    final_edges_df = data.iloc[:, [2, 3, 4]]
    final_edges_tuples = [tuple(x) for x in final_edges_df.values]

    Gm = ig.Graph.TupleList(final_edges_tuples, directed=True, edge_attrs=['weight'])

    longest_paths = pd.DataFrame(Gm.shortest_paths_dijkstra(None, None, weights='weight'))
    longest_paths = longest_paths.swifter.apply(log_transform)
    longest_paths["Date"] = input_date
    longest_paths["RegionID"] = input_region
    weights.append(longest_paths)

weights = pd.concat(weights, ignore_index=True)

The problem is the processing time: the loop takes hours to finish. Since each iteration is independent of the others, I decided to run the loop in parallel, with the help of this link:

import psutil
from multiprocess import Pool

pool = Pool(psutil.cpu_count(logical=False))

def graph_analysis(i):
    input_date = str(i)
    sql_data = """select trim(cast(p.Barcode as nvarchar(20))) Barcode, cast(s.invoiceid as nvarchar(20)) invoiceid
    from sales s inner join Product_981115 p on s.productid = p.productid
    where s.date = """ + input_date + """ and s.qty != 0 and p.sectionid != 1691.199 and s.RegionID = """ + input_region
    data = []
    for chunk in pd.read_sql(sql_data, conn, chunksize=1000000):
        data.append(chunk)
    data = pd.concat(data, ignore_index=True)
    data = data.merge(candid_sale_invoices)
    data = data.merge(candid_barcodes)
    final_edges_df = data.iloc[:, [2, 3, 4]]
    final_edges_tuples = [tuple(x) for x in final_edges_df.values]

    Gm = ig.Graph.TupleList(final_edges_tuples, directed=True, edge_attrs=['weight'])

    longest_paths = pd.DataFrame(Gm.shortest_paths_dijkstra(None, None, weights='weight'))
    longest_paths = longest_paths.swifter.apply(log_transform)
    longest_paths["Date"] = input_date
    longest_paths["RegionID"] = input_region
    return longest_paths

results = pool.map(graph_analysis, range(int(t1), int(t2) + 1))
pool.close()

Running the code, it seems to do its job and compute in parallel, but after a while it raises this error:

Traceback (most recent call last):
  File "", line 78, in <module>
    weights = pool.map(graph_analysis, range(int(t1), (int(t2) + 1)))
  File "C:\Users\AppData\Local\Continuum\anaconda3\lib\site-packages\multiprocess\pool.py", line 268, in map
    return self._map_async(func, iterable, mapstar, chunksize).get()
  File "C:\Users\AppData\Local\Continuum\anaconda3\lib\site-packages\multiprocess\pool.py", line 657, in get
    raise self._value
ValueError: No objects to concatenate

Does this error relate to gathering the "longest_paths" dataframes from all of the iterations?

Upvotes: 1

Views: 4078

Answers (1)

AKX

Reputation: 169338

"No objects to concatenate" is a Pandas error returned when you call pd.concat() with an empty iterable:

>>> pd.concat([])
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "pandas/core/reshape/concat.py", line 281, in concat
    sort=sort,
  File "pandas/core/reshape/concat.py", line 329, in __init__
    raise ValueError("No objects to concatenate")
ValueError: No objects to concatenate
>>>

I would assume you'll just need to return early when the SQL query does not return anything:

# ...

    for chunk in pd.read_sql(sql_data, conn, chunksize=1000000):
        data.append(chunk)

    if not data: # <-- add this bit!
        return None

    data = pd.concat(data, ignore_index=True)

# ...

I'd also suggest using pool.imap_unordered(), which yields each result as soon as its worker finishes instead of waiting for the whole batch; you also don't need psutil, since Pool() will default to the number of CPUs anyway.
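
As a quick illustration of that difference, here's a toy sketch (not from the original code; slow_square is just a stand-in task):

from multiprocess import Pool
import time

def slow_square(n):
    time.sleep(n % 3)  # simulate tasks of uneven duration
    return n * n

if __name__ == "__main__":
    # Pool() with no argument uses os.cpu_count() worker processes
    with Pool() as pool:
        # results arrive in completion order, not input order
        for square in pool.imap_unordered(slow_square, range(8)):
            print(square)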

All in all, something like this – note I changed the return type to (i, x) so you also get back the index you passed in. This is still missing saving the actual results anywhere, of course (see the sketch after the code for one way to do that). :)

import math

import pandas as pd
import igraph as ig
import swifter  # provides the .swifter accessor used below
from multiprocess import Pool

# conn: an open database connection, created elsewhere


def log_transform(x):
    transformed = math.e ** (-x)
    return transformed


input_region = "1.199"
t1 = "20200101"
t2 = "20200115"
candid_sale_invoices = pd.read_excel("candid_sale_invoices.xlsx")
candid_barcodes = pd.read_excel("candid_barcodes.xlsx")


def graph_analysis(i):
    input_date = str(i)
    sql_data = (
        """select trim(cast(p.Barcode as nvarchar(20))) Barcode ,cast(s.invoiceid as 
    nvarchar(20)) invoiceid
    from sales s inner join Product_981115 p on s.productid = p.productid 
    where s.date = """
        + input_date
        + """ and s.qty != 0 and p.sectionid != 1691.199 and s.RegionID = """
        + input_region
    )
    data = []
    for chunk in pd.read_sql(sql_data, conn, chunksize=1000000):
        data.append(chunk)
    if not data:
        return (i, None)
    data = pd.concat(data, ignore_index=True)
    data = data.merge(candid_sale_invoices)
    data = data.merge(candid_barcodes)
    final_edges_df = data.iloc[:, [2, 3, 4]]
    final_edges_tuples = [tuple(x) for x in final_edges_df.values]

    Gm = ig.Graph.TupleList(final_edges_tuples, directed=True, edge_attrs=["weight"])

    longest_paths = pd.DataFrame(Gm.shortest_paths_dijkstra(None, None, weights="weight"))
    longest_paths = longest_paths.swifter.apply(log_transform)
    longest_paths["Date"] = input_date
    longest_paths["RegionID"] = input_region
    return (i, longest_paths)


if __name__ == "__main__":
    with Pool() as pool:
        for i, result in pool.imap_unordered(graph_analysis, range(int(t1), (int(t2) + 1))):
            print(i, result)
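
If you do want to gather everything back into one frame the way the original serial loop did, a minimal sketch (assuming the graph_analysis above) could replace the print loop:

if __name__ == "__main__":
    weights = []
    with Pool() as pool:
        for i, result in pool.imap_unordered(graph_analysis, range(int(t1), int(t2) + 1)):
            if result is not None:  # skip dates whose query returned no rows
                weights.append(result)
    # guard the final concat the same way as inside the worker
    weights = pd.concat(weights, ignore_index=True) if weights else None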

Upvotes: 0
