Sanjay

Reputation: 73

Hive query on MapR distribution has issues with the Tez engine

The query fails on Tez but runs fine on MR. We have tried many different permutations and combinations of set parameters but are unable to run this query successfully on Tez. On MR the query finishes in about 20 minutes, but on the Tez engine it consumes a huge amount of memory on the cluster: while the query runs, memory usage keeps climbing until it reaches 100% on many nodes, and the query then fails with a "container exit on lost node" message.

tmp_track_tempo_category_link_rc has about 600 million rows and is less than 10 GB on HDFS.
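
For reference, those numbers come from the table statistics; this is roughly how I checked them (assuming the stats are reasonably current):

USE mydb;
-- refresh basic table statistics, then read numRows / totalSize from Table Parameters
ANALYZE TABLE tmp_track_tempo_category_link_rc COMPUTE STATISTICS;
DESCRIBE FORMATTED tmp_track_tempo_category_link_rc;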


set hive.execution.engine=tez;
DROP TEMPORARY FUNCTION row_rank;
CREATE TEMPORARY FUNCTION row_rank AS 'com.mycompany.hadoop.hive.udf.UDFRank';
SET hive.map.aggr=true;
SET hive.exec.parallel=true;
SET hive.merge.mapfiles=false;

--set hive.mapjoin.hybridgrace.hashtable=false ;
SET mapred.max.split.size=100000000;
SET mapred.reduce.tasks=32;

--set mapreduce.map.memory.mb=4096;
--set mapreduce.reduce.memory.mb=4096;
--set mapreduce.map.java.opts=-Xmx3686m;
--set mapreduce.reduce.java.opts=-Xmx3686m;
set hive.tez.container.size=6144;
set hive.tez.java.opts=-Xmx4096m;
--set tez.am.task.max.failed.attempts=20 ;
--set tez.am.max.app.attempts=10;
--set tez.am.node-unhealthy-reschedule-tasks=true;
set tez.task.am.heartbeat.interval-ms.max=500;
--set tez.task.am.heartbeat.counter.interval-ms.max=8000;
--set tez.task.resource.memory.mb=2048;

set tez.am.resource.memory.mb=6144;
set tez.am.java.opts=-Xmx4096m;

set hive.vectorized.execution.enabled=true;
set hive.vectorized.execution.reduce.enabled=true;
set hive.exec.parallel=true;
set hive.auto.convert.join=true;
set hive.cbo.enable=true;
set hive.compute.query.using.stats=true;
set hive.stats.fetch.column.stats=true;
set hive.stats.fetch.partition.stats=true;
set hive.exec.compress.output=true;
set hive.exec.compress.intermediate=true;
--set hive.tez.container.size=1024;
--set hive.tez.java.opts=-Xmx512m;
set tez.runtime.io.sort.mb=2048;
set tez.grouping.min-size=16777216;
set tez.grouping.max-size=1073741824; 
set tez.grouping.split-count=8;
set tez.am.resource.memory.mb=1024;
set hive.exec.reducers.bytes.per.reducer=10240000;
set hive.exec.reducers.max=100;
set hive.tez.auto.reducer.parallelism=true;
--set tez.runtime.unordered.output.buffer.size-mb=1024;
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.optimize.skewjoin=true;
set hive.skewjoin.key=5000;

USE mydb;

SELECT track_id,
       (category_id + 100000) category_id,
       category_weight,
       CAST(1 AS TINYINT) section_type_id
FROM (SELECT tmp4.*, row_rank(track_id) tempo_rank
      FROM (SELECT tempo.track_id,
                   tempo.category_id,
                   tempo.category_weight
            FROM tmp_track_tempo_category_link_rc tempo
            JOIN (SELECT tmp2.*
                  FROM (SELECT tmp1.track_id,
                               tmp1.apple_track_id,
                               row_rank(track_id) RANK
                        FROM (SELECT b.track_id,
                                     b.apple_track_id,
                                     SUM(c.cnt) cnt,
                                     SUM(b.category_weight) mx
                              FROM (SELECT track_id,
                                           category_id,
                                           COUNT(category_id) cnt
                                    FROM tmp_track_tempo_category_link_rc a
                                    WHERE section_type_id = 1
                                    GROUP BY track_id, category_id) c
                              JOIN tmp_track_tempo_category_link_rc b
                                ON (    b.track_id = c.track_id
                                    AND b.category_id = c.category_id
                                    AND b.section_type_id = 1)
                              GROUP BY b.track_id, b.apple_track_id
                              DISTRIBUTE BY track_id
                              SORT BY track_id, cnt DESC, mx DESC, apple_track_id
                             ) tmp1
                       ) tmp2
                  WHERE RANK = 1
                 ) tmp3
              ON (    tmp3.track_id = tempo.track_id
                  AND tmp3.apple_track_id = tempo.apple_track_id
                  AND tempo.section_type_id = 1)
            DISTRIBUTE BY track_id
            SORT BY track_id, category_weight DESC, category_id
           ) tmp4
     ) tmp5
WHERE tempo_rank <= 3;
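
For completeness, my (possibly wrong) understanding of how the memory settings above relate: the task Xmx should stay at roughly 80% of hive.tez.container.size, and tez.runtime.io.sort.mb should stay well inside that heap. A simplified, internally consistent subset would look something like this sketch (values are illustrative only):

set hive.tez.container.size=6144;     -- YARN container size per Tez task (MB)
set hive.tez.java.opts=-Xmx4915m;     -- roughly 80% of the container
set tez.runtime.io.sort.mb=1638;      -- sort buffer kept to about a third of the task heap
set tez.am.resource.memory.mb=4096;   -- one value for the AM (it is set to both 6144 and 1024 above)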


Some of these set parameters make the memory situation better, but then the query fails with:

Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: org.apache.hadoop.hive.ql.metadata.HiveException: Unexpected exception from MapJoinOperator : null

at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.initializeAndRunProcessor(TezProcessor.java:211)
    at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.run(TezProcessor.java:168)
    at org.apache.tez.runtime.LogicalIOProcessorRuntimeTask.run(LogicalIOProcessorRuntimeTask.java:374)
    at org.apache.tez.runtime.task.TaskRunner2Callable$1.run(TaskRunner2Callable.java:73)
    at org.apache.tez.runtime.task.TaskRunner2Callable$1.run(TaskRunner2Callable.java:61)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
    at org.apache.tez.runtime.task.TaskRunner2Callable.callInternal(TaskRunner2Callable.java:61)
    at org.apache.tez.runtime.task.TaskRunner2Callable.callInternal(TaskRunner2Callable.java:37)
    at org.apache.tez.common.CallableWithNdc.call(CallableWithNdc.java:36)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
    at org.apache.hadoop.hive.ql.exec.tez.MapRecordProcessor.close(MapRecordProcessor.java:488)
    at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.initializeAndRunProcessor(TezProcessor.java:199)
    ... 14 more
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: org.apache.hadoop.hive.ql.metadata.HiveException: Unexpected exception from MapJoinOperator : null
    at org.apache.hadoop.hive.ql.exec.MapJoinOperator.closeOp(MapJoinOperator.java:546)
    at org.apache.hadoop.hive.ql.exec.Operator.close(Operator.java:697)
    at org.apache.hadoop.hive.ql.exec.Operator.close(Operator.java:711)
    at org.apache.hadoop.hive.ql.exec.tez.MapRecordProcessor.close(MapRecordProcessor.java:477)
    ... 15 more
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Unexpected exception from MapJoinOperator : null
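
Since the failure is in MapJoinOperator, the next thing I am considering (not yet tested) is reining in the map-join conversion so the build-side hash table does not have to fit in the task heap:

-- hypothetical next attempt, not yet verified on this query
set hive.auto.convert.join.noconditionaltask.size=268435456;  -- lower the size threshold for map-join conversion (bytes)
set hive.mapjoin.hybridgrace.hashtable=false;                 -- the same setting that is commented out above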

Upvotes: 1

Views: 451

Answers (1)

Ted Dunning

Reputation: 1907

Last I remember looking at Tez, it had a pretty simple-minded set of optimizations for sort and join exchanges. It would be quite plausible that your (relatively large) query ran into those limits.

MapReduce doesn't have these problems because every step is written completely to disk, so there aren't any in-memory joins or sorts. That leads to really slow performance sometimes, but it also gives some simple-minded guarantees and robustness.
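
If the in-memory map join is what is blowing up, a crude experiment (I haven't tried it on your data) is to make Tez behave more like MapReduce for this query and fall back to shuffle joins:

-- force shuffle (reduce-side) joins instead of in-memory map joins; expect slower but steadier behavior
set hive.auto.convert.join=false;
set hive.optimize.skewjoin=false;   -- skew handling also builds in-memory hash tables, so disable it for the test

If that runs, you at least know the map-join memory sizing is the problem rather than Tez itself.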

Have you asked on the appropriate Apache mailing list? I know that Cloudera has pretty much abandoned the Hadoop community, but there are likely still some fairly astute people around.

Upvotes: 0
