
Commit ec503de

Accommodate Hive API changes.
1 parent: b7fd0ab

8 files changed: 42 additions, 52 deletions

contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java

Lines changed: 1 addition & 1 deletion
@@ -186,7 +186,7 @@ public synchronized void registerSchemas(SchemaConfig schemaConfig, SchemaPlus p
   public Set<StoragePluginOptimizerRule> getOptimizerRules(OptimizerRulesContext optimizerContext, PlannerPhase phase) {
     switch (phase) {
       case PARTITION_PRUNING:
-        final String defaultPartitionValue = hiveConf.get(ConfVars.DEFAULTPARTITIONNAME.varname);
+        final String defaultPartitionValue = hiveConf.get(ConfVars.DEFAULT_PARTITION_NAME.varname);
         ImmutableSet.Builder<StoragePluginOptimizerRule> ruleBuilder = ImmutableSet.builder();
         ruleBuilder.add(HivePushPartitionFilterIntoScan.getFilterOnProject(optimizerContext, defaultPartitionValue));
         ruleBuilder.add(HivePushPartitionFilterIntoScan.getFilterOnScan(optimizerContext, defaultPartitionValue));
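
Note: Hive 4 renamed most HiveConf.ConfVars constants from run-together names (DEFAULTPARTITIONNAME) to underscored ones (DEFAULT_PARTITION_NAME); the underlying configuration keys are unchanged, so only the Java references need updating. A minimal sketch of the renamed lookup, assuming a Hive 4 HiveConf on the classpath (the key should still print as hive.exec.default.partition.name):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class DefaultPartitionNameSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Formerly ConfVars.DEFAULTPARTITIONNAME; the enum constant was renamed,
        // not the configuration key it resolves to.
        String key = ConfVars.DEFAULT_PARTITION_NAME.varname;
        System.out.println(key + " = " + conf.get(key));
      }
    }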

contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java

Lines changed: 4 additions & 2 deletions
@@ -57,6 +57,7 @@
  import org.apache.drill.exec.vector.ValueVector;
  import org.apache.drill.exec.work.ExecErrorConstants;

+import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.common.type.HiveDecimal;
  import org.apache.hadoop.hive.metastore.api.FieldSchema;
  import org.apache.hadoop.hive.metastore.api.Partition;

@@ -582,7 +583,7 @@ public static boolean hasHeaderOrFooter(HiveTableWithColumnCache table) {
    */
   public static void verifyAndAddTransactionalProperties(JobConf job, StorageDescriptor sd) {

-    if (AcidUtils.isTablePropertyTransactional(job)) {
+    if (AcidUtils.isTablePropertyTransactional(job.getPropsWithPrefix("transactional"))) {
       HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);

       // No work is needed, if schema evolution is used

@@ -594,7 +595,7 @@ public static void verifyAndAddTransactionalProperties(JobConf job, StorageDescr
     String colNames;
     String colTypes;

-    // Try to get get column names and types from table or partition properties. If they are absent there, get columns
+    // Try to get column names and types from table or partition properties. If they are absent there, get columns
     // data from storage descriptor of the table
     colNames = job.get(serdeConstants.LIST_COLUMNS);
     colTypes = job.get(serdeConstants.LIST_COLUMN_TYPES);

@@ -749,6 +750,7 @@ public static HiveConf generateHiveConf(HiveConf hiveConf, Map<String, String> p
     return newHiveConf;
   }

+
   /**
    * Helper method which stores partition columns in table columnListCache. If table columnListCache has exactly the
    * same columns as partition, in partition stores columns index that corresponds to identical column list.
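
Note: the AcidUtils overload taking a JobConf/Configuration appears to have been dropped in the new Hive API, so the check above is fed a property map instead. A hedged sketch of the Map overload, under the assumption that the standard "transactional" table property is what gets consulted:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.ql.io.AcidUtils;

    public class AcidCheckSketch {
      public static void main(String[] args) {
        // Table parameters as they would appear in metastore metadata.
        Map<String, String> tableParams = new HashMap<>();
        tableParams.put("transactional", "true");
        // True when the table is declared ACID/transactional.
        System.out.println(AcidUtils.isTablePropertyTransactional(tableParams));
      }
    }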

contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/readers/HiveDefaultRecordReader.java

Lines changed: 9 additions & 7 deletions
@@ -55,6 +55,7 @@
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.api.FieldSchema;
  import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.serde2.AbstractSerDe;
  import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
  import org.apache.hadoop.hive.serde2.Deserializer;
  import org.apache.hadoop.hive.serde2.SerDeException;

@@ -261,8 +262,9 @@ public void setup(OperatorContext context, OutputMutator output) throws Executio
   private Callable<Void> getInitTask(OutputMutator output) {
     return () -> {
       this.job = new JobConf(hiveConf);
+      Properties partitionProperties = HiveUtilities.getPartitionMetadata(partition, hiveTable);
       Properties hiveTableProperties = HiveUtilities.getTableMetadata(hiveTable);
-      final Deserializer tableDeserializer = createDeserializer(job, hiveTable.getSd(), hiveTableProperties);
+      final Deserializer tableDeserializer = createDeserializer(job, hiveTable.getSd(), hiveTableProperties, partitionProperties);
       final StructObjectInspector tableObjInspector = getStructOI(tableDeserializer);

       if (partition == null) {

@@ -275,9 +277,8 @@ private Callable<Void> getInitTask(OutputMutator output) {
         job.setInputFormat(HiveUtilities.getInputFormatClass(job, hiveTable.getSd(), hiveTable));
         HiveUtilities.verifyAndAddTransactionalProperties(job, hiveTable.getSd());
       } else {
-        Properties partitionProperties = HiveUtilities.getPartitionMetadata(partition, hiveTable);
         HiveUtilities.addConfToJob(job, partitionProperties);
-        this.partitionDeserializer = createDeserializer(job, partition.getSd(), partitionProperties);
+        this.partitionDeserializer = createDeserializer(job, partition.getSd(), hiveTableProperties, partitionProperties);
         this.partitionObjInspector = getStructOI(partitionDeserializer);

         this.finalObjInspector = (StructObjectInspector) ObjectInspectorConverters.getConvertedOI(partitionObjInspector, tableObjInspector);

@@ -326,7 +327,8 @@ private Callable<Void> getInitTask(OutputMutator output) {
       List<String> nestedColumnPaths = getColumns().stream()
           .map(SchemaPath::getRootSegmentPath)
           .collect(Collectors.toList());
-      ColumnProjectionUtils.appendReadColumns(job, idsOfProjectedColumns, selectedColumnNames, nestedColumnPaths);
+
+      ColumnProjectionUtils.appendReadColumns(job, idsOfProjectedColumns, selectedColumnNames, nestedColumnPaths, false);

       // Initialize selectedStructFieldRefs and columnValueWriters, which are two key collections of
       // objects used to read and save columns row data into Drill's value vectors

@@ -345,7 +347,7 @@ private Callable<Void> getInitTask(OutputMutator output) {
       if (partition != null && selectedPartitionColumnNames.size() > 0) {
         List<ValueVector> partitionVectorList = new ArrayList<>(selectedPartitionColumnNames.size());
         List<Object> partitionValueList = new ArrayList<>(selectedPartitionColumnNames.size());
-        String defaultPartitionValue = hiveConf.get(HiveConf.ConfVars.DEFAULTPARTITIONNAME.varname);
+        String defaultPartitionValue = hiveConf.get(HiveConf.ConfVars.DEFAULT_PARTITION_NAME.varname);
         OptionManager options = fragmentContext.getOptions();
         for (int i = 0; i < partitionKeyFields.size(); i++) {
           FieldSchema field = partitionKeyFields.get(i);

@@ -447,10 +449,10 @@ public void close() {
     closeMapredReader();
   }

-  private static Deserializer createDeserializer(JobConf job, StorageDescriptor sd, Properties properties) throws Exception {
+  private static Deserializer createDeserializer(JobConf job, StorageDescriptor sd, Properties hiveTableProperties, Properties partitionProperties) throws Exception {
     final Class<? extends Deserializer> c = Class.forName(sd.getSerdeInfo().getSerializationLib()).asSubclass(Deserializer.class);
     final Deserializer deserializer = c.getConstructor().newInstance();
-    deserializer.initialize(job, properties);
+    ((AbstractSerDe) deserializer).initialize(job, hiveTableProperties, partitionProperties);

     return deserializer;
   }
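
Note: the cast to AbstractSerDe reflects the Hive 4 SerDe bootstrap: the two-argument Deserializer.initialize(Configuration, Properties) is gone, and AbstractSerDe takes table and partition properties as separate arguments. A hedged sketch with LazySimpleSerDe (the column lists are illustrative, and passing null partition properties for a non-partitioned table is an assumption):

    import java.util.Properties;

    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.hive.serde2.AbstractSerDe;
    import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
    import org.apache.hadoop.mapred.JobConf;

    public class SerDeInitSketch {
      public static void main(String[] args) throws Exception {
        JobConf job = new JobConf();
        Properties tableProps = new Properties();
        tableProps.setProperty(serdeConstants.LIST_COLUMNS, "id,name");
        tableProps.setProperty(serdeConstants.LIST_COLUMN_TYPES, "int,string");

        AbstractSerDe serde = new LazySimpleSerDe();
        // Hive 3: serde.initialize(job, tableProps);
        // Hive 4: table and partition properties travel separately.
        serde.initialize(job, tableProps, null);
      }
    }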

contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/HiveTestFixture.java

Lines changed: 11 additions & 11 deletions
@@ -133,8 +133,8 @@ public HiveDriverManager getDriverManager() {
    * from pluginConf or driverConf
    */
   public String getWarehouseDir() {
-    String warehouseDir = pluginConf.get(ConfVars.METASTOREWAREHOUSE.varname);
-    return nonNull(warehouseDir) ? warehouseDir : driverConf.get(ConfVars.METASTOREWAREHOUSE.varname);
+    String warehouseDir = pluginConf.get(ConfVars.METASTORE_WAREHOUSE.varname);
+    return nonNull(warehouseDir) ? warehouseDir : driverConf.get(ConfVars.METASTORE_WAREHOUSE.varname);
   }

   public static class Builder {

@@ -153,22 +153,22 @@ private Builder(File baseDir) {
     String warehouseDir = new File(baseDir, "warehouse").getAbsolutePath();
     // Drill Hive Storage plugin defaults
     pluginName("hive");
-    pluginOption(ConfVars.METASTOREURIS, "");
-    pluginOption(ConfVars.METASTORECONNECTURLKEY, jdbcUrl);
-    pluginOption(ConfVars.METASTOREWAREHOUSE, warehouseDir);
+    pluginOption(ConfVars.METASTORE_URIS, "");
+    pluginOption(ConfVars.METASTORE_CONNECT_URL_KEY, jdbcUrl);
+    pluginOption(ConfVars.METASTORE_WAREHOUSE, warehouseDir);
     pluginOption(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
     // Hive Driver defaults
-    driverOption(ConfVars.METASTORECONNECTURLKEY, jdbcUrl);
+    driverOption(ConfVars.METASTORE_CONNECT_URL_KEY, jdbcUrl);
     driverOption(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
-    driverOption(ConfVars.METASTOREWAREHOUSE, warehouseDir);
+    driverOption(ConfVars.METASTORE_WAREHOUSE, warehouseDir);
     driverOption("mapred.job.tracker", "local");
-    driverOption(ConfVars.SCRATCHDIR, createDirWithPosixPermissions(baseDir, "scratch_dir").getAbsolutePath());
-    driverOption(ConfVars.LOCALSCRATCHDIR, createDirWithPosixPermissions(baseDir, "local_scratch_dir").getAbsolutePath());
-    driverOption(ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+    driverOption(ConfVars.SCRATCH_DIR, createDirWithPosixPermissions(baseDir, "scratch_dir").getAbsolutePath());
+    driverOption(ConfVars.LOCAL_SCRATCH_DIR, createDirWithPosixPermissions(baseDir, "local_scratch_dir").getAbsolutePath());
+    driverOption(ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict");
     driverOption(ConfVars.METASTORE_AUTO_CREATE_ALL, Boolean.toString(true));
     driverOption(ConfVars.METASTORE_SCHEMA_VERIFICATION, Boolean.toString(false));
     driverOption(ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING, Boolean.toString(false));
-    driverOption(HiveConf.ConfVars.HIVESESSIONSILENT, Boolean.toString(true));
+    driverOption(ConfVars.HIVE_SESSION_SILENT, Boolean.toString(true));
     driverOption(ConfVars.HIVE_CBO_ENABLED, Boolean.toString(false));
   }
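
Note: all of the fixture's option keys survive the upgrade; only the constant names change. As a quick cross-check, a hedged sketch of an embedded-metastore HiveConf built with the renamed constants touched above (the Derby URL and paths are placeholders):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class EmbeddedMetastoreConfSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.set(ConfVars.METASTORE_URIS.varname, "");                      // was METASTOREURIS
        conf.set(ConfVars.METASTORE_CONNECT_URL_KEY.varname,                // was METASTORECONNECTURLKEY
            "jdbc:derby:;databaseName=/tmp/metastore_db;create=true");
        conf.set(ConfVars.METASTORE_WAREHOUSE.varname, "/tmp/warehouse");   // was METASTOREWAREHOUSE
        conf.set(ConfVars.DYNAMIC_PARTITIONING_MODE.varname, "nonstrict");  // was DYNAMICPARTITIONINGMODE
        conf.setBoolVar(ConfVars.HIVE_SESSION_SILENT, true);                // was HIVESESSIONSILENT
        System.out.println(conf.get(ConfVars.METASTORE_WAREHOUSE.varname));
      }
    }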

contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/HiveTestUtilities.java

Lines changed: 5 additions & 9 deletions
@@ -25,9 +25,11 @@
  import java.util.EnumSet;
  import java.util.Set;

+import org.apache.drill.common.exceptions.DrillRuntimeException;
  import org.apache.drill.test.QueryBuilder;
  import org.apache.drill.test.TestTools;
  import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
  import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
  import org.apache.hadoop.util.ComparableVersion;
  import org.apache.hive.common.util.HiveVersionInfo;

@@ -50,16 +52,10 @@ public class HiveTestUtilities {
    * Execute the given <i>query</i> on the given <i>hiveDriver</i> instance.
    */
   public static void executeQuery(Driver hiveDriver, String query) {
-    CommandProcessorResponse response;
     try {
-      response = hiveDriver.run(query);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-
-    if (response.getResponseCode() != 0) {
-      throw new RuntimeException(String.format("Failed to execute command '%s', errorMsg = '%s'",
-        query, response.getErrorMessage()));
+      hiveDriver.run(query);
+    } catch (CommandProcessorException e) {
+      throw new DrillRuntimeException(String.format("Failed to execute command '%s'", query), e);
     }
   }
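
Note: this tracks the Hive 4 Driver contract: run() now throws a checked CommandProcessorException on failure rather than returning a response object whose code must be inspected, so the manual response-code branch is obsolete. A minimal sketch of the new shape (names are illustrative; assumes an already-configured Hive 4 Driver):

    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.processors.CommandProcessorException;

    public final class DriverRunSketch {
      static void run(Driver hiveDriver, String query) {
        try {
          hiveDriver.run(query);  // Hive 3 returned a CommandProcessorResponse to inspect
        } catch (CommandProcessorException e) {
          // The response code and error message now travel with the exception.
          throw new RuntimeException("Failed to execute command '" + query + "'", e);
        }
      }
    }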

contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/BaseTestHiveImpersonation.java

Lines changed: 7 additions & 7 deletions
@@ -41,7 +41,7 @@
  import static org.apache.drill.exec.hive.HiveTestUtilities.createDirWithPosixPermissions;
  import static org.apache.drill.exec.hive.HiveTestUtilities.executeQuery;
  import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTOREURIS;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_URIS;

  public class BaseTestHiveImpersonation extends BaseTestImpersonation {
    protected static final String hivePluginName = "hive";

@@ -79,21 +79,21 @@ protected static void prepHiveConfAndData() throws Exception {

     // Configure metastore persistence db location on local filesystem
     final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true", metaStoreDBDir.getAbsolutePath());
-    hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);
+    hiveConf.set(ConfVars.METASTORE_CONNECT_URL_KEY.varname, dbUrl);

-    hiveConf.set(ConfVars.SCRATCHDIR.varname, "file://" + scratchDir.getAbsolutePath());
-    hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, localScratchDir.getAbsolutePath());
+    hiveConf.set(ConfVars.SCRATCH_DIR.varname, "file://" + scratchDir.getAbsolutePath());
+    hiveConf.set(ConfVars.LOCAL_SCRATCH_DIR.varname, localScratchDir.getAbsolutePath());
     hiveConf.set(ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "false");
     hiveConf.set(ConfVars.METASTORE_AUTO_CREATE_ALL.varname, "true");
     hiveConf.set(ConfVars.HIVE_CBO_ENABLED.varname, "false");
     hiveConf.set(ConfVars.HIVESTATSAUTOGATHER.varname, "false");
     hiveConf.set(ConfVars.HIVESTATSCOLAUTOGATHER.varname, "false");
-    hiveConf.set(ConfVars.HIVESESSIONSILENT.varname, "true");
+    hiveConf.set(ConfVars.HIVE_SESSION_SILENT.varname, "true");

     // Set MiniDFS conf in HiveConf
     hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));

-    whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
+    whDir = hiveConf.get(ConfVars.METASTORE_WAREHOUSE.varname);
     FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));

     dirTestWatcher.copyResourceToRoot(Paths.get("student.txt"));

@@ -122,7 +122,7 @@ protected static void startHiveMetaStore() throws Exception {
     }
     final int port = (int) metaStoreUtilsClass.getDeclaredMethod("findFreePort").invoke(null);

-    hiveConf.set(METASTOREURIS.varname, "thrift://localhost:" + port);
+    hiveConf.set(METASTORE_URIS.varname, "thrift://localhost:" + port);

     metaStoreUtilsClass.getDeclaredMethod("startMetaStore", int.class, hadoopThriftAuthBridgeClass, confClass)
       .invoke(null, port, hadoopThriftAuthBridge, hiveConf);

contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestSqlStdBasedAuthorization.java

Lines changed: 2 additions & 2 deletions
@@ -45,7 +45,7 @@
  import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER;
  import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_CBO_ENABLED;
  import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTOREURIS;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_URIS;
  import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL;
  import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI;
  import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION;

@@ -100,7 +100,7 @@ private static void setSqlStdBasedAuthorizationInHiveConf() {

   private static Map<String, String> getHivePluginConfig() {
     final Map<String, String> hiveConfig = Maps.newHashMap();
-    hiveConfig.put(METASTOREURIS.varname, hiveConf.get(METASTOREURIS.varname));
+    hiveConfig.put(METASTORE_URIS.varname, hiveConf.get(METASTORE_URIS.varname));
     hiveConfig.put(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));
     hiveConfig.put(HIVE_SERVER2_ENABLE_DOAS.varname, hiveConf.get(HIVE_SERVER2_ENABLE_DOAS.varname));
     hiveConfig.put(METASTORE_EXECUTE_SET_UGI.varname, hiveConf.get(METASTORE_EXECUTE_SET_UGI.varname));

contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestStorageBasedHiveAuthorization.java

Lines changed: 3 additions & 13 deletions
@@ -43,17 +43,7 @@
  import static org.apache.drill.exec.hive.HiveTestUtilities.executeQuery;
  import static com.google.common.collect.Lists.newArrayList;
  import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.DYNAMICPARTITIONINGMODE;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_CBO_ENABLED;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTOREURIS;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.*;

  @Category({SlowTest.class, HiveStorageTest.class})
  public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation {

@@ -184,12 +174,12 @@ private static void setStorabaseBasedAuthorizationInHiveConf() {
     hiveConf.set(HIVE_METASTORE_AUTHORIZATION_MANAGER.varname, StorageBasedAuthorizationProvider.class.getName());
     hiveConf.set(HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname, "true");
     hiveConf.set(METASTORE_EXECUTE_SET_UGI.varname, "true");
-    hiveConf.set(DYNAMICPARTITIONINGMODE.varname, "nonstrict");
+    hiveConf.set(DYNAMIC_PARTITIONING_MODE.varname, "nonstrict");
   }

   private static Map<String, String> getHivePluginConfig() {
     final Map<String, String> hiveConfig = Maps.newHashMap();
-    hiveConfig.put(METASTOREURIS.varname, hiveConf.get(METASTOREURIS.varname));
+    hiveConfig.put(METASTORE_URIS.varname, hiveConf.get(METASTORE_URIS.varname));
     hiveConfig.put(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));
     hiveConfig.put(HIVE_SERVER2_ENABLE_DOAS.varname, hiveConf.get(HIVE_SERVER2_ENABLE_DOAS.varname));
     hiveConfig.put(METASTORE_EXECUTE_SET_UGI.varname, hiveConf.get(METASTORE_EXECUTE_SET_UGI.varname));
