diff --git a/.gitignore b/.gitignore index 91ad75bb4..08edd2660 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ *.class +classes/ target/ .classpath .project @@ -16,3 +17,7 @@ sentry-core/sentry-core-common/src/gen *.ear test-output/ maven-repo/ +*.orig +*.rej +.DS_Store +**/thirdparty/* diff --git a/LICENSE.txt b/LICENSE.txt index d64569567..c29b59dda 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -200,3 +200,70 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +================================================================================ + +The Apache Sentry (incubating) distribution includes the following sources/binaries. +The use of these sources/binaries is subject to the terms and conditions of +their respective licenses. + +For sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/velocity/jquery.autocomplete.js: + +The MIT License (MIT) + +Copyright (c) 2007 Dylan Verheul, Dan G. Switzer, Anjesh Tuladhar, Jörn Zaefferer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +For sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/lang: +stopwords_ar.txt +stopwords_bg.txt +stopwords_fa.txt +stopwords_hi.txt +stopwords_ro.txt + +BSD License + +Copyright (c) 2005, Jacques Savoy. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

For sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/lang:
stopwords_da.txt
stopwords_de.txt
stopwords_es.txt
stopwords_fi.txt
stopwords_fr.txt
stopwords_hu.txt
stopwords_it.txt
stopwords_nl.txt
stopwords_no.txt
stopwords_pt.txt
stopwords_ru.txt
stopwords_sv.txt

BSD License

Copyright (c) 2001, Dr Martin Porter, and (for the Java developments) Copyright (c) 2002, Richard Boulton.

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

diff --git a/README.md b/README.md
index 3aabc60a9..24701f53d 100644
--- a/README.md
+++ b/README.md
@@ -10,15 +10,16 @@ Bug and Issues tracker

 * https://issues.apache.org/jira/browse/SENTRY

+Wiki
+
+* https://cwiki.apache.org/confluence/display/SENTRY/Home
+
 Building Sentry

 Building Sentry requires the following tools:

-* Apache Maven 3.0+
-* Java JDK 1.6+
-
-Running hive end to end tests requires:
-* wget
+* Apache Maven 3.2.5+ (older Maven versions may hit issues with the pentaho library)
+* Java JDK7 (JDK8 builds fail with "can't access TBase" errors)

 To compile Sentry, run:

@@ -26,7 +27,7 @@ mvn install -DskipTests

 To run Sentry tests, run:

-mvn test -Pdownload-hadoop
+mvn test

 To build a distribution, run:

diff --git a/bin/sentryShell b/bin/sentryShell
new file mode 100755
index 000000000..d21a65f7a
--- /dev/null
+++ b/bin/sentryShell
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+myhome=`cd "$bin/.."; pwd`
+
+if [[ -z $SENTRY_HOME ]] ; then
+  export SENTRY_HOME=$myhome
+fi
+
+# check for hadoop in the path
+HADOOP_IN_PATH=`which hadoop 2>/dev/null`
+if [ -f "${HADOOP_IN_PATH}" ]; then
+  HADOOP_DIR=`dirname "$HADOOP_IN_PATH"`/..
+fi
+# HADOOP_HOME env variable overrides hadoop in the path
+HADOOP_HOME=${HADOOP_HOME:-${HADOOP_PREFIX:-$HADOOP_DIR}}
+if [ "$HADOOP_HOME" == "" ]; then
+  echo "Cannot find hadoop installation: \$HADOOP_HOME or \$HADOOP_PREFIX must be set or hadoop must be in the path";
+  exit 4;
+fi
+
+HADOOP=$HADOOP_HOME/bin/hadoop
+if [ ! -f "${HADOOP}" ]; then
+  echo "Cannot find hadoop installation: \$HADOOP_HOME or \$HADOOP_PREFIX must be set or hadoop must be in the path";
+  exit 4;
+fi
+
+export _CMD_JAR=${SENTRY_SHELL_JAR:-sentry-provider-db-*.jar}
+for f in ${SENTRY_HOME}/lib/*.jar; do
+  HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${f}
+done
+export HADOOP_CLASSPATH
+
+for f in ${SENTRY_HOME}/lib/server/*.jar; do
+  HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${f}
+done
+for f in ${SENTRY_HOME}/lib/plugins/*.jar; do
+  HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${f}
+done
+
+args=()
+# get the type argument for the command, and check whether to use the shell for the hive model or the generic model.
+# todo: currently only the hive model is supported; generic model support needs to be added
+while [ $# -gt 0 ]; do # Until you run out of parameters . . .
+  if [[ "$1" = "-t" || "$1" = "--type" ]]; then
+    # currently, only support the hive model
+    if ! [[ $2 =~ ^[Hh][Ii][Vv][Ee]$ ]]; then
+      echo "The type $2 is not supported!"
+      exit 1
+    fi
+  fi
+  args+=("$1")
+  shift
+done
+
+exec $HADOOP jar ${SENTRY_HOME}/lib/${_CMD_JAR} org.apache.sentry.provider.db.tools.SentryShellHive "${args[@]}"
diff --git a/build-tools/sentry-pmd-ruleset.xml b/build-tools/sentry-pmd-ruleset.xml
new file mode 100644
index 000000000..8a2644693
--- /dev/null
+++ b/build-tools/sentry-pmd-ruleset.xml
@@ -0,0 +1,46 @@
+[46-line XML file; the markup was lost in this rendering. Recoverable content: the ruleset description "A PMD ruleset for Apache Sentry", the Apache license header, and a list of rule references.]
diff --git a/conf/sentry-site.xml.hive-client.example b/conf/sentry-site.xml.hive-client.example
index fd87c62ce..c9f1d0588 100644
--- a/conf/sentry-site.xml.hive-client.example
+++ b/conf/sentry-site.xml.hive-client.example
@@ -55,7 +55,7 @@
     sentry.service.server.principal
-    hivemeta/centos64.cloudera.com@HS2.CLOUDERA.COM
+    sentry/centos64.example.com@EXAMPLE.COM
     sentry.metastore.service.users
diff --git a/conf/sentry-site.xml.hive-client.template b/conf/sentry-site.xml.hive-client.template
index 0491de787..0e8a74ed9 100644
--- a/conf/sentry-site.xml.hive-client.template
+++ b/conf/sentry-site.xml.hive-client.template
@@ -86,7 +86,7 @@
     sentry.hive.failure.hooks
-    Deprecated Name: hive.sentry.failure.hooks. Any failure hooks to be configured like navigator (i.e.
com.cloudera.navigator.audit.hive.HiveSentryOnFailureHook) + Deprecated Name: hive.sentry.failure.hooks @@ -97,4 +97,4 @@ - \ No newline at end of file + diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch.py b/dev-support/test-patch.py index 7e701c393..f9f79eadc 100644 --- a/dev-support/test-patch.py +++ b/dev-support/test-patch.py @@ -88,7 +88,7 @@ def jira_post_comment(result, defect, branch, username, password): # hack (from hadoop) but REST api doesn't list attachments? def jira_get_attachment(result, defect, username, password): html = jira_get_defect_html(result, defect, username, password) - pattern = "(/secure/attachment/[0-9]+/%s[0-9\.\-]*\.(patch|txt|patch\.txt))" % (re.escape(defect)) + pattern = "(/secure/attachment/\d+/%s[\w\.\-]*\.(patch|txt|patch\.txt))" % (re.escape(defect)) matches = [] for match in re.findall(pattern, html, re.IGNORECASE): matches += [ match[0] ] @@ -282,6 +282,16 @@ def post_jira_comment_and_exit(): print "ERROR: No attachments found for %s" % (defect) sys.exit(1) result.attachment = attachment + # parse branch info + branchPattern = re.compile('/secure/attachment/\d+/%s(\.\d+)-(\S+)\.(patch|txt|patch.\txt)' % (re.escape(defect))) + try: + branchInfo = re.search(branchPattern,attachment) + if branchInfo: + branch = branchInfo.group(2) + print "INFO: Branch info is detected from attachment name: " + branch + except: + branch = "master" + print "INFO: Branch info is not detected from attachment name, use branch: " + branch patch_contents = jira_request(result, result.attachment, username, password, None, {}).read() patch_file = "%s/%s.patch" % (output_dir, defect) with open(patch_file, 'a') as fh: diff --git a/pom.xml b/pom.xml index 2f9788062..d25c314e0 100644 --- a/pom.xml +++ b/pom.xml @@ -20,12 +20,12 @@ limitations under the License. org.apache apache - 13 + 17 org.apache.sentry sentry - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT Sentry component Sentry pom @@ -50,49 +50,54 @@ limitations under the License. UTF-8 - 1.6 - 1.6 + ${basedir}/build-tools + 1.7 + 1.7 1.0b3 - 1.7 - 2.9 - 1.3.1 + 0.7.1.RELEASE 1.8 + 2.2 + 1.2 + 2.2 2.6 1.2 - 0.7.1.RELEASE - 3.3.0-release + 2.7.1 3.2.6 3.2.12 3.2.12 - 3.0.1 + 4.0.1 10.10.2.0 - 1.2 - 1.1.0 - 2.5.0-cdh5.2.0-SNAPSHOT + 3.0 1.4.1 11.0.2 - 4.9 - 0.9.2 - 0.9.2 - 2.6.0 + 2.6.0 + 1.3 + 1.3.0-SNAPSHOT + 1.1.0 + 1.8.8 + 3.0.1 + 7.6.16.v20140903 + 2.5 4.10 + 0.9.2 + 0.9.2 1.2.16 + 1.7 + 2.9 + 1.3.1 + 3.0.1 1.8.5 - 1.2.1 + 1.2 + 0.12.0 + 1.2.3 1.6.1 4.10.2 - 3.4.5 - 0.12.0 - 1.8.8 - 3.1.0 - 7.6.16.v20140903 - 2.6.0 - 2.5 + 1.99.6 ${maven.test.classpath} - 3.0 - 1.2 - 2.2 + 3.4.5 + 0.9.0.0 + 1.3.2 @@ -156,6 +161,16 @@ limitations under the License. org.apache.hadoop hadoop-common ${hadoop.version} + + + curator-client + org.apache.curator + + + curator-framework + org.apache.curator + + org.apache.hadoop @@ -172,6 +187,12 @@ limitations under the License. org.apache.hadoop hadoop-minicluster ${hadoop.version} + + + curator-client + org.apache.curator + + org.apache.hadoop @@ -232,6 +253,11 @@ limitations under the License. + + org.apache.sentry + solr-sentry-core + ${project.version} + org.apache.sentry solr-sentry-handlers @@ -312,6 +338,11 @@ limitations under the License. sentry-core-model-sqoop ${project.version} + + org.apache.sentry + sentry-core-model-kafka + ${project.version} + org.apache.hive hive-jdbc @@ -331,6 +362,12 @@ limitations under the License. 
org.apache.hive hive-exec ${hive.version} + + + apache-curator + org.apache.curator + + org.apache.hive @@ -357,6 +394,11 @@ limitations under the License. sentry-binding-hive ${project.version} + + org.apache.sentry + sentry-binding-hive-v2 + ${project.version} + org.apache.sentry sentry-binding-solr @@ -368,6 +410,16 @@ limitations under the License. ${project.version} test-jar + + org.apache.sentry + sentry-binding-sqoop + ${project.version} + + + org.apache.sentry + sentry-binding-kafka + ${project.version} + org.apache.sentry sentry-provider-common @@ -408,6 +460,12 @@ limitations under the License. sentry-provider-db ${project.version} + + org.apache.sentry + sentry-provider-db + ${project.version} + test-jar + org.apache.sentry sentry-policy-common @@ -428,6 +486,16 @@ limitations under the License. sentry-policy-search ${project.version} + + org.apache.sentry + sentry-policy-sqoop + ${project.version} + + + org.apache.sentry + sentry-policy-kafka + ${project.version} + org.apache.sentry sentry-dist @@ -514,6 +582,41 @@ limitations under the License. cglib-nodep ${cglib.version} + + org.apache.commons + commons-pool2 + ${commons-pool2.version} + + + org.apache.sqoop + sqoop-common + ${sqoop.version} + + + org.apache.sqoop + sqoop-security + ${sqoop.version} + + + org.apache.sqoop + sqoop-server + ${sqoop.version} + + + org.apache.sqoop + test + ${sqoop.version} + + + org.hamcrest + hamcrest-all + ${hamcrest.version} + + + org.apache.kafka + kafka_2.11 + ${kafka.version} + @@ -540,6 +643,34 @@ limitations under the License. org.apache.rat apache-rat-plugin + + org.apache.maven.plugins + maven-pmd-plugin + 3.5 + + + ${buildtools.dir}/sentry-pmd-ruleset.xml + + UTF-8 + true + false + true + true + ${targetJdk} + + ${basedir}/src/main/generated + + + + + validate + validate + + check + + + + org.apache.maven.plugins maven-eclipse-plugin @@ -605,7 +736,7 @@ limitations under the License. org.apache.felix maven-bundle-plugin - 2.4.0 + 2.5.4 org.apache.maven.plugins @@ -628,7 +759,7 @@ limitations under the License. org.apache.maven.plugins maven-compiler-plugin - 2.5.1 + 3.1 ${maven.compile.source} ${maven.compile.target} @@ -647,15 +778,15 @@ limitations under the License. org.apache.maven.plugins maven-surefire-plugin - 2.16 + 2.18 - always + 3 -Xmx1500m -Dhive.log.dir=./target/ 900 true - -Xms512m -Xmx2g + -Xms512m -Xmx2g -XX:MaxPermSize=256m true @@ -710,6 +841,11 @@ limitations under the License. **/upgrade.* **/datanucleus.log **/metastore_db/ + **/*.rej + **/thirdparty/ + + **/*.crt + **/*.jks @@ -736,26 +872,39 @@ limitations under the License. + + + nochecks + + true + + + + activate-buildtools-in-module + + + ${basedir}/../build-tools/sentry-pmd-ruleset.xml + + + + ${basedir}/../build-tools + + + + activate-buildtools-in-submodule + + + ${basedir}/../../build-tools/sentry-pmd-ruleset.xml + + + + ${basedir}/../../build-tools + + + + + - - cdh.repo - https://repository.cloudera.com/artifactory/cloudera-repos - Cloudera Repositories - - false - - - - cdh.snapshots.repo - https://repository.cloudera.com/artifactory/libs-snapshot-local - Cloudera Snapshots Repository - - true - - - false - - apache https://repository.apache.org/content/repositories/ diff --git a/sentry-binding/pom.xml b/sentry-binding/pom.xml index 7428aa5e6..9e4999bef 100644 --- a/sentry-binding/pom.xml +++ b/sentry-binding/pom.xml @@ -22,7 +22,7 @@ limitations under the License. 
org.apache.sentry sentry - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-binding @@ -31,7 +31,20 @@ limitations under the License. sentry-binding-hive + sentry-binding-kafka sentry-binding-solr + sentry-binding-sqoop + + + hive-authz2 + + false + + + sentry-binding-hive-v2 + + + diff --git a/sentry-binding/sentry-binding-hive-v2/pom.xml b/sentry-binding/sentry-binding-hive-v2/pom.xml new file mode 100644 index 000000000..ef6048cef --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/pom.xml @@ -0,0 +1,158 @@ + + + + 4.0.0 + + + org.apache.sentry + sentry-binding + 1.7.0-incubating-SNAPSHOT + + + sentry-binding-hive-v2 + Sentry Binding v2 for Hive + + + + org.apache.sentry + sentry-binding-hive + + + org.apache.httpcomponents + httpclient + + + org.apache.httpcomponents + httpcore + + + + + org.apache.thrift + libthrift + + + org.apache.httpcomponents + httpclient + + + org.apache.httpcomponents + httpcore + + + + + org.apache.derby + derby + + + junit + junit + test + + + org.apache.hive + hive-exec + ${hive-v2.version} + provided + + + org.apache.hive + hive-service + ${hive-v2.version} + provided + + + org.apache.hive + hive-metastore + ${hive-v2.version} + provided + + + org.apache.hive + hive-shims + ${hive-v2.version} + provided + + + org.apache.hive + hive-serde + ${hive-v2.version} + provided + + + org.apache.hive + hive-common + ${hive-v2.version} + provided + + + org.apache.sentry + sentry-core-common + + + org.apache.sentry + sentry-core-model-db + + + org.apache.sentry + sentry-provider-common + + + + org.apache.sentry + sentry-provider-db + + + org.apache.hive + hive-beeline + + + org.apache.hive + hive-metastore + + + + + org.apache.sentry + sentry-provider-file + + + org.apache.sentry + sentry-policy-db + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-client + ${hadoop.version} + provided + + + org.mockito + mockito-all + test + + + + diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/HiveAuthzBindingHookV2.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/HiveAuthzBindingHookV2.java new file mode 100644 index 000000000..67cf2663a --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/HiveAuthzBindingHookV2.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.binding.hive.v2; + +import java.io.Serializable; +import java.util.List; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.exec.SentryFilterDDLTask; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; +import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.DDLWork; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.sentry.binding.hive.HiveAuthzBindingHook; +import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.core.common.Subject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HiveAuthzBindingHookV2 extends AbstractSemanticAnalyzerHook { + private static final Logger LOG = LoggerFactory + .getLogger(HiveAuthzBindingHookV2.class); + private final HiveAuthzBinding hiveAuthzBinding; + private final HiveAuthzConf authzConf; + + public HiveAuthzBindingHookV2() throws Exception { + SessionState session = SessionState.get(); + if(session == null) { + throw new IllegalStateException("Session has not been started"); + } + + HiveConf hiveConf = session.getConf(); + if(hiveConf == null) { + throw new IllegalStateException("Session HiveConf is null"); + } + authzConf = HiveAuthzBindingHook.loadAuthzConf(hiveConf); + hiveAuthzBinding = new HiveAuthzBinding(hiveConf, authzConf); + } + + @Override + public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) + throws SemanticException { + return ast; + } + + /** + * Post analyze hook that invokes hive auth bindings + */ + @Override + public void postAnalyze(HiveSemanticAnalyzerHookContext context, + List> rootTasks) throws SemanticException { + HiveOperation stmtOperation = getCurrentHiveStmtOp(); + Subject subject = new Subject(context.getUserName()); + for (int i = 0; i < rootTasks.size(); i++) { + Task task = rootTasks.get(i); + if (task instanceof DDLTask) { + SentryFilterDDLTask filterTask = + new SentryFilterDDLTask(hiveAuthzBinding, subject, stmtOperation); + filterTask.setWork((DDLWork)task.getWork()); + rootTasks.set(i, filterTask); + } + } + } + + private HiveOperation getCurrentHiveStmtOp() { + SessionState sessState = SessionState.get(); + if (sessState == null) { + LOG.warn("SessionState is null"); + return null; + } + return sessState.getHiveOperation(); + } + +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/HiveAuthzBindingSessionHookV2.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/HiveAuthzBindingSessionHookV2.java new file mode 100644 index 000000000..3fbb62662 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/HiveAuthzBindingSessionHookV2.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.binding.hive.v2;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.session.HiveSessionHookContext;
+import org.apache.sentry.binding.hive.HiveAuthzBindingHook;
+import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
+
+import com.google.common.base.Joiner;
+
+/**
+ * HiveAuthzBindingSessionHookV2 is the HiveServer2 session hook for Hive authorization v2; it
+ * updates the session configuration for Sentry.
+ */
+public class HiveAuthzBindingSessionHookV2 implements
+    org.apache.hive.service.cli.session.HiveSessionHook {
+  public static final String SCRATCH_DIR_PERMISSIONS = "700";
+  public static final String SEMANTIC_HOOK = HiveAuthzBindingHookV2.class.getName();
+  public static final String ACCESS_RESTRICT_LIST = Joiner.on(",").join(
+      ConfVars.SEMANTIC_ANALYZER_HOOK.varname, ConfVars.PREEXECHOOKS.varname,
+      ConfVars.SCRATCHDIR.varname, ConfVars.LOCALSCRATCHDIR.varname,
+      ConfVars.METASTOREURIS.varname, ConfVars.METASTORECONNECTURLKEY.varname,
+      ConfVars.HADOOPBIN.varname, ConfVars.HIVESESSIONID.varname, ConfVars.HIVEAUXJARS.varname,
+      ConfVars.HIVESTATSDBCONNECTIONSTRING.varname, ConfVars.SCRATCHDIRPERMISSION.varname,
+      ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.varname,
+      ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname,
+      ConfVars.HIVE_CAPTURE_TRANSFORM_ENTITY.varname, HiveAuthzConf.HIVE_ACCESS_CONF_URL,
+      HiveAuthzConf.HIVE_SENTRY_CONF_URL, HiveAuthzConf.HIVE_ACCESS_SUBJECT_NAME,
+      HiveAuthzConf.HIVE_SENTRY_SUBJECT_NAME, HiveAuthzConf.SENTRY_ACTIVE_ROLE_SET);
+
+  /**
+   * The session hook for Sentry authorization that sets the required session-level configuration:
+   * 1. Set up the Sentry hooks (semantic, exec and filter hooks). 2. Set additional config
+   * properties required for auth: set HIVE_CAPTURE_TRANSFORM_ENTITY = true and
+   * SCRATCHDIRPERMISSION = 700. 3.
+ * Add sensitive config parameters to the config restrict list so that they can't be overridden by + * users + */ + @Override + public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLException { + // Add sentry hooks to the session configuration + HiveConf sessionConf = sessionHookContext.getSessionConf(); + + appendConfVar(sessionConf, ConfVars.SEMANTIC_ANALYZER_HOOK.varname, SEMANTIC_HOOK); + // enable sentry authorization V2 + sessionConf.setBoolean(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED.varname, true); + sessionConf.setBoolean(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, false); + sessionConf.set(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER.varname, + "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator"); + + // grant all privileges for table to its owner + sessionConf.setVar(ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS, ""); + + // Enable compiler to capture transform URI referred in the query + sessionConf.setBoolVar(ConfVars.HIVE_CAPTURE_TRANSFORM_ENTITY, true); + + // set security command list + HiveAuthzConf authzConf = HiveAuthzBindingHook.loadAuthzConf(sessionConf); + String commandWhitelist = + authzConf.get(HiveAuthzConf.HIVE_SENTRY_SECURITY_COMMAND_WHITELIST, + HiveAuthzConf.HIVE_SENTRY_SECURITY_COMMAND_WHITELIST_DEFAULT); + sessionConf.setVar(ConfVars.HIVE_SECURITY_COMMAND_WHITELIST, commandWhitelist); + + // set additional configuration properties required for auth + sessionConf.setVar(ConfVars.SCRATCHDIRPERMISSION, SCRATCH_DIR_PERMISSIONS); + + // setup restrict list + sessionConf.addToRestrictList(ACCESS_RESTRICT_LIST); + + // set user name + sessionConf.set(HiveAuthzConf.HIVE_ACCESS_SUBJECT_NAME, sessionHookContext.getSessionUser()); + sessionConf.set(HiveAuthzConf.HIVE_SENTRY_SUBJECT_NAME, sessionHookContext.getSessionUser()); + + // Set MR ACLs to session user + appendConfVar(sessionConf, JobContext.JOB_ACL_VIEW_JOB, sessionHookContext.getSessionUser()); + appendConfVar(sessionConf, JobContext.JOB_ACL_MODIFY_JOB, sessionHookContext.getSessionUser()); + } + + // Setup given sentry hooks + private void appendConfVar(HiveConf sessionConf, String confVar, String sentryConfVal) { + String currentValue = sessionConf.get(confVar, "").trim(); + if (currentValue.isEmpty()) { + currentValue = sentryConfVal; + } else { + currentValue = sentryConfVal + "," + currentValue; + } + sessionConf.set(confVar, currentValue); + } + +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryAuthorizerFactory.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryAuthorizerFactory.java new file mode 100644 index 000000000..4a5cbcf85 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryAuthorizerFactory.java @@ -0,0 +1,164 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.binding.hive.v2; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.CLIENT_TYPE; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory; +import org.apache.sentry.binding.hive.HiveAuthzBindingHook; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.binding.hive.v2.authorizer.DefaultSentryAccessController; +import org.apache.sentry.binding.hive.v2.authorizer.DefaultSentryValidator; +import org.apache.sentry.binding.hive.v2.authorizer.SentryHiveAccessController; +import org.apache.sentry.binding.hive.v2.authorizer.SentryHiveAuthorizationValidator; +import org.apache.sentry.binding.hive.v2.authorizer.SentryHiveAuthorizer; + +import com.google.common.annotations.VisibleForTesting; + +public class SentryAuthorizerFactory implements HiveAuthorizerFactory { + public static final String HIVE_SENTRY_ACCESS_CONTROLLER = + "hive.security.sentry.access.controller"; + public static final String HIVE_SENTRY_AUTHORIZATION_CONTROLLER = + "hive.security.sentry.authorization.controller"; + private HiveAuthzConf authzConf; + + @Override + public HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory, + HiveConf conf, HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) + throws HiveAuthzPluginException { + HiveAuthzSessionContext sessionContext; + try { + this.authzConf = HiveAuthzBindingHook.loadAuthzConf(conf); + sessionContext = applyTestSettings(ctx, conf); + assertHiveCliAuthDisabled(conf, sessionContext); + } catch (Exception e) { + throw new HiveAuthzPluginException(e); + } + SentryHiveAccessController accessController = + getAccessController(conf, authzConf, authenticator, sessionContext); + SentryHiveAuthorizationValidator authzValidator = + getAuthzValidator(conf, authzConf, authenticator); + + return new SentryHiveAuthorizer(accessController, authzValidator); + } + + private HiveAuthzSessionContext applyTestSettings(HiveAuthzSessionContext ctx, HiveConf conf) { + if (conf.getBoolVar(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE) + && ctx.getClientType() == CLIENT_TYPE.HIVECLI) { + // create new session ctx object with HS2 as client type + HiveAuthzSessionContext.Builder ctxBuilder = new HiveAuthzSessionContext.Builder(ctx); + ctxBuilder.setClientType(CLIENT_TYPE.HIVESERVER2); + return ctxBuilder.build(); + } + return ctx; + } + + private void assertHiveCliAuthDisabled(HiveConf conf, HiveAuthzSessionContext ctx) + throws HiveAuthzPluginException { + if (ctx.getClientType() == 
CLIENT_TYPE.HIVECLI
+        && conf.getBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+      throw new HiveAuthzPluginException(
+          "SQL standards based authorization should not be enabled from hive cli. "
+              + "Instead the use of storage based authorization in hive metastore is recommended. Set "
+              + ConfVars.HIVE_AUTHORIZATION_ENABLED.varname + "=false to disable authz within cli");
+    }
+  }
+
+  /**
+   * Visible for testing only.
+   */
+  @VisibleForTesting
+  protected HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory,
+      HiveConf conf, HiveAuthzConf authzConf, HiveAuthenticationProvider authenticator,
+      HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {
+    SentryHiveAccessController accessController =
+        getAccessController(conf, authzConf, authenticator, ctx);
+    SentryHiveAuthorizationValidator authzValidator =
+        getAuthzValidator(conf, authzConf, authenticator);
+
+    return new SentryHiveAuthorizer(accessController, authzValidator);
+  }
+
+  /**
+   * Get an instance of SentryHiveAccessController from the configuration;
+   * defaults to DefaultSentryAccessController.
+   *
+   * @param conf
+   * @param authzConf
+   * @param authenticator
+   * @param ctx
+   * @throws HiveAuthzPluginException
+   */
+  public static SentryHiveAccessController getAccessController(HiveConf conf,
+      HiveAuthzConf authzConf, HiveAuthenticationProvider authenticator,
+      HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {
+    Class<? extends SentryHiveAccessController> clazz =
+        conf.getClass(HIVE_SENTRY_ACCESS_CONTROLLER, DefaultSentryAccessController.class,
+            SentryHiveAccessController.class);
+
+    if (clazz == null) {
+      // should not happen as default value is set
+      throw new HiveAuthzPluginException("Configuration value " + HIVE_SENTRY_ACCESS_CONTROLLER
+          + " is not set to a valid SentryAccessController subclass");
+    }
+
+    try {
+      // instantiate the configured class rather than hard-coding the default
+      return clazz.getConstructor(HiveConf.class, HiveAuthzConf.class,
+          HiveAuthenticationProvider.class, HiveAuthzSessionContext.class)
+          .newInstance(conf, authzConf, authenticator, ctx);
+    } catch (Exception e) {
+      throw new HiveAuthzPluginException(e);
+    }
+
+  }
+
+  /**
+   * Get an instance of SentryHiveAuthorizationValidator from the configuration;
+   * defaults to DefaultSentryValidator.
+   *
+   * @param conf
+   * @param authzConf
+   * @param authenticator
+   * @throws HiveAuthzPluginException
+   */
+  public static SentryHiveAuthorizationValidator getAuthzValidator(HiveConf conf,
+      HiveAuthzConf authzConf, HiveAuthenticationProvider authenticator)
+      throws HiveAuthzPluginException {
+    Class<? extends SentryHiveAuthorizationValidator> clazz =
+        conf.getClass(HIVE_SENTRY_AUTHORIZATION_CONTROLLER, DefaultSentryValidator.class,
+            SentryHiveAuthorizationValidator.class);
+
+    if (clazz == null) {
+      // should not happen as default value is set
+      throw new HiveAuthzPluginException("Configuration value "
+          + HIVE_SENTRY_AUTHORIZATION_CONTROLLER
+          + " is not set to a valid SentryAuthorizationValidator subclass");
+    }
+
+    try {
+      // instantiate the configured class rather than hard-coding the default
+      return clazz.getConstructor(HiveConf.class, HiveAuthzConf.class,
+          HiveAuthenticationProvider.class).newInstance(conf, authzConf, authenticator);
+    } catch (Exception e) {
+      throw new HiveAuthzPluginException(e);
+    }
+
+  }
+}
diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryHiveAuthorizationTaskFactoryImplV2.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryHiveAuthorizationTaskFactoryImplV2.java
new file mode 100644
index 000000000..2d4bf6436
--- /dev/null
+++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryHiveAuthorizationTaskFactoryImplV2.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements.
See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.sentry.binding.hive.v2; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.SentryHivePrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl; +import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; + +public class SentryHiveAuthorizationTaskFactoryImplV2 extends HiveAuthorizationTaskFactoryImpl { + + public SentryHiveAuthorizationTaskFactoryImplV2(HiveConf conf, Hive db) { + super(conf, db); + } + + @Override + protected PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticException { + SentryHivePrivilegeObjectDesc subject = new SentryHivePrivilegeObjectDesc(); + ASTNode child = (ASTNode) ast.getChild(0); + ASTNode gchild = (ASTNode) child.getChild(0); + if (child.getType() == HiveParser.TOK_TABLE_TYPE) { + subject.setTable(true); + String[] qualified = BaseSemanticAnalyzer.getQualifiedTableName(gchild); + subject.setObject(BaseSemanticAnalyzer.getDotName(qualified)); + } else if (child.getType() == HiveParser.TOK_URI_TYPE) { + subject.setUri(true); + subject.setObject(gchild.getText()); + } else if (child.getType() == HiveParser.TOK_SERVER_TYPE) { + subject.setServer(true); + subject.setObject(gchild.getText()); + } else { + subject.setTable(false); + subject.setObject(BaseSemanticAnalyzer.unescapeIdentifier(gchild.getText())); + } + // if partition spec node is present, set partition spec + for (int i = 1; i < child.getChildCount(); i++) { + gchild = (ASTNode) child.getChild(i); + if (gchild.getType() == HiveParser.TOK_PARTSPEC) { + subject.setPartSpec(DDLSemanticAnalyzer.getPartSpec(gchild)); + } else if (gchild.getType() == HiveParser.TOK_TABCOLNAME) { + subject.setColumns(BaseSemanticAnalyzer.getColumnNames(gchild)); + } + } + return subject; + } +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryHivePrivilegeObject.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryHivePrivilegeObject.java new file mode 100644 index 000000000..62773855c --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/SentryHivePrivilegeObject.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. 
The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.sentry.binding.hive.v2; + +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; + +public class SentryHivePrivilegeObject extends HivePrivilegeObject { + + boolean isServer = false; + + boolean isUri = false; + + String objectName = ""; + + public SentryHivePrivilegeObject(HivePrivilegeObjectType type, String objectName) { + super(type, null, objectName); + } + +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/DefaultSentryAccessController.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/DefaultSentryAccessController.java new file mode 100644 index 000000000..57de2ac5a --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/DefaultSentryAccessController.java @@ -0,0 +1,553 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.sentry.binding.hive.v2.authorizer; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.SentryHiveConstants; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.CLIENT_TYPE; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal.HivePrincipalType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilege; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeInfo; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.sentry.SentryUserException; +import org.apache.sentry.binding.hive.SentryOnFailureHookContext; +import org.apache.sentry.binding.hive.SentryOnFailureHookContextImpl; +import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; +import org.apache.sentry.binding.hive.authz.HiveAuthzBinding.HiveHook; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars; +import org.apache.sentry.binding.hive.v2.util.SentryAuthorizerUtil; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.model.db.AccessConstants; +import org.apache.sentry.core.model.db.DBModelAuthorizable; +import org.apache.sentry.core.model.db.Server; +import org.apache.sentry.provider.db.SentryAccessDeniedException; +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.service.thrift.TSentryRole; +import org.apache.sentry.service.thrift.SentryServiceClientFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Sets; + +public class DefaultSentryAccessController extends SentryHiveAccessController { + + public static final Logger LOG = LoggerFactory.getLogger(DefaultSentryAccessController.class); + + public static final String REQUIRED_AUTHZ_SERVER_NAME = "Config " + + AuthzConfVars.AUTHZ_SERVER_NAME.getVar() + " is required"; + + private HiveAuthenticationProvider authenticator; + private String serverName; + private HiveConf conf; + private HiveAuthzConf authzConf; + private HiveAuthzSessionContext ctx; + + private HiveHook hiveHook; + private HiveAuthzBinding hiveAuthzBinding; + protected SentryPolicyServiceClient sentryClient; + + + public DefaultSentryAccessController(HiveConf conf, HiveAuthzConf authzConf, + HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws Exception { + initilize(conf, 
authzConf, authenticator, ctx); + this.hiveHook = HiveHook.HiveServer2; + } + + public DefaultSentryAccessController(HiveHook hiveHook, HiveConf conf, HiveAuthzConf authzConf, + HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws Exception { + initilize(conf, authzConf, authenticator, ctx); + this.hiveHook = hiveHook; + } + + /** + * initialize authenticator and hiveAuthzBinding. + */ + protected void initilize(HiveConf conf, HiveAuthzConf authzConf, + HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws Exception { + Preconditions.checkNotNull(conf, "HiveConf cannot be null"); + Preconditions.checkNotNull(authzConf, "HiveAuthzConf cannot be null"); + Preconditions.checkNotNull(authenticator, "Hive authenticator provider cannot be null"); + Preconditions.checkNotNull(ctx, "HiveAuthzSessionContext cannot be null"); + + this.conf = conf; + this.authzConf = authzConf; + this.authenticator = authenticator; + this.ctx = ctx; + this.serverName = + Preconditions.checkNotNull(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar()), + REQUIRED_AUTHZ_SERVER_NAME); + } + + @Override + public void createRole(String roleName, HivePrincipal adminGrantor) + throws HiveAuthzPluginException, HiveAccessControlException { + if (AccessConstants.RESERVED_ROLE_NAMES.contains(roleName.toUpperCase())) { + String msg = + "Roles cannot be one of the reserved roles: " + AccessConstants.RESERVED_ROLE_NAMES; + throw new HiveAccessControlException(msg); + } + try { + sentryClient = getSentryClient(); + sentryClient.createRole(authenticator.getUserName(), roleName); + } catch (SentryAccessDeniedException e) { + HiveOperation hiveOp = HiveOperation.CREATEROLE; + executeOnFailureHooks(hiveOp, e); + } catch (SentryUserException e) { + String msg = "Error occurred when Sentry client creating role: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + } + + @Override + public void dropRole(String roleName) throws HiveAuthzPluginException, HiveAccessControlException { + if (AccessConstants.RESERVED_ROLE_NAMES.contains(roleName.toUpperCase())) { + String msg = + "Roles cannot be one of the reserved roles: " + AccessConstants.RESERVED_ROLE_NAMES; + throw new HiveAccessControlException(msg); + } + try { + sentryClient = getSentryClient(); + sentryClient.dropRole(authenticator.getUserName(), roleName); + } catch (SentryAccessDeniedException e) { + HiveOperation hiveOp = HiveOperation.DROPROLE; + executeOnFailureHooks(hiveOp, e); + } catch (SentryUserException e) { + String msg = "Error occurred when Sentry client creating role: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + } + + @Override + public List getAllRoles() throws HiveAccessControlException, HiveAuthzPluginException { + List roles = new ArrayList(); + try { + sentryClient = getSentryClient(); + roles = convert2RoleList(sentryClient.listRoles(authenticator.getUserName())); + } catch (SentryAccessDeniedException e) { + HiveOperation hiveOp = HiveOperation.SHOW_ROLES; + executeOnFailureHooks(hiveOp, e); + } catch (SentryUserException e) { + String msg = "Error when sentryClient listRoles: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + return roles; + } + + @Override + public void grantPrivileges(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, + 
HivePrincipal grantorPrincipal, boolean grantOption) throws HiveAuthzPluginException, + HiveAccessControlException { + grantOrRevokePrivlegeOnRole(hivePrincipals, hivePrivileges, hivePrivObject, grantOption, true); + } + + @Override + public void revokePrivileges(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, + HivePrincipal grantorPrincipal, boolean grantOption) throws HiveAuthzPluginException, + HiveAccessControlException { + grantOrRevokePrivlegeOnRole(hivePrincipals, hivePrivileges, hivePrivObject, grantOption, false); + } + + @Override + public void grantRole(List hivePrincipals, List roles, + boolean grantOption, HivePrincipal grantorPrinc) throws HiveAuthzPluginException, + HiveAccessControlException { + grantOrRevokeRoleOnGroup(hivePrincipals, roles, grantorPrinc, true); + } + + @Override + public void revokeRole(List hivePrincipals, List roles, + boolean grantOption, HivePrincipal grantorPrinc) throws HiveAuthzPluginException, + HiveAccessControlException { + grantOrRevokeRoleOnGroup(hivePrincipals, roles, grantorPrinc, false); + } + + + @Override + public List showPrivileges(HivePrincipal principal, HivePrivilegeObject privObj) + throws HiveAuthzPluginException, HiveAccessControlException { + if (principal.getType() != HivePrincipalType.ROLE) { + String msg = + SentryHiveConstants.GRANT_REVOKE_NOT_SUPPORTED_FOR_PRINCIPAL + principal.getType(); + throw new HiveAuthzPluginException(msg); + } + List infoList = new ArrayList(); + try { + sentryClient = getSentryClient(); + List> authorizables = + SentryAuthorizerUtil.getAuthzHierarchy(new Server(serverName), privObj); + Set tPrivilges = new HashSet(); + if (authorizables != null && !authorizables.isEmpty()) { + for (List authorizable : authorizables) { + tPrivilges.addAll(sentryClient.listPrivilegesByRoleName(authenticator.getUserName(), + principal.getName(), authorizable)); + } + } else { + tPrivilges.addAll(sentryClient.listPrivilegesByRoleName(authenticator.getUserName(), + principal.getName(), null)); + } + + if (tPrivilges != null && !tPrivilges.isEmpty()) { + for (TSentryPrivilege privilege : tPrivilges) { + infoList.add(SentryAuthorizerUtil.convert2HivePrivilegeInfo(privilege, principal)); + } + } + } catch (SentryAccessDeniedException e) { + HiveOperation hiveOp = HiveOperation.SHOW_GRANT; + executeOnFailureHooks(hiveOp, e); + } catch (SentryUserException e) { + String msg = "Error when sentryClient listPrivilegesByRoleName: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + return infoList; + } + + @Override + public void setCurrentRole(String roleName) throws HiveAccessControlException, + HiveAuthzPluginException { + try { + sentryClient = getSentryClient(); + hiveAuthzBinding = new HiveAuthzBinding(hiveHook, conf, authzConf); + hiveAuthzBinding.setActiveRoleSet(roleName, + sentryClient.listUserRoles(authenticator.getUserName())); + } catch (SentryAccessDeniedException e) { + HiveOperation hiveOp = HiveOperation.GRANT_ROLE; + executeOnFailureHooks(hiveOp, e); + } catch (Exception e) { + String msg = "Error when sentryClient setCurrentRole: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + if (hiveAuthzBinding != null) { + hiveAuthzBinding.close(); + } + } + } + + @Override + public List getCurrentRoleNames() throws HiveAuthzPluginException { + List roles = new ArrayList(); + try { + sentryClient = getSentryClient(); + hiveAuthzBinding = new 
HiveAuthzBinding(hiveHook, conf, authzConf); + ActiveRoleSet roleSet = hiveAuthzBinding.getActiveRoleSet(); + if (roleSet.isAll()) { + roles = convert2RoleList(sentryClient.listUserRoles(authenticator.getUserName())); + } else { + roles.addAll(roleSet.getRoles()); + } + } catch (Exception e) { + String msg = "Error when sentryClient listUserRoles: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + if (hiveAuthzBinding != null) { + hiveAuthzBinding.close(); + } + } + return roles; + } + + @Override + public List getPrincipalGrantInfoForRole(String roleName) + throws HiveAuthzPluginException { + // TODO we will support in future + throw new HiveAuthzPluginException("Not supported of SHOW_ROLE_PRINCIPALS in Sentry"); + } + + @Override + public List getRoleGrantInfoForPrincipal(HivePrincipal principal) + throws HiveAccessControlException, HiveAuthzPluginException { + List hiveRoleGrants = new ArrayList(); + try { + sentryClient = getSentryClient(); + + if (principal.getType() != HivePrincipalType.GROUP) { + String msg = + SentryHiveConstants.GRANT_REVOKE_NOT_SUPPORTED_FOR_PRINCIPAL + principal.getType(); + throw new HiveAuthzPluginException(msg); + } + Set roles = + sentryClient.listRolesByGroupName(authenticator.getUserName(), principal.getName()); + if (roles != null && !roles.isEmpty()) { + for (TSentryRole role : roles) { + hiveRoleGrants.add(SentryAuthorizerUtil.convert2HiveRoleGrant(role)); + } + } + } catch (SentryAccessDeniedException e) { + HiveOperation hiveOp = HiveOperation.SHOW_ROLE_GRANT; + executeOnFailureHooks(hiveOp, e); + } catch (SentryUserException e) { + String msg = "Error when sentryClient listRolesByGroupName: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + return hiveRoleGrants; + } + + @Override + public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException { + // Apply rest of the configuration only to HiveServer2 + if (ctx.getClientType() != CLIENT_TYPE.HIVESERVER2 + || !hiveConf.getBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED)) { + throw new HiveAuthzPluginException("Sentry just support for hiveserver2"); + } + } + + /** + * Grant(isGrant is true) or revoke(isGrant is false) db privileges to/from role via sentryClient, + * which is a instance of SentryPolicyServiceClientV2 + * + * @param hivePrincipals + * @param hivePrivileges + * @param hivePrivObject + * @param grantOption + * @param isGrant + */ + private void grantOrRevokePrivlegeOnRole(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, boolean grantOption, + boolean isGrant) throws HiveAuthzPluginException, HiveAccessControlException { + try { + sentryClient = getSentryClient(); + + for (HivePrincipal principal : hivePrincipals) { + // Sentry only support grant privilege to ROLE + if (principal.getType() != HivePrincipalType.ROLE) { + String msg = + SentryHiveConstants.GRANT_REVOKE_NOT_SUPPORTED_FOR_PRINCIPAL + principal.getType(); + throw new HiveAuthzPluginException(msg); + } + for (HivePrivilege privilege : hivePrivileges) { + String grantorName = authenticator.getUserName(); + String roleName = principal.getName(); + String action = SentryAuthorizerUtil.convert2SentryAction(privilege); + List columnNames = privilege.getColumns(); + Boolean grantOp = null; + if (isGrant) { + grantOp = grantOption; + } + + switch (hivePrivObject.getType()) { + case GLOBAL: + if (isGrant) { + 
sentryClient.grantServerPrivilege(grantorName, roleName, + hivePrivObject.getObjectName(), action, grantOp); + } else { + sentryClient.revokeServerPrivilege(grantorName, roleName, + hivePrivObject.getObjectName(), action, grantOp); + } + break; + case DATABASE: + if (isGrant) { + sentryClient.grantDatabasePrivilege(grantorName, roleName, serverName, + hivePrivObject.getDbname(), action, grantOp); + } else { + sentryClient.revokeDatabasePrivilege(grantorName, roleName, serverName, + hivePrivObject.getDbname(), action, grantOp); + } + break; + case TABLE_OR_VIEW: + // For column level security + if (columnNames != null && !columnNames.isEmpty()) { + if (action.equalsIgnoreCase(AccessConstants.INSERT) + || action.equalsIgnoreCase(AccessConstants.ALL)) { + String msg = + SentryHiveConstants.PRIVILEGE_NOT_SUPPORTED + privilege.getName() + + " on Column"; + throw new HiveAuthzPluginException(msg); + } + if (isGrant) { + sentryClient.grantColumnsPrivileges(grantorName, roleName, serverName, + hivePrivObject.getDbname(), hivePrivObject.getObjectName(), columnNames, + action, grantOp); + } else { + sentryClient.revokeColumnsPrivilege(grantorName, roleName, serverName, + hivePrivObject.getDbname(), hivePrivObject.getObjectName(), columnNames, + action, grantOp); + } + } else { + if (isGrant) { + sentryClient.grantTablePrivilege(grantorName, roleName, serverName, + hivePrivObject.getDbname(), hivePrivObject.getObjectName(), action, grantOp); + } else { + sentryClient.revokeTablePrivilege(grantorName, roleName, serverName, + hivePrivObject.getDbname(), hivePrivObject.getObjectName(), action, grantOp); + } + } + break; + case LOCAL_URI: + case DFS_URI: + String uRIString = hivePrivObject.getObjectName().replace("'", "").replace("\"", ""); + if (isGrant) { + sentryClient.grantURIPrivilege(grantorName, roleName, serverName, + uRIString, grantOp); + } else { + sentryClient.revokeURIPrivilege(grantorName, roleName, serverName, + uRIString, grantOp); + } + break; + case FUNCTION: + case PARTITION: + case COLUMN: + case COMMAND_PARAMS: + // not support these type + throw new HiveAuthzPluginException(hivePrivObject.getType().name() + + " are not supported in sentry"); + default: + break; + } + } + } + } catch (SentryAccessDeniedException e) { + HiveOperation hiveOp = + isGrant ? 
HiveOperation.GRANT_PRIVILEGE : HiveOperation.REVOKE_PRIVILEGE; + executeOnFailureHooks(hiveOp, e); + } catch (SentryUserException e) { + String msg = "Error when sentryClient grant/revoke privilege: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + } + + /** + * Grant (isGrant is true) or revoke (isGrant is false) a role to/from a group via sentryClient, + * which is an instance of SentryPolicyServiceClientV2 + * + * @param hivePrincipals + * @param roles + * @param grantorPrinc + * @param isGrant + */ + private void grantOrRevokeRoleOnGroup(List<HivePrincipal> hivePrincipals, List<String> roles, + HivePrincipal grantorPrinc, boolean isGrant) throws HiveAuthzPluginException, + HiveAccessControlException { + try { + sentryClient = getSentryClient(); + // get principals + Set<String> groups = Sets.newHashSet(); + for (HivePrincipal principal : hivePrincipals) { + if (principal.getType() != HivePrincipalType.GROUP) { + String msg = + SentryHiveConstants.GRANT_REVOKE_NOT_SUPPORTED_FOR_PRINCIPAL + principal.getType(); + throw new HiveAuthzPluginException(msg); + } + groups.add(principal.getName()); + } + + // grant/revoke role to/from principals + for (String roleName : roles) { + if (isGrant) { + sentryClient.grantRoleToGroups(grantorPrinc.getName(), roleName, groups); + } else { + sentryClient.revokeRoleFromGroups(grantorPrinc.getName(), roleName, groups); + } + } + + } catch (SentryAccessDeniedException e) { + HiveOperation hiveOp = isGrant ? HiveOperation.GRANT_ROLE : HiveOperation.REVOKE_ROLE; + executeOnFailureHooks(hiveOp, e); + } catch (SentryUserException e) { + String msg = "Error when sentryClient grant/revoke role: " + e.getMessage(); + executeOnErrorHooks(msg, e); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + } + + private void executeOnFailureHooks(HiveOperation hiveOp, SentryAccessDeniedException e) + throws HiveAccessControlException { + SentryOnFailureHookContext hookCtx = + new SentryOnFailureHookContextImpl(SessionState.get().getCmd(), null, null, hiveOp, null, + null, null, null, authenticator.getUserName(), null, new AuthorizationException(e), + authzConf); + SentryAuthorizerUtil.executeOnFailureHooks(hookCtx, authzConf); + throw new HiveAccessControlException(e.getMessage(), e); + } + + private void executeOnErrorHooks(String msg, Exception e) throws HiveAuthzPluginException { + LOG.error(msg, e); + throw new HiveAuthzPluginException(msg, e); + } + + private List<String> convert2RoleList(Set<TSentryRole> roleSet) { + List<String> roles = new ArrayList<String>(); + if (roleSet != null && !roleSet.isEmpty()) { + for (TSentryRole tRole : roleSet) { + roles.add(tRole.getRoleName()); + } + } + return roles; + } + + private SentryPolicyServiceClient getSentryClient() throws HiveAuthzPluginException { + try { + Preconditions.checkNotNull(authzConf, "HiveAuthzConf cannot be null"); + return SentryServiceClientFactory.create(authzConf); + } catch (Exception e) { + String msg = "Error occurred when creating Sentry client: " + e.getMessage(); + throw new HiveAuthzPluginException(msg, e); + } + } + + +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/DefaultSentryValidator.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/DefaultSentryValidator.java new file mode 100644 index 000000000..70e0720c9 --- /dev/null +++
b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/DefaultSentryValidator.java @@ -0,0 +1,479 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.sentry.binding.hive.v2.authorizer; + +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + +import java.security.CodeSource; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.sentry.binding.hive.SentryOnFailureHookContext; +import org.apache.sentry.binding.hive.SentryOnFailureHookContextImpl; +import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; +import org.apache.sentry.binding.hive.authz.HiveAuthzBinding.HiveHook; +import org.apache.sentry.binding.hive.authz.HiveAuthzPrivileges; +import org.apache.sentry.binding.hive.authz.HiveAuthzPrivileges.HiveOperationScope; +import org.apache.sentry.binding.hive.authz.HiveAuthzPrivilegesMap; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.binding.hive.v2.util.SentryAuthorizerUtil; +import org.apache.sentry.binding.hive.v2.util.SimpleSemanticAnalyzer; +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.core.model.db.AccessURI; +import org.apache.sentry.core.model.db.Column; +import org.apache.sentry.core.model.db.DBModelAction; +import org.apache.sentry.core.model.db.DBModelAuthorizable; +import org.apache.sentry.core.model.db.DBModelAuthorizable.AuthorizableType; +import org.apache.sentry.core.model.db.Database; +import org.apache.sentry.core.model.db.Table; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.Sets; + +/** + * This class is used to do authorization: it checks whether the current user has privileges to + * perform the operation.
+ */ +public class DefaultSentryValidator extends SentryHiveAuthorizationValidator { + + public static final Logger LOG = LoggerFactory.getLogger(DefaultSentryValidator.class); + + protected HiveConf conf; + protected HiveAuthzConf authzConf; + protected HiveAuthenticationProvider authenticator; + + public DefaultSentryValidator(HiveConf conf, HiveAuthzConf authzConf, + HiveAuthenticationProvider authenticator) throws Exception { + initilize(conf, authzConf, authenticator); + this.hiveHook = HiveHook.HiveServer2; + } + + public DefaultSentryValidator(HiveHook hiveHook, HiveConf conf, HiveAuthzConf authzConf, + HiveAuthenticationProvider authenticator) throws Exception { + initilize(conf, authzConf, authenticator); + this.hiveHook = hiveHook; + } + + /** + * initialize authenticator and hiveAuthzBinding. + */ + protected void initilize(HiveConf conf, HiveAuthzConf authzConf, + HiveAuthenticationProvider authenticator) throws Exception { + Preconditions.checkNotNull(conf, "HiveConf cannot be null"); + Preconditions.checkNotNull(authzConf, "HiveAuthzConf cannot be null"); + Preconditions.checkNotNull(authenticator, "Hive authenticator provider cannot be null"); + this.conf = conf; + this.authzConf = authzConf; + this.authenticator = authenticator; + } + + private HiveHook hiveHook; + + // all operations need to extend at DB scope + private static final Set EX_DB_ALL = Sets.newHashSet(HiveOperation.DROPDATABASE, + HiveOperation.CREATETABLE, HiveOperation.IMPORT, HiveOperation.DESCDATABASE, + HiveOperation.ALTERTABLE_RENAME, HiveOperation.LOCKDB, HiveOperation.UNLOCKDB); + // input operations need to extend at DB scope + private static final Set EX_DB_INPUT = Sets.newHashSet(HiveOperation.DROPDATABASE, + HiveOperation.DESCDATABASE, HiveOperation.ALTERTABLE_RENAME, HiveOperation.LOCKDB, + HiveOperation.UNLOCKDB); + + // all operations need to extend at Table scope + private static final Set EX_TB_ALL = Sets.newHashSet(HiveOperation.DROPTABLE, + HiveOperation.DROPVIEW, HiveOperation.DESCTABLE, HiveOperation.SHOW_TBLPROPERTIES, + HiveOperation.SHOWINDEXES, HiveOperation.ALTERTABLE_PROPERTIES, + HiveOperation.ALTERTABLE_SERDEPROPERTIES, HiveOperation.ALTERTABLE_CLUSTER_SORT, + HiveOperation.ALTERTABLE_FILEFORMAT, HiveOperation.ALTERTABLE_TOUCH, + HiveOperation.ALTERTABLE_PROTECTMODE, HiveOperation.ALTERTABLE_RENAMECOL, + HiveOperation.ALTERTABLE_ADDCOLS, HiveOperation.ALTERTABLE_REPLACECOLS, + HiveOperation.ALTERTABLE_RENAMEPART, HiveOperation.ALTERTABLE_ARCHIVE, + HiveOperation.ALTERTABLE_UNARCHIVE, HiveOperation.ALTERTABLE_SERIALIZER, + HiveOperation.ALTERTABLE_MERGEFILES, HiveOperation.ALTERTABLE_SKEWED, + HiveOperation.ALTERTABLE_DROPPARTS, HiveOperation.ALTERTABLE_ADDPARTS, + HiveOperation.ALTERTABLE_RENAME, HiveOperation.ALTERTABLE_LOCATION, + HiveOperation.ALTERVIEW_PROPERTIES, HiveOperation.ALTERPARTITION_FILEFORMAT, + HiveOperation.ALTERPARTITION_PROTECTMODE, HiveOperation.ALTERPARTITION_SERDEPROPERTIES, + HiveOperation.ALTERPARTITION_SERIALIZER, HiveOperation.ALTERPARTITION_MERGEFILES, + HiveOperation.ALTERPARTITION_LOCATION, HiveOperation.ALTERTBLPART_SKEWED_LOCATION, + HiveOperation.MSCK, HiveOperation.ALTERINDEX_REBUILD, HiveOperation.LOCKTABLE, + HiveOperation.UNLOCKTABLE, HiveOperation.SHOWCOLUMNS, HiveOperation.SHOW_TABLESTATUS, HiveOperation.LOAD); + // input operations need to extend at Table scope + private static final Set EX_TB_INPUT = Sets.newHashSet(HiveOperation.DROPTABLE, + HiveOperation.DROPVIEW, HiveOperation.SHOW_TBLPROPERTIES, HiveOperation.SHOWINDEXES, + 
HiveOperation.ALTERINDEX_REBUILD, HiveOperation.LOCKTABLE, HiveOperation.UNLOCKTABLE, + HiveOperation.SHOW_TABLESTATUS); + private static final Set META_TB_INPUT = Sets.newHashSet(HiveOperation.DESCTABLE, + HiveOperation.SHOWCOLUMNS); + + /** + * Check if current user has privileges to perform given operation type hiveOpType on the given + * input and output objects + * + * @param hiveOpType + * @param inputHObjs + * @param outputHObjs + * @param context + * @throws SentryAccessControlException + */ + @Override + public void checkPrivileges(HiveOperationType hiveOpType, List inputHObjs, + List outputHObjs, HiveAuthzContext context) + throws HiveAuthzPluginException, HiveAccessControlException { + if (LOG.isDebugEnabled()) { + String msg = + "Checking privileges for operation " + hiveOpType + " by user " + + authenticator.getUserName() + " on " + " input objects " + inputHObjs + + " and output objects " + outputHObjs + ". Context Info: " + context; + LOG.debug(msg); + } + + HiveOperation hiveOp = SentryAuthorizerUtil.convert2HiveOperation(hiveOpType.name()); + HiveAuthzPrivileges stmtAuthPrivileges = null; + if (HiveOperation.DESCTABLE.equals(hiveOp) && + !(context.getCommandString().contains("EXTENDED") || context.getCommandString().contains("FORMATTED")) ) { + stmtAuthPrivileges = HiveAuthzPrivilegesMap.getHiveAuthzPrivileges(HiveOperation.SHOWCOLUMNS); + } else { + stmtAuthPrivileges = HiveAuthzPrivilegesMap.getHiveAuthzPrivileges(hiveOp); + } + + HiveAuthzBinding hiveAuthzBinding = null; + try { + hiveAuthzBinding = getAuthzBinding(); + if (stmtAuthPrivileges == null) { + // We don't handle authorizing this statement + return; + } + + List> inputHierarchyList = + SentryAuthorizerUtil.convert2SentryPrivilegeList(hiveAuthzBinding.getAuthServer(), + inputHObjs); + List> outputHierarchyList = + SentryAuthorizerUtil.convert2SentryPrivilegeList(hiveAuthzBinding.getAuthServer(), + outputHObjs); + + // Workaround for metadata queries + addExtendHierarchy(hiveOp, stmtAuthPrivileges, inputHierarchyList, outputHierarchyList, + context.getCommandString(), hiveAuthzBinding); + + hiveAuthzBinding.authorize(hiveOp, stmtAuthPrivileges, + new Subject(authenticator.getUserName()), inputHierarchyList, outputHierarchyList); + } catch (AuthorizationException e) { + Database db = null; + Table tab = null; + AccessURI udfURI = null; + AccessURI partitionURI = null; + if (outputHObjs != null) { + for (HivePrivilegeObject obj : outputHObjs) { + switch (obj.getType()) { + case DATABASE: + db = new Database(obj.getObjectName()); + break; + case TABLE_OR_VIEW: + db = new Database(obj.getDbname()); + tab = new Table(obj.getObjectName()); + break; + case PARTITION: + db = new Database(obj.getDbname()); + tab = new Table(obj.getObjectName()); + case LOCAL_URI: + case DFS_URI: + } + } + } + String permsRequired = ""; + SentryOnFailureHookContext hookCtx = + new SentryOnFailureHookContextImpl(context.getCommandString(), null, null, hiveOp, db, + tab, udfURI, partitionURI, authenticator.getUserName(), context.getIpAddress(), e, + authzConf); + SentryAuthorizerUtil.executeOnFailureHooks(hookCtx, authzConf); + for (String perm : hiveAuthzBinding.getLastQueryPrivilegeErrors()) { + permsRequired += perm + ";"; + } + SessionState.get().getConf().set(HiveAuthzConf.HIVE_SENTRY_AUTH_ERRORS, permsRequired); + String msg = + HiveAuthzConf.HIVE_SENTRY_PRIVILEGE_ERROR_MESSAGE + + "\n Required privileges for this query: " + permsRequired; + throw new HiveAccessControlException(msg, e); + } catch (Exception e) { + throw new 
HiveAuthzPluginException(e.getClass()+ ": " + e.getMessage(), e); + } finally { + if (hiveAuthzBinding != null) { + hiveAuthzBinding.close(); + } + } + + if ("true".equalsIgnoreCase(SessionState.get().getConf() + .get(HiveAuthzConf.HIVE_SENTRY_MOCK_COMPILATION))) { + throw new HiveAccessControlException(HiveAuthzConf.HIVE_SENTRY_MOCK_ERROR + + " Mock query compilation aborted. Set " + HiveAuthzConf.HIVE_SENTRY_MOCK_COMPILATION + + " to 'false' for normal query processing"); + } + } + + @VisibleForTesting + public HiveAuthzBinding getAuthzBinding() throws Exception { + return new HiveAuthzBinding(hiveHook, conf, authzConf); + } + + private void addExtendHierarchy(HiveOperation hiveOp, HiveAuthzPrivileges stmtAuthPrivileges, + List> inputHierarchyList, + List> outputHierarchyList, String command, + HiveAuthzBinding hiveAuthzBinding) throws HiveAuthzPluginException, + HiveAccessControlException { + String currDatabase = null; + switch (stmtAuthPrivileges.getOperationScope()) { + case SERVER: + // validate server level privileges if applicable. Eg create UDF,register jar etc .. + List serverHierarchy = new ArrayList(); + serverHierarchy.add(hiveAuthzBinding.getAuthServer()); + inputHierarchyList.add(serverHierarchy); + break; + case DATABASE: + // workaround for metadata queries. + if (EX_DB_ALL.contains(hiveOp)) { + SimpleSemanticAnalyzer analyzer = new SimpleSemanticAnalyzer(hiveOp, command); + currDatabase = analyzer.getCurrentDb(); + + List externalAuthorizableHierarchy = + new ArrayList(); + externalAuthorizableHierarchy.add(hiveAuthzBinding.getAuthServer()); + externalAuthorizableHierarchy.add(new Database(currDatabase)); + + if (EX_DB_INPUT.contains(hiveOp)) { + inputHierarchyList.add(externalAuthorizableHierarchy); + } else { + outputHierarchyList.add(externalAuthorizableHierarchy); + } + } + break; + case TABLE: + case COLUMN: + // workaround for drop table/view. 
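+ // Sketch with hypothetical values: for a command like "DROP TABLE db1.t1" the analyzer below + // yields currDatabase = "db1" and currTable = "t1", so the hierarchy [authServer, Database(db1), + // Table(t1)] is appended to the input or output list depending on the operation.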
+ if (EX_TB_ALL.contains(hiveOp)) { + SimpleSemanticAnalyzer analyzer = new SimpleSemanticAnalyzer(hiveOp, command); + currDatabase = analyzer.getCurrentDb(); + String currTable = analyzer.getCurrentTb(); + + List<DBModelAuthorizable> externalAuthorizableHierarchy = + new ArrayList<DBModelAuthorizable>(); + externalAuthorizableHierarchy.add(hiveAuthzBinding.getAuthServer()); + externalAuthorizableHierarchy.add(new Database(currDatabase)); + externalAuthorizableHierarchy.add(new Table(currTable)); + + if (EX_TB_INPUT.contains(hiveOp)) { + inputHierarchyList.add(externalAuthorizableHierarchy); + } else if (META_TB_INPUT.contains(hiveOp)) { + externalAuthorizableHierarchy.add(Column.SOME); + inputHierarchyList.add(externalAuthorizableHierarchy); + } else { + outputHierarchyList.add(externalAuthorizableHierarchy); + } + } + break; + case FUNCTION: + if (hiveOp.equals(HiveOperation.CREATEFUNCTION)) { + SimpleSemanticAnalyzer analyzer = new SimpleSemanticAnalyzer(hiveOp, command); + currDatabase = analyzer.getCurrentDb(); + String udfClassName = analyzer.getCurrentTb(); + try { + CodeSource udfSrc = Class.forName(udfClassName).getProtectionDomain().getCodeSource(); + if (udfSrc == null) { + throw new HiveAuthzPluginException("Could not resolve the jar for UDF class " + + udfClassName); + } + String udfJar = udfSrc.getLocation().getPath(); + if (udfJar == null || udfJar.isEmpty()) { + throw new HiveAuthzPluginException("Could not find the jar for UDF class " + + udfClassName + " to validate privileges"); + } + AccessURI udfURI = SentryAuthorizerUtil.parseURI(udfSrc.getLocation().toString(), true); + List<DBModelAuthorizable> udfUriHierarchy = new ArrayList<DBModelAuthorizable>(); + udfUriHierarchy.add(hiveAuthzBinding.getAuthServer()); + udfUriHierarchy.add(udfURI); + inputHierarchyList.add(udfUriHierarchy); + } catch (Exception e) { + throw new HiveAuthzPluginException("Error retrieving UDF class", e); + } + } + break; + case CONNECT: + /* + * The 'CONNECT' is an implicit privilege scope currently used for USE <db>. It's allowed + * when the user has any privilege on the current database. For application backward + * compatibility, we allow (optional) implicit connect permission on the 'default' db.
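+ * For example (hypothetical objects): a user whose role only carries SELECT on some table in db1 + * can still run "USE db1", since the hierarchy built below ends in Table.ALL/Column.ALL and is + * therefore satisfied by any privilege inside the database.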
+ */ + List connectHierarchy = new ArrayList(); + connectHierarchy.add(hiveAuthzBinding.getAuthServer()); + if (hiveOp.equals(HiveOperation.SWITCHDATABASE)) { + currDatabase = command.split(" ")[1]; + } + // by default allow connect access to default db + Table currTbl = Table.ALL; + Database currDB = new Database(currDatabase); + Column currCol = Column.ALL; + if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(currDatabase) && "false" + .equalsIgnoreCase(authzConf.get( + HiveAuthzConf.AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB.getVar(), "false"))) { + currDB = Database.ALL; + currTbl = Table.SOME; + } + + connectHierarchy.add(currDB); + connectHierarchy.add(currTbl); + connectHierarchy.add(currCol); + + inputHierarchyList.add(connectHierarchy); + break; + } + } + + @Override + public List filterListCmdObjects(List listObjs, + HiveAuthzContext context) { + if (listObjs != null && listObjs.size() >= 1) { + HivePrivilegeObjectType pType = listObjs.get(0).getType(); + HiveAuthzBinding hiveAuthzBinding = null; + try { + switch (pType) { + case DATABASE: + hiveAuthzBinding = getAuthzBinding(); + listObjs = filterShowDatabases(listObjs, authenticator.getUserName(), hiveAuthzBinding); + break; + case TABLE_OR_VIEW: + hiveAuthzBinding = getAuthzBinding(); + listObjs = filterShowTables(listObjs, authenticator.getUserName(), hiveAuthzBinding); + break; + } + } catch (Exception e) { + LOG.debug(e.getMessage(),e); + } finally { + if (hiveAuthzBinding != null) { + hiveAuthzBinding.close(); + } + } + } + return listObjs; + } + + private List filterShowTables(List listObjs, + String userName, HiveAuthzBinding hiveAuthzBinding) { + List filteredResult = new ArrayList(); + Subject subject = new Subject(userName); + HiveAuthzPrivileges tableMetaDataPrivilege = + new HiveAuthzPrivileges.AuthzPrivilegeBuilder() + .addInputObjectPriviledge(AuthorizableType.Column, + EnumSet.of(DBModelAction.SELECT, DBModelAction.INSERT)) + .setOperationScope(HiveOperationScope.TABLE) + .setOperationType( + org.apache.sentry.binding.hive.authz.HiveAuthzPrivileges.HiveOperationType.INFO) + .build(); + + for (HivePrivilegeObject obj : listObjs) { + // if user has privileges on table, add to filtered list, else discard + Table table = new Table(obj.getObjectName()); + Database database; + database = new Database(obj.getDbname()); + + List> inputHierarchy = new ArrayList>(); + List> outputHierarchy = new ArrayList>(); + List externalAuthorizableHierarchy = + new ArrayList(); + externalAuthorizableHierarchy.add(hiveAuthzBinding.getAuthServer()); + externalAuthorizableHierarchy.add(database); + externalAuthorizableHierarchy.add(table); + externalAuthorizableHierarchy.add(Column.ALL); + inputHierarchy.add(externalAuthorizableHierarchy); + + try { + hiveAuthzBinding.authorize(HiveOperation.SHOWTABLES, tableMetaDataPrivilege, subject, + inputHierarchy, outputHierarchy); + filteredResult.add(obj); + } catch (AuthorizationException e) { + // squash the exception, user doesn't have privileges, so the table is + // not added to + // filtered list. 
+ } + } + return filteredResult; + } + + private List filterShowDatabases(List listObjs, + String userName, HiveAuthzBinding hiveAuthzBinding) { + List filteredResult = new ArrayList(); + Subject subject = new Subject(userName); + HiveAuthzPrivileges anyPrivilege = + new HiveAuthzPrivileges.AuthzPrivilegeBuilder() + .addInputObjectPriviledge( + AuthorizableType.Column, + EnumSet.of(DBModelAction.SELECT, DBModelAction.INSERT, DBModelAction.ALTER, + DBModelAction.CREATE, DBModelAction.DROP, DBModelAction.INDEX, + DBModelAction.LOCK)) + .setOperationScope(HiveOperationScope.CONNECT) + .setOperationType( + org.apache.sentry.binding.hive.authz.HiveAuthzPrivileges.HiveOperationType.QUERY) + .build(); + + for (HivePrivilegeObject obj : listObjs) { + // if user has privileges on database, add to filtered list, else discard + Database database = null; + + // if default is not restricted, continue + if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(obj.getObjectName()) + && "false".equalsIgnoreCase(hiveAuthzBinding.getAuthzConf().get( + HiveAuthzConf.AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB.getVar(), "false"))) { + filteredResult.add(obj); + continue; + } + + database = new Database(obj.getObjectName()); + + List> inputHierarchy = new ArrayList>(); + List> outputHierarchy = new ArrayList>(); + List externalAuthorizableHierarchy = + new ArrayList(); + externalAuthorizableHierarchy.add(hiveAuthzBinding.getAuthServer()); + externalAuthorizableHierarchy.add(database); + externalAuthorizableHierarchy.add(Table.ALL); + externalAuthorizableHierarchy.add(Column.ALL); + inputHierarchy.add(externalAuthorizableHierarchy); + + try { + hiveAuthzBinding.authorize(HiveOperation.SHOWDATABASES, anyPrivilege, subject, + inputHierarchy, outputHierarchy); + filteredResult.add(obj); + } catch (AuthorizationException e) { + // squash the exception, user doesn't have privileges, so the table is + // not added to + // filtered list. + } + } + return filteredResult; + } +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAccessController.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAccessController.java new file mode 100644 index 000000000..26fdac803 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAccessController.java @@ -0,0 +1,200 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.sentry.binding.hive.v2.authorizer; + +import java.util.List; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessController; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilege; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeInfo; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant; + +/** + * Abstract class to do access control commands, e.g. grant/revoke privileges, grant/revoke role, + * create/drop role. + */ +public abstract class SentryHiveAccessController implements HiveAccessController { + + /** + * Hive statement: Grant privilege GRANT priv_type [, priv_type ] ... ON table_or_view_name TO + * principal_specification [, principal_specification] ... [WITH GRANT OPTION]; + * principal_specification : USER user | ROLE role + * + * priv_type : INSERT | SELECT | UPDATE | DELETE | ALL + * + * @param hivePrincipals + * @param hivePrivileges + * @param hivePrivObject + * @param grantorPrincipal + * @param grantOption + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract void grantPrivileges(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, + HivePrincipal grantorPrincipal, boolean grantOption) throws HiveAuthzPluginException, + HiveAccessControlException; + + /** + * Hive statement: Revoke privilege REVOKE priv_type [, priv_type ] ... ON table_or_view_name FROM + * principal_specification [, principal_specification] ... ; + * + * principal_specification : USER user | ROLE role + * + * priv_type : INSERT | SELECT | UPDATE | DELETE | ALL + * + * @param hivePrincipals + * @param hivePrivileges + * @param hivePrivObject + * @param grantorPrincipal + * @param grantOption + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract void revokePrivileges(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, + HivePrincipal grantorPrincipal, boolean grantOption) throws HiveAuthzPluginException, + HiveAccessControlException; + + /** + * Hive statement: Create role CREATE ROLE role_name; + * + * @param roleName + * @param adminGrantor + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract void createRole(String roleName, HivePrincipal adminGrantor) + throws HiveAuthzPluginException, HiveAccessControlException; + + /** + * Hive statement: Drop role DROP ROLE role_name; + * + * @param roleName + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract void dropRole(String roleName) throws HiveAuthzPluginException, + HiveAccessControlException; + + /** + * Hive statement: Grant role GRANT role_name [, role_name] ... TO principal_specification [, + * principal_specification] ... 
[ WITH ADMIN OPTION ]; + * + * principal_specification : USER user | ROLE role + * + * @param hivePrincipals + * @param roles + * @param grantOption + * @param grantorPrinc + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract void grantRole(List<HivePrincipal> hivePrincipals, List<String> roles, + boolean grantOption, HivePrincipal grantorPrinc) throws HiveAuthzPluginException, + HiveAccessControlException; + + + /** + * Hive statement: Revoke role REVOKE [ADMIN OPTION FOR] role_name [, role_name] ... FROM + * principal_specification [, principal_specification] ... ; + * + * principal_specification : USER user | ROLE role + * + * @param hivePrincipals + * @param roles + * @param grantOption + * @param grantorPrinc + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract void revokeRole(List<HivePrincipal> hivePrincipals, List<String> roles, + boolean grantOption, HivePrincipal grantorPrinc) throws HiveAuthzPluginException, + HiveAccessControlException; + + /** + * Hive statement: Show roles SHOW ROLES; + * + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract List<String> getAllRoles() throws HiveAuthzPluginException, + HiveAccessControlException; + + /** + * Hive statement: Show grant SHOW GRANT [principal_name] ON (ALL | [TABLE] table_or_view_name); + * + * @param principal + * @param privObj + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract List<HivePrivilegeInfo> showPrivileges(HivePrincipal principal, + HivePrivilegeObject privObj) throws HiveAuthzPluginException, HiveAccessControlException; + + /** + * Hive statement: Set role SET ROLE (role_name|ALL); + * + * @param roleName + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract void setCurrentRole(String roleName) throws HiveAuthzPluginException, + HiveAccessControlException; + + /** + * Hive statement: Show current roles SHOW CURRENT ROLES; + * + * @throws HiveAuthzPluginException + */ + @Override + public abstract List<String> getCurrentRoleNames() throws HiveAuthzPluginException; + + /** + * Hive statement: Show role principals SHOW PRINCIPALS role_name; + * + * @param roleName + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract List<HivePrivilegeInfo> getPrincipalGrantInfoForRole(String roleName) + throws HiveAuthzPluginException, HiveAccessControlException; + + /** + * Hive statement: Show role grant SHOW ROLE GRANT (USER|ROLE) principal_name; + * + * @param principal + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract List<HiveRoleGrant> getRoleGrantInfoForPrincipal(HivePrincipal principal) + throws HiveAuthzPluginException, HiveAccessControlException; + + /** + * Apply configuration files for authorization V2 + * + * @param hiveConf + * @throws HiveAuthzPluginException + */ + @Override + public abstract void applyAuthorizationConfigPolicy(HiveConf hiveConf) + throws HiveAuthzPluginException; + +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAuthorizationValidator.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAuthorizationValidator.java new file mode 100644 index 000000000..7bf7b8722 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAuthorizationValidator.java @@ -0,0 +1,58 @@ +/** + * Licensed to the
Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.sentry.binding.hive.v2.authorizer; + +import java.util.List; + +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizationValidator; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; + +/** + * This class is used to do authorization validation: check whether the current user has privileges + * to perform the operation, and filter the select results. + */ +public abstract class SentryHiveAuthorizationValidator implements HiveAuthorizationValidator { + + /** + * Check if the current user has privileges to perform the given operation type hiveOpType on the + * given input and output objects. + * + * @param hiveOpType + * @param inputHObjs + * @param outputHObjs + * @param context + * @throws HiveAuthzPluginException, HiveAccessControlException + */ + @Override + public abstract void checkPrivileges(HiveOperationType hiveOpType, + List<HivePrivilegeObject> inputHObjs, List<HivePrivilegeObject> outputHObjs, + HiveAuthzContext context) throws HiveAuthzPluginException, HiveAccessControlException; + + + /** + * Filter the select results according to the current user's permissions, removing the objects on + * which the current user does not have any privilege. + * + * @param listObjs + * @param context + */ + @Override + public abstract List<HivePrivilegeObject> filterListCmdObjects( + List<HivePrivilegeObject> listObjs, HiveAuthzContext context); +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAuthorizer.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAuthorizer.java new file mode 100644 index 000000000..14b952f55 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/authorizer/SentryHiveAuthorizer.java @@ -0,0 +1,192 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License.
You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.sentry.binding.hive.v2.authorizer; + +import java.util.List; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.SentryHivePrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationTranslator; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizationTranslator; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilege; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeInfo; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant; +import org.apache.sentry.binding.hive.v2.SentryHivePrivilegeObject; + +/** + * Convenience implementation of HiveAuthorizer. You can customize the behavior by passing different + * implementations of {@link SentryHiveAccessController} and + * {@link SentryHiveAuthorizationValidator} to the constructor.
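+ * As a wiring sketch (DefaultSentryValidator and its constructor appear in this patch; the access + * controller name and arguments below are illustrative assumptions): + * new SentryHiveAuthorizer(new DefaultSentryAccessController(...), + * new DefaultSentryValidator(conf, authzConf, authenticator));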
+ */ +public class SentryHiveAuthorizer implements HiveAuthorizer { + + private SentryHiveAccessController accessController; + private SentryHiveAuthorizationValidator authValidator; + static private HiveAuthorizationTranslator hiveTranslator = + new SentryHiveAuthorizationTranslator(); + + public SentryHiveAuthorizer(SentryHiveAccessController accessController, + SentryHiveAuthorizationValidator authValidator) { + this.accessController = accessController; + this.authValidator = authValidator; + } + + @Override + public void grantPrivileges(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, + HivePrincipal grantorPrincipal, boolean grantOption) throws HiveAuthzPluginException, + HiveAccessControlException { + accessController.grantPrivileges(hivePrincipals, hivePrivileges, hivePrivObject, + grantorPrincipal, grantOption); + } + + @Override + public void revokePrivileges(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, + HivePrincipal grantorPrincipal, boolean grantOption) throws HiveAuthzPluginException, + HiveAccessControlException { + accessController.revokePrivileges(hivePrincipals, hivePrivileges, hivePrivObject, + grantorPrincipal, grantOption); + } + + @Override + public void createRole(String roleName, HivePrincipal adminGrantor) + throws HiveAuthzPluginException, HiveAccessControlException { + accessController.createRole(roleName, adminGrantor); + } + + @Override + public void dropRole(String roleName) throws HiveAuthzPluginException, HiveAccessControlException { + accessController.dropRole(roleName); + } + + @Override + public void grantRole(List hivePrincipals, List roles, + boolean grantOption, HivePrincipal grantorPrinc) throws HiveAuthzPluginException, + HiveAccessControlException { + accessController.grantRole(hivePrincipals, roles, grantOption, grantorPrinc); + } + + @Override + public void revokeRole(List hivePrincipals, List roles, + boolean grantOption, HivePrincipal grantorPrinc) throws HiveAuthzPluginException, + HiveAccessControlException { + accessController.revokeRole(hivePrincipals, roles, grantOption, grantorPrinc); + } + + @Override + public void checkPrivileges(HiveOperationType hiveOpType, List inputHObjs, + List outputHObjs, HiveAuthzContext context) + throws HiveAuthzPluginException, HiveAccessControlException { + authValidator.checkPrivileges(hiveOpType, inputHObjs, outputHObjs, context); + } + + @Override + public List getAllRoles() throws HiveAuthzPluginException, HiveAccessControlException { + return accessController.getAllRoles(); + } + + @Override + public List showPrivileges(HivePrincipal principal, HivePrivilegeObject privObj) + throws HiveAuthzPluginException, HiveAccessControlException { + return accessController.showPrivileges(principal, privObj); + } + + @Override + public VERSION getVersion() { + return VERSION.V1; + } + + @Override + public void setCurrentRole(String roleName) throws HiveAccessControlException, + HiveAuthzPluginException { + accessController.setCurrentRole(roleName); + } + + @Override + public List getCurrentRoleNames() throws HiveAuthzPluginException { + return accessController.getCurrentRoleNames(); + } + + @Override + public List getPrincipalGrantInfoForRole(String roleName) + throws HiveAuthzPluginException, HiveAccessControlException { + return accessController.getPrincipalGrantInfoForRole(roleName); + } + + @Override + public List getRoleGrantInfoForPrincipal(HivePrincipal principal) + throws HiveAuthzPluginException, HiveAccessControlException { + return 
accessController.getRoleGrantInfoForPrincipal(principal); + } + + @Override + public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException { + accessController.applyAuthorizationConfigPolicy(hiveConf); + } + + @Override + public List filterListCmdObjects(List listObjs, + HiveAuthzContext context) throws HiveAuthzPluginException, HiveAccessControlException { + return authValidator.filterListCmdObjects(listObjs, context); + } + + protected static HivePrivilegeObjectType getPrivObjectType( + SentryHivePrivilegeObjectDesc privSubjectDesc) { + if (privSubjectDesc.getObject() == null) { + return null; + } + if (privSubjectDesc.getServer()) { + return HivePrivilegeObjectType.GLOBAL; + } else if (privSubjectDesc.getUri()) { + return HivePrivilegeObjectType.LOCAL_URI; + } else { + return privSubjectDesc.getTable() ? HivePrivilegeObjectType.TABLE_OR_VIEW + : HivePrivilegeObjectType.DATABASE; + } + } + + @Override + public Object getHiveAuthorizationTranslator() throws HiveAuthzPluginException { + return hiveTranslator; + } + + private static class SentryHiveAuthorizationTranslator extends DefaultHiveAuthorizationTranslator { + + @Override + public HivePrivilegeObject getHivePrivilegeObject(PrivilegeObjectDesc privSubjectDesc) + throws HiveException { + if (privSubjectDesc != null && privSubjectDesc instanceof SentryHivePrivilegeObjectDesc) { + SentryHivePrivilegeObjectDesc sPrivSubjectDesc = + (SentryHivePrivilegeObjectDesc) privSubjectDesc; + if (sPrivSubjectDesc.isSentryPrivObjectDesc()) { + HivePrivilegeObjectType objectType = getPrivObjectType(sPrivSubjectDesc); + return new SentryHivePrivilegeObject(objectType, privSubjectDesc.getObject()); + } + } + return super.getHivePrivilegeObject(privSubjectDesc); + } + } +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/AuthorizingObjectStoreV2.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/AuthorizingObjectStoreV2.java new file mode 100644 index 000000000..726f5ad81 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/AuthorizingObjectStoreV2.java @@ -0,0 +1,412 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.sentry.binding.hive.v2.metastore; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.List; +import java.util.Set; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.ObjectStore; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.shims.Utils; +import org.apache.sentry.binding.hive.HiveAuthzBindingHook; +import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars; + +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; + +/*** + * This class is a wrapper of ObjectStore, which is the interface between the + * application logic and the database store. It does the authorization or filters the + * results when processing metastore requests, + * e.g.: + * callers will only receive back the objects which they have privileges to + * access, and + * if there is a request for an object list (like getAllTables()), the result + * will be filtered to exclude objects the requestor doesn't have privileges to access.
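+ * For example (hypothetical objects): a caller whose roles only cover db1 would see + * getAllDatabases() return just "db1" (plus "default" when it is not restricted), while + * getTable("db2", "t1") would return null rather than the table metadata.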
+ */ +public class AuthorizingObjectStoreV2 extends ObjectStore { + private static ImmutableSet serviceUsers; + private static HiveConf hiveConf; + private static HiveAuthzConf authzConf; + private static HiveAuthzBinding hiveAuthzBinding; + private static String NO_ACCESS_MESSAGE_TABLE = "Table does not exist or insufficient privileges to access: "; + private static String NO_ACCESS_MESSAGE_DATABASE = "Database does not exist or insufficient privileges to access: "; + + @Override + public List getDatabases(String pattern) throws MetaException { + return filterDatabases(super.getDatabases(pattern)); + } + + @Override + public List getAllDatabases() throws MetaException { + return filterDatabases(super.getAllDatabases()); + } + + @Override + public Database getDatabase(String name) throws NoSuchObjectException { + Database db = super.getDatabase(name); + try { + if (filterDatabases(Lists.newArrayList(name)).isEmpty()) { + throw new NoSuchObjectException(getNoAccessMessageForDB(name)); + } + } catch (MetaException e) { + throw new NoSuchObjectException("Failed to authorized access to " + name + + " : " + e.getMessage()); + } + return db; + } + + @Override + public Table getTable(String dbName, String tableName) throws MetaException { + Table table = super.getTable(dbName, tableName); + if (table == null + || filterTables(dbName, Lists.newArrayList(tableName)).isEmpty()) { + return null; + } + return table; + } + + @Override + public Partition getPartition(String dbName, String tableName, + List part_vals) throws MetaException, NoSuchObjectException { + if (filterTables(dbName, Lists.newArrayList(tableName)).isEmpty()) { + throw new NoSuchObjectException(getNoAccessMessageForTable(dbName, tableName)); + } + return super.getPartition(dbName, tableName, part_vals); + } + + @Override + public List getPartitions(String dbName, String tableName, + int maxParts) throws MetaException, NoSuchObjectException { + if (filterTables(dbName, Lists.newArrayList(tableName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tableName)); + } + return super.getPartitions(dbName, tableName, maxParts); + } + + @Override + public List getTables(String dbName, String pattern) + throws MetaException { + return filterTables(dbName, super.getTables(dbName, pattern)); + } + + @Override + public List getTableObjectsByName(String dbname, List tableNames) + throws MetaException, UnknownDBException { + return super.getTableObjectsByName(dbname, filterTables(dbname, tableNames)); + } + + @Override + public List getAllTables(String dbName) throws MetaException { + return filterTables(dbName, super.getAllTables(dbName)); + } + + @Override + public List listTableNamesByFilter(String dbName, String filter, + short maxTables) throws MetaException { + return filterTables(dbName, + super.listTableNamesByFilter(dbName, filter, maxTables)); + } + + @Override + public List listPartitionNames(String dbName, String tableName, + short max_parts) throws MetaException { + if (filterTables(dbName, Lists.newArrayList(tableName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tableName)); + } + return super.listPartitionNames(dbName, tableName, max_parts); + } + + @Override + public List listPartitionNamesByFilter(String dbName, + String tableName, String filter, short max_parts) throws MetaException { + if (filterTables(dbName, Lists.newArrayList(tableName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tableName)); + } + return 
super.listPartitionNamesByFilter(dbName, tableName, filter, + max_parts); + } + + @Override + public Index getIndex(String dbName, String origTableName, String indexName) + throws MetaException { + if (filterTables(dbName, Lists.newArrayList(origTableName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, origTableName)); + } + return super.getIndex(dbName, origTableName, indexName); + } + + @Override + public List<Index> getIndexes(String dbName, String origTableName, int max) + throws MetaException { + if (filterTables(dbName, Lists.newArrayList(origTableName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, origTableName)); + } + return super.getIndexes(dbName, origTableName, max); + } + + @Override + public List<String> listIndexNames(String dbName, String origTableName, + short max) throws MetaException { + if (filterTables(dbName, Lists.newArrayList(origTableName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, origTableName)); + } + return super.listIndexNames(dbName, origTableName, max); + } + + @Override + public List<Partition> getPartitionsByFilter(String dbName, + String tblName, String filter, short maxParts) throws MetaException, + NoSuchObjectException { + if (filterTables(dbName, Lists.newArrayList(tblName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tblName)); + } + return super.getPartitionsByFilter(dbName, tblName, filter, maxParts); + } + + @Override + public List<Partition> getPartitionsByNames(String dbName, String tblName, + List<String> partNames) throws MetaException, NoSuchObjectException { + if (filterTables(dbName, Lists.newArrayList(tblName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tblName)); + } + return super.getPartitionsByNames(dbName, tblName, partNames); + } + + @Override + public Partition getPartitionWithAuth(String dbName, String tblName, + List<String> partVals, String user_name, List<String> group_names) + throws MetaException, NoSuchObjectException, InvalidObjectException { + if (filterTables(dbName, Lists.newArrayList(tblName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tblName)); + } + return super.getPartitionWithAuth(dbName, tblName, partVals, user_name, + group_names); + } + + @Override + public List<Partition> getPartitionsWithAuth(String dbName, String tblName, + short maxParts, String userName, List<String> groupNames) + throws MetaException, InvalidObjectException { + if (filterTables(dbName, Lists.newArrayList(tblName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tblName)); + } + return super.getPartitionsWithAuth(dbName, tblName, maxParts, userName, + groupNames); + } + + @Override + public List<String> listPartitionNamesPs(String dbName, String tblName, + List<String> part_vals, short max_parts) throws MetaException, + NoSuchObjectException { + if (filterTables(dbName, Lists.newArrayList(tblName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tblName)); + } + return super.listPartitionNamesPs(dbName, tblName, part_vals, max_parts); + } + + @Override + public List<Partition> listPartitionsPsWithAuth(String dbName, + String tblName, List<String> part_vals, short max_parts, String userName, + List<String> groupNames) throws MetaException, InvalidObjectException, + NoSuchObjectException { + if (filterTables(dbName, Lists.newArrayList(tblName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tblName)); + } + return super.listPartitionsPsWithAuth(dbName, tblName, part_vals, + max_parts, userName, groupNames); +
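// Note: every read override above applies the same guard; e.g. a caller without privileges on a + // table (hypothetical call: getPartition("db1", "hidden_tbl", vals)) sees NoSuchObjectException or + // MetaException instead of the partition metadata. +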
} + + @Override + public ColumnStatistics getTableColumnStatistics(String dbName, + String tableName, List<String> colNames) throws MetaException, + NoSuchObjectException { + if (filterTables(dbName, Lists.newArrayList(tableName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tableName)); + } + return super.getTableColumnStatistics(dbName, tableName, colNames); + } + + @Override + public List<ColumnStatistics> getPartitionColumnStatistics( + String dbName, String tblName, List<String> partNames, + List<String> colNames) throws MetaException, NoSuchObjectException { + if (filterTables(dbName, Lists.newArrayList(tblName)).isEmpty()) { + throw new MetaException(getNoAccessMessageForTable(dbName, tblName)); + } + return super.getPartitionColumnStatistics(dbName, tblName, partNames, + colNames); + } + + /** + * Invoke Hive database filtering that removes the entries which the user has no + * privileges to access + * @param dbList + * @return + * @throws MetaException + */ + private List<String> filterDatabases(List<String> dbList) + throws MetaException { + if (needsAuthorization(getUserName())) { + try { + return HiveAuthzBindingHook.filterShowDatabases(getHiveAuthzBinding(), + dbList, HiveOperation.SHOWDATABASES, getUserName()); + } catch (SemanticException e) { + throw new MetaException("Error getting DB list " + e.getMessage()); + } + } else { + return dbList; + } + } + + /** + * Invoke Hive table filtering that removes the entries which the user has no + * privileges to access + * @param dbName + * @param tabList + * @return + * @throws MetaException + */ + protected List<String> filterTables(String dbName, List<String> tabList) + throws MetaException { + if (needsAuthorization(getUserName())) { + try { + return HiveAuthzBindingHook.filterShowTables(getHiveAuthzBinding(), + tabList, HiveOperation.SHOWTABLES, getUserName(), dbName); + } catch (SemanticException e) { + throw new MetaException("Error getting Table list " + e.getMessage()); + } + } else { + return tabList; + } + } + + /** + * load Hive auth provider + * + * @return + * @throws MetaException + */ + private HiveAuthzBinding getHiveAuthzBinding() throws MetaException { + if (hiveAuthzBinding == null) { + try { + hiveAuthzBinding = new HiveAuthzBinding(HiveAuthzBinding.HiveHook.HiveMetaStore, + getHiveConf(), getAuthzConf()); + } catch (Exception e) { + throw new MetaException("Failed to load Hive binding " + e.getMessage()); + } + } + return hiveAuthzBinding; + } + + private ImmutableSet<String> getServiceUsers() throws MetaException { + if (serviceUsers == null) { + serviceUsers = ImmutableSet.copyOf(toTrimed(Sets.newHashSet(getAuthzConf().getStrings( + AuthzConfVars.AUTHZ_METASTORE_SERVICE_USERS.getVar(), new String[] { "" })))); + } + return serviceUsers; + } + + private HiveConf getHiveConf() { + if (hiveConf == null) { + hiveConf = new HiveConf(getConf(), this.getClass()); + } + return hiveConf; + } + + private HiveAuthzConf getAuthzConf() throws MetaException { + if (authzConf == null) { + String hiveAuthzConf = getConf().get(HiveAuthzConf.HIVE_SENTRY_CONF_URL); + if (hiveAuthzConf == null + || (hiveAuthzConf = hiveAuthzConf.trim()).isEmpty()) { + throw new MetaException("Configuration key " + + HiveAuthzConf.HIVE_SENTRY_CONF_URL + " value '" + hiveAuthzConf + + "' is invalid."); + } + try { + authzConf = new HiveAuthzConf(new URL(hiveAuthzConf)); + } catch (MalformedURLException e) { + throw new MetaException("Configuration key " + + HiveAuthzConf.HIVE_SENTRY_CONF_URL + + " specifies a malformed URL '" + hiveAuthzConf + "' " + + e.getMessage()); + } + } + return authzConf; + } + + /** + * Extract the
user from the underlying auth subsystem + * @return + * @throws MetaException + */ + private String getUserName() throws MetaException { + try { + return Utils.getUGI().getShortUserName(); + } catch (LoginException e) { + throw new MetaException("Failed to get username " + e.getMessage()); + } catch (IOException e) { + throw new MetaException("Failed to get username " + e.getMessage()); + } + } + + /** + * Check if the given user needs to be validated. + * @param userName + * @return + */ + private boolean needsAuthorization(String userName) throws MetaException { + return !getServiceUsers().contains(userName.trim()); + } + + private static Set<String> toTrimed(Set<String> s) { + Set<String> result = Sets.newHashSet(); + for (String v : s) { + result.add(v.trim()); + } + return result; + } + + protected String getNoAccessMessageForTable(String dbName, String tableName) { + return NO_ACCESS_MESSAGE_TABLE + "<" + dbName + ">.<" + tableName + ">"; + } + + private String getNoAccessMessageForDB(String dbName) { + return NO_ACCESS_MESSAGE_DATABASE + "<" + dbName + ">"; + } +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/MetastoreAuthzBindingV2.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/MetastoreAuthzBindingV2.java new file mode 100644 index 000000000..d9374910e --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/MetastoreAuthzBindingV2.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.binding.hive.v2.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.sentry.binding.metastore.MetastoreAuthzBinding; + +/** + * Sentry binding for Hive Metastore. The binding is integrated into the Metastore + * via pre-event listeners, which are fired prior to executing the metadata + * action. At this point we only authorize metadata writes, since the listeners + * are not fired for read events. Each action builds an input and output + * hierarchy as per the objects used in the given operation. This is then + * passed down to the Hive binding, which handles the authorization. This ensures + * that we follow the same privilege model and policies.
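+ * As a deployment sketch (hive.metastore.pre.event.listeners is the standard Hive property for + * pre-event listeners; the value is this class's fully qualified name): + * hive.metastore.pre.event.listeners=org.apache.sentry.binding.hive.v2.metastore.MetastoreAuthzBindingV2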
+ */ +public class MetastoreAuthzBindingV2 extends MetastoreAuthzBinding { + + public MetastoreAuthzBindingV2(Configuration config) throws Exception { + super(config); + } + + protected void authorizeDropPartition(PreDropPartitionEvent context) + throws InvalidOperationException, MetaException { + authorizeMetastoreAccess( + HiveOperation.ALTERTABLE_DROPPARTS, + new HierarcyBuilder().addTableToOutput(getAuthServer(), + context.getTable().getDbName(), + context.getTable().getTableName()).build(), + new HierarcyBuilder().addTableToOutput(getAuthServer(), + context.getTable().getDbName(), + context.getTable().getTableName()).build()); + } + +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/SentryMetastorePostEventListenerV2.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/SentryMetastorePostEventListenerV2.java new file mode 100644 index 000000000..013d01628 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/metastore/SentryMetastorePostEventListenerV2.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.binding.hive.v2.metastore; + +import java.util.Iterator; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; +import org.apache.hadoop.hive.metastore.events.DropPartitionEvent; +import org.apache.sentry.binding.metastore.SentryMetastorePostEventListener; +import org.apache.sentry.provider.db.SentryMetastoreListenerPlugin; + +public class SentryMetastorePostEventListenerV2 extends SentryMetastorePostEventListener { + + public SentryMetastorePostEventListenerV2(Configuration config) { + super(config); + } + + @Override + public void onAddPartition(AddPartitionEvent partitionEvent) + throws MetaException { + if (partitionEvent != null && partitionEvent.getPartitionIterator() != null) { + Iterator it = partitionEvent.getPartitionIterator(); + while (it.hasNext()) { + Partition part = it.next(); + if (part.getSd() != null && part.getSd().getLocation() != null) { + String authzObj = part.getDbName() + "." + part.getTableName(); + String path = part.getSd().getLocation(); + for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { + plugin.addPath(authzObj, path); + } + } + } + } + } + + @Override + public void onDropPartition(DropPartitionEvent partitionEvent) + throws MetaException { + if (partitionEvent != null && partitionEvent.getPartitionIterator() != null) { + String authzObj = partitionEvent.getTable().getDbName() + "." 
+ + partitionEvent.getTable().getTableName(); + Iterator it = partitionEvent.getPartitionIterator(); + while (it.hasNext()) { + Partition part = it.next(); + if (part.getSd() != null && part.getSd().getLocation() != null) { + String path = part.getSd().getLocation(); + for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { + plugin.removePath(authzObj, path); + } + } + } + } + } + +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/util/SentryAuthorizerUtil.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/util/SentryAuthorizerUtil.java new file mode 100644 index 000000000..35bd68ce7 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/util/SentryAuthorizerUtil.java @@ -0,0 +1,362 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.sentry.binding.hive.v2.util; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.JavaUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.ql.hooks.Hook; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.ql.security.authorization.PrivilegeType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal.HivePrincipalType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilege; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeInfo; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.sentry.binding.hive.SentryOnFailureHook; +import org.apache.sentry.binding.hive.SentryOnFailureHookContext; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.core.common.utils.PathUtils; +import org.apache.sentry.core.model.db.AccessConstants; +import org.apache.sentry.core.model.db.AccessURI; +import org.apache.sentry.core.model.db.Column; +import org.apache.sentry.core.model.db.DBModelAuthorizable; +import 
org.apache.sentry.core.model.db.Database; +import org.apache.sentry.core.model.db.Server; +import org.apache.sentry.core.model.db.Table; +import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.service.thrift.TSentryRole; +import org.apache.sentry.service.thrift.ServiceConstants.PrivilegeScope; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Splitter; + +public class SentryAuthorizerUtil { + public static final Logger LOG = LoggerFactory.getLogger(SentryAuthorizerUtil.class); + public static String UNKONWN_GRANTOR = "--"; + + /** + * Convert a string to an AccessURI + * + * @param uri + * @param isLocal + * @throws URISyntaxException + */ + public static AccessURI parseURI(String uri, boolean isLocal) throws URISyntaxException { + HiveConf conf = SessionState.get().getConf(); + String warehouseDir = conf.getVar(ConfVars.METASTOREWAREHOUSE); + return new AccessURI(PathUtils.parseURI(warehouseDir, uri, isLocal)); + } + + /** + * Convert a HivePrivilegeObject to a list of DBModelAuthorizable hierarchies. + * Note: Hive 0.13 does not support column-level privilege objects here. + * + * @param server + * @param privilege + */ + public static List> getAuthzHierarchy(Server server, + HivePrivilegeObject privilege) { + List baseHierarchy = new ArrayList(); + List> objectHierarchy = new ArrayList>(); + boolean isLocal = false; + if (privilege.getType() != null) { + switch (privilege.getType()) { + case GLOBAL: + baseHierarchy.add(new Server(privilege.getObjectName())); + objectHierarchy.add(baseHierarchy); + break; + case DATABASE: + baseHierarchy.add(server); + baseHierarchy.add(new Database(privilege.getDbname())); + objectHierarchy.add(baseHierarchy); + break; + case TABLE_OR_VIEW: + baseHierarchy.add(server); + baseHierarchy.add(new Database(privilege.getDbname())); + baseHierarchy.add(new Table(privilege.getObjectName())); + if (privilege.getColumns() != null) { + for (String columnName : privilege.getColumns()) { + List columnHierarchy = + new ArrayList(baseHierarchy); + columnHierarchy.add(new Column(columnName)); + objectHierarchy.add(columnHierarchy); + } + } else { + objectHierarchy.add(baseHierarchy); + } + break; + case LOCAL_URI: + isLocal = true; + // fall through to DFS_URI + case DFS_URI: + if (privilege.getObjectName() == null) { + break; + } + try { + baseHierarchy.add(server); + baseHierarchy.add(parseURI(privilege.getObjectName(), isLocal)); + objectHierarchy.add(baseHierarchy); + } catch (Exception e) { + throw new AuthorizationException("Failed to get File URI", e); + } + break; + case FUNCTION: + case PARTITION: + case COLUMN: + case COMMAND_PARAMS: + // these types are not supported + break; + default: + break; + } + } + return objectHierarchy; + } + + /** + * Convert a HivePrivilegeObject list to List> + * + * @param server + * @param privileges + */ + public static List> convert2SentryPrivilegeList(Server server, + List privileges) { + List> hierarchyList = new ArrayList>(); + if (privileges != null && !privileges.isEmpty()) { + for (HivePrivilegeObject p : privileges) { + hierarchyList.addAll(getAuthzHierarchy(server, p)); + } + } + return hierarchyList; + } + + /** + * Convert a HiveOperationType name to a HiveOperation + * + * @param typeName + */ + public static HiveOperation convert2HiveOperation(String typeName) { + try { + return HiveOperation.valueOf(typeName); + } catch (Exception e) { + return null; + } + } + + /** + * Convert HivePrivilege to Sentry Action + * + * @param 
hivePrivilege + */ + public static String convert2SentryAction(HivePrivilege hivePrivilege) { + if (PrivilegeType.ALL.name().equals(hivePrivilege.getName())) { + return AccessConstants.ALL; + } else { + return hivePrivilege.getName(); + } + } + + /** + * Convert a Sentry action to a HivePrivilege + * + * @param action + */ + public static HivePrivilege convert2HivePrivilege(String action) { + return new HivePrivilege(action, null); + } + + /** + * Convert a TSentryRole Set to a String List + * + * @param roleSet + */ + public static List convert2RoleList(Set roleSet) { + List roles = new ArrayList(); + if (roleSet != null && !roleSet.isEmpty()) { + for (TSentryRole tRole : roleSet) { + roles.add(tRole.getRoleName()); + } + } + return roles; + } + + /** + * Convert TSentryPrivilege to HivePrivilegeInfo + * + * @param tPrivilege + * @param principal + */ + public static HivePrivilegeInfo convert2HivePrivilegeInfo(TSentryPrivilege tPrivilege, + HivePrincipal principal) { + HivePrivilege hivePrivilege = convert2HivePrivilege(tPrivilege.getAction()); + HivePrivilegeObject hivePrivilegeObject = convert2HivePrivilegeObject(tPrivilege); + // Sentry does not currently expose the grantor of a privilege + HivePrincipal grantor = new HivePrincipal(UNKONWN_GRANTOR, HivePrincipalType.ROLE); + boolean grantOption = TSentryGrantOption.TRUE.equals(tPrivilege.getGrantOption()); + return new HivePrivilegeInfo(principal, hivePrivilege, hivePrivilegeObject, grantor, + grantOption, (int) tPrivilege.getCreateTime()); + } + + /** + * Convert TSentryPrivilege to HivePrivilegeObject + * + * @param tSentryPrivilege + */ + public static HivePrivilegeObject convert2HivePrivilegeObject(TSentryPrivilege tSentryPrivilege) { + HivePrivilegeObject privilege = null; + switch (PrivilegeScope.valueOf(tSentryPrivilege.getPrivilegeScope())) { + case SERVER: + privilege = new HivePrivilegeObject(HivePrivilegeObjectType.GLOBAL, "*", null); + break; + case DATABASE: + privilege = + new HivePrivilegeObject(HivePrivilegeObjectType.DATABASE, tSentryPrivilege.getDbName(), + null); + break; + case TABLE: + privilege = + new HivePrivilegeObject(HivePrivilegeObjectType.TABLE_OR_VIEW, + tSentryPrivilege.getDbName(), tSentryPrivilege.getTableName()); + break; + case COLUMN: + privilege = + new HivePrivilegeObject(HivePrivilegeObjectType.COLUMN, tSentryPrivilege.getDbName(), + tSentryPrivilege.getTableName(), null, tSentryPrivilege.getColumnName()); + break; + case URI: + String uriString = tSentryPrivilege.getURI(); + try { + uriString = uriString.replace("'", "").replace("\"", ""); + HivePrivilegeObjectType type = + isLocalUri(uriString) ? 
HivePrivilegeObjectType.LOCAL_URI + : HivePrivilegeObjectType.DFS_URI; + privilege = new HivePrivilegeObject(type, uriString, null); + } catch (URISyntaxException e1) { + throw new RuntimeException(uriString + " is not a valid URI", e1); + } + break; + default: + LOG.warn("Unknown PrivilegeScope: " + + PrivilegeScope.valueOf(tSentryPrivilege.getPrivilegeScope())); + break; + } + return privilege; + } + + public static boolean isLocalUri(String uriString) throws URISyntaxException { + URI uri = new URI(uriString); + return "file".equalsIgnoreCase(uri.getScheme()); + } + + /** + * Convert TSentryRole to HiveRoleGrant + * + * @param role + */ + public static HiveRoleGrant convert2HiveRoleGrant(TSentryRole role) { + HiveRoleGrant hiveRoleGrant = new HiveRoleGrant(); + hiveRoleGrant.setRoleName(role.getRoleName()); + hiveRoleGrant.setPrincipalName(role.getRoleName()); + hiveRoleGrant.setPrincipalType(PrincipalType.ROLE.name()); + hiveRoleGrant.setGrantOption(false); + hiveRoleGrant.setGrantor(role.getGrantorPrincipal()); + hiveRoleGrant.setGrantorType(PrincipalType.USER.name()); + return hiveRoleGrant; + } + + /** + * Execute the configured on-failure hooks (used, among others, by the e2e tests) + * + * @param hookCtx + * @param conf + */ + public static void executeOnFailureHooks(SentryOnFailureHookContext hookCtx, Configuration conf) { + String csHooks = + conf.get(HiveAuthzConf.AuthzConfVars.AUTHZ_ONFAILURE_HOOKS.getVar(), "").trim(); + + try { + for (Hook aofh : SentryAuthorizerUtil.getHooks(csHooks)) { + ((SentryOnFailureHook) aofh).run(hookCtx); + } + } catch (Exception ex) { + LOG.error("Error executing hook:", ex); + } + } + + /** + * Returns the hooks specified in a configuration variable. + * + * See getHooks(String csHooks, Class clazz) + * + * @param csHooks A comma separated list of the hook class names + * @return + * @throws Exception + */ + public static List getHooks(String csHooks) throws Exception { + return getHooks(csHooks, Hook.class); + } + + /** + * Returns the hooks specified in a configuration variable. The hooks are returned in a list in + * the order they were specified in the configuration variable. + * + * @param csHooks A comma separated list of the hook + * class names. + * @param clazz The super type of the hooks. 
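 + * + * <p>Illustrative usage (the hook class name here is hypothetical, not part of this patch): + * <pre> + * List hooks = SentryAuthorizerUtil.getHooks("com.example.MyFailureHook", Hook.class); + * </pre>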
+ * @return A list of the hooks cast as the type specified in clazz, in the order they are listed + * in the value of csHooks + * @throws Exception + */ + public static List getHooks(String csHooks, Class clazz) throws Exception { + + List hooks = new ArrayList(); + if (csHooks.isEmpty()) { + return hooks; + } + for (String hookClass : Splitter.on(",").omitEmptyStrings().trimResults().split(csHooks)) { + try { + @SuppressWarnings("unchecked") + T hook = (T) Class.forName(hookClass, true, JavaUtils.getClassLoader()).newInstance(); + hooks.add(hook); + } catch (ClassNotFoundException e) { + LOG.error("Hook class not found: " + hookClass, e); + throw e; + } + } + + return hooks; + } +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/util/SimpleSemanticAnalyzer.java b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/util/SimpleSemanticAnalyzer.java new file mode 100644 index 000000000..b50bbf482 --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/main/java/org/apache/sentry/binding/hive/v2/util/SimpleSemanticAnalyzer.java @@ -0,0 +1,369 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.sentry.binding.hive.v2.util; + +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.sentry.core.model.db.Table; + +/** + * Currently the Hive compiler doesn't create read/write entities for some operations, e.g. create + * table, drop table. This class is a simple regex-based semantic analyzer; it is a workaround + * to extract db_name and tb_name from those operations. + */ +public class SimpleSemanticAnalyzer { + private String currentDb; + private String currentTb; + + /** + * CREATE [TEMPORARY] [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name ... + */ + private static final String CREATE_TABLE_REGEX = "^(CREATE)\\s+" + "(TEMPORARY\\s+)?" + + "(EXTERNAL\\s+)?" + "TABLE\\s+" + "(IF\\s+NOT\\s+EXISTS\\s+)?" + "([A-Za-z0-9._]+)"; + + /** + * DROP (DATABASE|SCHEMA) [IF EXISTS] database_name [RESTRICT|CASCADE]; + */ + private static final String DROP_DB_REGEX = "^DROP\\s+" + "(DATABASE|SCHEMA)\\s+" + + "(IF\\s+EXISTS\\s+)?" + "([A-Za-z0-9_]+)"; + + /** + * DROP TABLE [IF EXISTS] table_name; + */ + private static final String DROP_TABLE_REGEX = "^DROP\\s+" + "TABLE\\s+" + "(IF\\s+EXISTS\\s+)?" 
+ + "([A-Za-z0-9._]+)"; + + /** + * DROP VIEW [IF EXISTS] view_name; + */ + private static final String DROP_VIEW_REGEX = "^DROP\\s+" + "VIEW\\s+" + "(IF\\s+EXISTS\\s+)?" + + "([A-Za-z0-9_].+)"; + + /** + * DESCRIBE DATABASE|SCHEMA [EXTENDED] db_name; + */ + private static final String DESCRIBE_DB_REGEX = "^DESCRIBE\\s+" + "(DATABASE|SCHEMA)\\s+" + + "(EXTENDED\\s+)?" + "([A-Za-z0-9_]+)"; + + /** + * DESCRIBE [EXTENDED|FORMATTED] [db_name.]table_name[.col_name ( [.field_name] | [.'$elem$'] | + * [.'$key$'] | [.'$value$'] )* ]; + */ + private static final String DESCRIBE_TABLE_REGEX = "^DESCRIBE\\s+" + + "((EXTENDED|FORMATTED)\\s+)?" + "([A-Za-z0-9._]+)"; + + /** + * SHOW [FORMATTED] (INDEX|INDEXES) ON table_with_index [(FROM|IN) db_name]; + */ + private static final String SHOW_INDEX_REGEX = "^SHOW\\s+" + "(FORMATTED\\s+)?" + + "(INDEX|INDEXES)\\s+" + "ON\\s+" + "([A-Za-z0-9._]+)\\s*" + + "((FROM|IN)\\s+([A-Za-z0-9_]+))?"; + + /** + * SHOW TBLPROPERTIES tblname; + */ + private static final String SHOW_TBLPROPERTIES_REGEX = "^SHOW\\s+" + "TBLPROPERTIES\\s+" + + "([A-Za-z0-9._]+)"; + + /** + * ALTER TABLE table_name ... + */ + private static final String ALTER_TABLE_REGEX = "^ALTER\\s+" + "TABLE\\s+" + "([A-Za-z0-9._]+)"; + + /** + * ALTER VIEW view_name ... + */ + private static final String ALTER_VIEW_REGEX = "^ALTER\\s+" + "VIEW\\s+" + "([A-Za-z0-9._]+)"; + + /** + * MSCK REPAIR TABLE table_name; + */ + private static final String MSCK_REGEX = "^MSCK\\s+" + "REPAIR\\s" + "TABLE\\s" + + "([A-Za-z0-9._]+)"; + + /** + * ALTER INDEX index_name ON table_name [PARTITION partition_spec] REBUILD; + */ + private static final String ALTER_INDEX_REGEX = "^ALTER\\s+" + "INDEX\\s+" + + "([A-Za-z0-9_]+)\\s+" + "ON\\s" + "([A-Za-z0-9._]+)"; + + /** + * CREATE FUNCTION [db_name.]function_name AS class_name [USING JAR|FILE|ARCHIVE 'file_uri' [, + * JAR|FILE|ARCHIVE 'file_uri'] ]; + */ + private static final String CREATE_FUNCTION_REGEX = "^CREATE\\s+" + "(TEMPORARY\\s+)?" + + "FUNCTION\\s+" + "([A-Za-z0-9._]+)\\s+" + "AS\\s" + "([A-Za-z0-9._']+)"; + + /** + * SHOW COLUMNS FROM table_name + */ + private static final String SHOWCOLUMNS = "^SHOW\\s+" + "COLUMNS\\s+" + "(FROM|IN)\\s+" + + "([A-Za-z0-9._]+)"; + + private static final String SHOW_TABLESTATUS = "^SHOW\\s+" + "TABLE\\s+" + "EXTENDED\\s+" + "IN\\s+" + + "([A-Za-z0-9._]+)"; + + private static final String LOAD = "^LOAD\\s+" + "DATA\\s+" + "(LOCAL\\s+)?" 
+ "INPATH\\s+" + + "([A-Za-z0-9._':///-]+)" +"\\s" + "INTO\\s" + "TABLE\\s" + "([A-Za-z0-9._]+)"; + + /** + * LOCK DATABASE dbname; + */ + private static final String LOCKDB = "^LOCK\\s+" + "DATABASE\\s+" + "([A-Za-z0-9._]+)"; + + /** + * UNLOCK DATABASE dbname; + */ + private static final String UNLOCKDB = "^UNLOCK\\s+" + "DATABASE\\s+" + "([A-Za-z0-9._]+)"; + + /** + * LOCK TABLE tblname; + */ + private static final String LOCKTABLE = "^LOCK\\s+" + "TABLE\\s+" + "([A-Za-z0-9._]+)"; + + /** + * UNLOCK TABLE tblname; + */ + private static final String UNLOCKTABLE = "^UNLOCK\\s+" + "TABLE\\s+" + "([A-Za-z0-9._]+)"; + + private static Map OP_REGEX_MAP = new HashMap(); + static { + // database metadata + OP_REGEX_MAP.put(HiveOperation.DROPDATABASE, DROP_DB_REGEX); + OP_REGEX_MAP.put(HiveOperation.DESCDATABASE, DESCRIBE_DB_REGEX); + + // table metadata + OP_REGEX_MAP.put(HiveOperation.CREATETABLE, CREATE_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.DROPTABLE, DROP_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.DROPVIEW, DROP_VIEW_REGEX); + OP_REGEX_MAP.put(HiveOperation.DESCTABLE, DESCRIBE_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.SHOW_TBLPROPERTIES, SHOW_TBLPROPERTIES_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_PROPERTIES, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_SERDEPROPERTIES, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_CLUSTER_SORT, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_FILEFORMAT, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_TOUCH, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_PROTECTMODE, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_RENAMECOL, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_ADDCOLS, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_REPLACECOLS, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_RENAMEPART, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_ARCHIVE, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_UNARCHIVE, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_SERIALIZER, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_MERGEFILES, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_SKEWED, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_DROPPARTS, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_ADDPARTS, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_RENAME, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTABLE_LOCATION, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERPARTITION_FILEFORMAT, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERPARTITION_PROTECTMODE, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERPARTITION_SERDEPROPERTIES, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERPARTITION_SERIALIZER, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERPARTITION_MERGEFILES, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERPARTITION_LOCATION, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERTBLPART_SKEWED_LOCATION, ALTER_TABLE_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERVIEW_PROPERTIES, ALTER_VIEW_REGEX); + OP_REGEX_MAP.put(HiveOperation.MSCK, MSCK_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERINDEX_REBUILD, ALTER_INDEX_REGEX); + OP_REGEX_MAP.put(HiveOperation.ALTERINDEX_PROPS, ALTER_INDEX_REGEX); + OP_REGEX_MAP.put(HiveOperation.LOCKDB, LOCKDB); + 
OP_REGEX_MAP.put(HiveOperation.UNLOCKDB, UNLOCKDB); + OP_REGEX_MAP.put(HiveOperation.LOCKTABLE, LOCKTABLE); + OP_REGEX_MAP.put(HiveOperation.UNLOCKTABLE, UNLOCKTABLE); + OP_REGEX_MAP.put(HiveOperation.SHOWCOLUMNS, SHOWCOLUMNS); + OP_REGEX_MAP.put(HiveOperation.SHOW_TABLESTATUS, SHOW_TABLESTATUS); + } + + public SimpleSemanticAnalyzer(HiveOperation hiveOp, String cmd) throws HiveAuthzPluginException { + currentDb = SessionState.get().getCurrentDatabase(); + parse(hiveOp, cmd); + } + + private void parse(HiveOperation hiveOp, String cmd) throws HiveAuthzPluginException { + switch (hiveOp) { + case DROPDATABASE: + case DESCDATABASE: + case LOCKDB: + case UNLOCKDB: + parseDbMeta(cmd, OP_REGEX_MAP.get(hiveOp)); + break; + case DESCTABLE: + case CREATETABLE: + case DROPTABLE: + case DROPVIEW: + case SHOW_TBLPROPERTIES: + // alter table + case ALTERTABLE_PROPERTIES: + case ALTERTABLE_SERDEPROPERTIES: + case ALTERTABLE_CLUSTER_SORT: + case ALTERTABLE_FILEFORMAT: + case ALTERTABLE_TOUCH: + case ALTERTABLE_PROTECTMODE: + case ALTERTABLE_RENAMECOL: + case ALTERTABLE_ADDCOLS: + case ALTERTABLE_REPLACECOLS: + case ALTERTABLE_RENAMEPART: + case ALTERTABLE_ARCHIVE: + case ALTERTABLE_UNARCHIVE: + case ALTERTABLE_SERIALIZER: + case ALTERTABLE_MERGEFILES: + case ALTERTABLE_SKEWED: + case ALTERTABLE_DROPPARTS: + case ALTERTABLE_ADDPARTS: + case ALTERTABLE_RENAME: + case ALTERTABLE_LOCATION: + // alter view + case ALTERVIEW_PROPERTIES: + // alter partition + case ALTERPARTITION_FILEFORMAT: + case ALTERPARTITION_PROTECTMODE: + case ALTERPARTITION_SERDEPROPERTIES: + case ALTERPARTITION_SERIALIZER: + case ALTERPARTITION_MERGEFILES: + case ALTERPARTITION_LOCATION: + case ALTERTBLPART_SKEWED_LOCATION: + // MSCK + case MSCK: + // alter index + case ALTERINDEX_REBUILD: + case ALTERINDEX_PROPS: + case LOCKTABLE: + case UNLOCKTABLE: + case SHOWCOLUMNS: + parseTableMeta(cmd, OP_REGEX_MAP.get(hiveOp)); + break; + case SHOWINDEXES: + parseShowIndex(cmd, SHOW_INDEX_REGEX); + break; + case CREATEFUNCTION: + parseFunction(cmd, CREATE_FUNCTION_REGEX); + break; + case SHOW_TABLESTATUS: + parseTableExtend(cmd, SHOW_TABLESTATUS); + break; + case LOAD: + parseLoadTable(cmd, LOAD); + break; + default: + break; + } + } + + private void parseLoadTable(String cmd, String load) throws HiveAuthzPluginException { + Pattern pattern = Pattern.compile(load, Pattern.CASE_INSENSITIVE); + Matcher matcher = pattern.matcher(cmd); + if (matcher.find()) { + String tbName = matcher.group(matcher.groupCount()); + extractDbAndTb(tbName.trim()); + } else { + throw new HiveAuthzPluginException("The command " + cmd + " does not match the table metadata grammar"); + } + } + + private void parseTableExtend(String cmd, String showTablestatus) throws HiveAuthzPluginException { + Pattern pattern = Pattern.compile(showTablestatus, Pattern.CASE_INSENSITIVE); + Matcher matcher = pattern.matcher(cmd); + if (matcher.find()) { + String dbName = matcher.group(matcher.groupCount()); + currentDb = dbName; + currentTb = Table.SOME.getName(); + } else { + throw new HiveAuthzPluginException("The command " + cmd + " does not match the table metadata grammar"); + } + } + + private void extractDbAndTb(String tableName) { + if (tableName.contains(".")) { + String[] tb = tableName.split("\\."); + currentDb = tb[0]; + currentTb = tb[1]; + } else { + currentDb = SessionState.get().getCurrentDatabase(); + currentTb = tableName; + } + } + + private void parseDbMeta(String cmd, String regex) throws HiveAuthzPluginException { + Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); + 
Matcher matcher = pattern.matcher(cmd); + if (matcher.find()) { + currentDb = matcher.group(matcher.groupCount()); + } else { + throw new HiveAuthzPluginException("The command " + cmd + + " does not match the database metadata grammar"); + } + } + + private void parseTableMeta(String cmd, String regex) throws HiveAuthzPluginException { + Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); + Matcher matcher = pattern.matcher(cmd); + if (matcher.find()) { + String tbName = matcher.group(matcher.groupCount()); + extractDbAndTb(tbName.trim()); + } else { + throw new HiveAuthzPluginException("The command " + cmd + " does not match the table metadata grammar"); + } + } + + private void parseShowIndex(String cmd, String regex) throws HiveAuthzPluginException { + Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); + Matcher matcher = pattern.matcher(cmd); + if (matcher.find()) { + String dbName = matcher.group(matcher.groupCount()); + String tbName = matcher.group(3); + if (dbName != null) { + currentDb = dbName; + currentTb = tbName; + } else { + extractDbAndTb(tbName); + } + } else { + throw new HiveAuthzPluginException("The command " + cmd + " does not match the show index grammar"); + } + } + + private void parseFunction(String cmd, String regex) throws HiveAuthzPluginException { + Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); + Matcher matcher = pattern.matcher(cmd); + if (matcher.find()) { + String udfClass = matcher.group(matcher.groupCount()); + if (udfClass.contains("'")) { + currentTb = udfClass.split("'")[1]; + } else { + currentTb = udfClass; + } + } else { + throw new HiveAuthzPluginException("The command " + cmd + + " does not match the create function grammar"); + } + } + + public String getCurrentDb() { + return currentDb; + } + + public String getCurrentTb() { + return currentTb; + } + +} diff --git a/sentry-binding/sentry-binding-hive-v2/src/test/java/org/apache/sentry/binding/hive/v2/DummyHiveAuthenticationProvider.java b/sentry-binding/sentry-binding-hive-v2/src/test/java/org/apache/sentry/binding/hive/v2/DummyHiveAuthenticationProvider.java new file mode 100644 index 000000000..9335c37bd --- /dev/null +++ b/sentry-binding/sentry-binding-hive-v2/src/test/java/org/apache/sentry/binding/hive/v2/DummyHiveAuthenticationProvider.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.sentry.binding.hive.v2; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; +import org.apache.hadoop.hive.ql.session.SessionState; + +public class DummyHiveAuthenticationProvider implements HiveAuthenticationProvider { + + private String userName; + private Configuration conf; + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public String getUserName() { + return userName; + } + + @Override + public List getGroupNames() { + return null; + } + + @Override + public void destroy() throws HiveException { + + } + + @Override + public void setSessionState(SessionState ss) { + + } + + public void setUserName(String user) { + this.userName = user; + } + +} diff --git a/sentry-binding/sentry-binding-hive/pom.xml b/sentry-binding/sentry-binding-hive/pom.xml index 6188b3400..fb5f21494 100644 --- a/sentry-binding/sentry-binding-hive/pom.xml +++ b/sentry-binding/sentry-binding-hive/pom.xml @@ -22,7 +22,7 @@ limitations under the License. org.apache.sentry sentry-binding - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-binding-hive @@ -73,6 +73,10 @@ limitations under the License. org.apache.sentry sentry-provider-file + + org.apache.sentry + sentry-provider-cache + org.apache.sentry sentry-policy-db diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryFilterDDLTask.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryFilterDDLTask.java new file mode 100644 index 000000000..883836809 --- /dev/null +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryFilterDDLTask.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec; + +import static org.apache.hadoop.util.StringUtils.stringifyException; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.io.IOUtils; +import org.apache.sentry.binding.hive.HiveAuthzBindingHook; +import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; +import org.apache.sentry.core.common.Subject; + +import com.google.common.base.Preconditions; + +public class SentryFilterDDLTask extends DDLTask { + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(SentryFilterDDLTask.class); + + private HiveAuthzBinding hiveAuthzBinding; + private Subject subject; + private HiveOperation stmtOperation; + + public SentryFilterDDLTask(HiveAuthzBinding hiveAuthzBinding, Subject subject, + HiveOperation stmtOperation) { + Preconditions.checkNotNull(hiveAuthzBinding); + Preconditions.checkNotNull(subject); + Preconditions.checkNotNull(stmtOperation); + + this.hiveAuthzBinding = hiveAuthzBinding; + this.subject = subject; + this.stmtOperation = stmtOperation; + } + + public HiveAuthzBinding getHiveAuthzBinding() { + return hiveAuthzBinding; + } + + public Subject getSubject() { + return subject; + } + + public HiveOperation getStmtOperation() { + return stmtOperation; + } + + @Override + public int execute(DriverContext driverContext) { + // Currently the SentryFilterDDLTask only supports filtering the "show columns in table" command. + ShowColumnsDesc showCols = work.getShowColumnsDesc(); + try { + if (showCols != null) { + return showFilterColumns(showCols); + } + } catch (Throwable e) { + failed(e); + return 1; + } + + return super.execute(driverContext); + } + + private void failed(Throwable e) { + while (e.getCause() != null && e.getClass() == RuntimeException.class) { + e = e.getCause(); + } + setException(e); + LOG.error(stringifyException(e)); + } + + /** + * Filter the command "show columns in table" + * + */ + private int showFilterColumns(ShowColumnsDesc showCols) throws HiveException { + Table table = Hive.get(conf).getTable(showCols.getTableName()); + + // write the results to the file + DataOutputStream outStream = null; + try { + Path resFile = new Path(showCols.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + outStream = fs.create(resFile); + + List cols = table.getCols(); + cols.addAll(table.getPartCols()); + // In case the query is served by HiveServer2, don't pad it with spaces, + // as HiveServer2 output is consumed by JDBC/ODBC clients. 
+ boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); + outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation( + filterColumns(cols, table), false, isOutputPadded, null)); + outStream.close(); + outStream = null; + } catch (IOException e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR); + } finally { + IOUtils.closeStream(outStream); + } + return 0; + } + + private List filterColumns(List cols, Table table) throws HiveException { + // keep only the columns that the subject has privileges on + return HiveAuthzBindingHook.filterShowColumns(getHiveAuthzBinding(), + cols, getStmtOperation(), getSubject().getName(), table.getTableName(), table.getDbName()); + } +} diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryGrantRevokeTask.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryGrantRevokeTask.java index 2a60a232c..31eb5e8ad 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryGrantRevokeTask.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryGrantRevokeTask.java @@ -130,23 +130,23 @@ public int execute(DriverContext driverContext) { "Config " + AuthzConfVars.AUTHZ_SERVER_NAME.getVar() + " is required"); try { if (work.getRoleDDLDesc() != null) { - return processRoleDDL(conf, console, sentryClient, subject.getName(), 
String server, GrantDesc desc) throws SentryUserException { return processGrantRevokeDDL(console, sentryClient, subject, @@ -286,7 +289,7 @@ private int processGrantDDL(HiveConf conf, LogHelper console, } // For grant option, we use null to stand for revoke the privilege ignore the grant option - private int processRevokeDDL(HiveConf conf, LogHelper console, + private int processRevokeDDL(LogHelper console, SentryPolicyServiceClient sentryClient, String subject, String server, RevokeDesc desc) throws SentryUserException { return processGrantRevokeDDL(console, sentryClient, subject, @@ -294,8 +297,8 @@ private int processRevokeDDL(HiveConf conf, LogHelper console, desc.getPrivilegeSubjectDesc(), null); } - private int processShowGrantDDL(HiveConf conf, LogHelper console, SentryPolicyServiceClient sentryClient, - String subject, String server, ShowGrantDesc desc) throws SentryUserException{ + private int processShowGrantDDL(LogHelper console, SentryPolicyServiceClient sentryClient, + String subject, ShowGrantDesc desc) throws SentryUserException{ PrincipalDesc principalDesc = desc.getPrincipalDesc(); PrivilegeObjectDesc hiveObjectDesc = desc.getHiveObj(); String principalName = principalDesc.getName(); @@ -384,17 +387,18 @@ private void writeToFile(String data, String file) throws IOException { FSDataOutputStream out = fs.create(resFile); try { if (data != null && !data.isEmpty()) { - OutputStreamWriter writer = new OutputStreamWriter(out, "UTF-8"); - writer.write(data); - writer.write((char) terminator); - writer.flush(); + try (OutputStreamWriter writer = new OutputStreamWriter(out, "UTF-8")) { + writer.write(data); + writer.write((char) terminator); + writer.flush(); + } } } finally { closeQuiet(out); } } - private int processGrantRevokeRoleDDL(HiveConf conf, LogHelper console, + private int processGrantRevokeRoleDDL(LogHelper console, SentryPolicyServiceClient sentryClient, String subject, GrantRevokeRoleDDL desc) throws SentryUserException { try { @@ -590,7 +594,8 @@ private static int processGrantRevokeDDL(LogHelper console, } } else { if (serverName != null) { - sentryClient.revokeServerPrivilege(subject, princ.getName(), serverName, grantOption); + sentryClient.revokeServerPrivilege(subject, princ.getName(), serverName, + toSentryAction(privDesc.getPrivilege().getPriv()), grantOption); } else if (uriPath != null) { sentryClient.revokeURIPrivilege(subject, princ.getName(), server, uriPath, grantOption); } else if (tableName == null) { diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryHivePrivilegeObjectDesc.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryHivePrivilegeObjectDesc.java index 18cdde228..4fa4221b4 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryHivePrivilegeObjectDesc.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/hadoop/hive/ql/exec/SentryHivePrivilegeObjectDesc.java @@ -17,9 +17,6 @@ package org.apache.hadoop.hive.ql.exec; -import java.util.ArrayList; -import java.util.List; - import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; public class SentryHivePrivilegeObjectDesc extends PrivilegeObjectDesc { @@ -47,4 +44,8 @@ public void setServer(boolean isServer) { this.isServer = isServer; } + public boolean isSentryPrivObjectDesc() { + return isServer || isUri; + } + } diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/HiveAuthzBindingHook.java 
b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/HiveAuthzBindingHook.java index 48afa0875..c425e0687 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/HiveAuthzBindingHook.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/HiveAuthzBindingHook.java @@ -27,17 +27,27 @@ import java.util.EnumSet; import java.util.List; import java.util.Set; +import java.util.Arrays; +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.exec.SentryFilterDDLTask; import org.apache.hadoop.hive.ql.exec.SentryGrantRevokeTask; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.Entity; import org.apache.hadoop.hive.ql.hooks.Entity.Type; import org.apache.hadoop.hive.ql.hooks.Hook; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; @@ -45,6 +55,7 @@ import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; @@ -62,6 +73,9 @@ import org.apache.sentry.core.model.db.DBModelAuthorizable.AuthorizableType; import org.apache.sentry.core.model.db.Database; import org.apache.sentry.core.model.db.Table; +import org.apache.sentry.provider.cache.PrivilegeCache; +import org.apache.sentry.provider.cache.SimplePrivilegeCache; +import org.apache.sentry.provider.common.AuthorizationProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,9 +91,19 @@ public class HiveAuthzBindingHook extends AbstractSemanticAnalyzerHook { private Database currDB = Database.ALL; private Table currTab; private AccessURI udfURI; + private AccessURI serdeURI; private AccessURI partitionURI; private Table currOutTab = null; private Database currOutDB = null; + private final List serdeWhiteList; + private boolean serdeURIPrivilegesEnabled; + + // True if this is a basic DESCRIBE
operation. False for other DESCRIBE variants + // like DESCRIBE [FORMATTED|EXTENDED]. Required because Hive treats these stmts as the same + // HiveOperationType, but we want to enforces different privileges on each statement. + // Basic DESCRIBE
is allowed with only column-level privs, while the variants + // require table-level privileges. + public boolean isDescTableBasic = false; public HiveAuthzBindingHook() throws Exception { SessionState session = SessionState.get(); @@ -95,6 +119,14 @@ public HiveAuthzBindingHook() throws Exception { } authzConf = loadAuthzConf(hiveConf); hiveAuthzBinding = new HiveAuthzBinding(hiveConf, authzConf); + + String serdeWhiteLists = authzConf.get(HiveAuthzConf.HIVE_SENTRY_SERDE_WHITELIST, + HiveAuthzConf.HIVE_SENTRY_SERDE_WHITELIST_DEFAULT); + serdeWhiteList = Arrays.asList(serdeWhiteLists.split(",")); + serdeURIPrivilegesEnabled = authzConf.getBoolean(HiveAuthzConf.HIVE_SENTRY_SERDE_URI_PRIVILIEGES_ENABLED, + HiveAuthzConf.HIVE_SENTRY_SERDE_URI_PRIVILIEGES_ENABLED_DEFAULT); + + FunctionRegistry.setupPermissionsForBuiltinUDFs("", HiveAuthzConf.HIVE_UDF_BLACK_LIST); } public static HiveAuthzConf loadAuthzConf(HiveConf hiveConf) { @@ -145,6 +177,16 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) currDB = new Database(BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(0).getText())); break; case HiveParser.TOK_CREATETABLE: + + for (Node childNode : ast.getChildren()) { + ASTNode childASTNode = (ASTNode) childNode; + if ("TOK_TABLESERIALIZER".equals(childASTNode.getText())) { + ASTNode serdeNode = (ASTNode)childASTNode.getChild(0); + String serdeClassName = BaseSemanticAnalyzer.unescapeSQLString(serdeNode.getChild(0).getText()); + setSerdeURI(serdeClassName); + } + } + case HiveParser.TOK_CREATEVIEW: /* * Compiler doesn't create read/write entities for create table. @@ -218,7 +260,9 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) case HiveParser.TOK_CREATEFUNCTION: String udfClassName = BaseSemanticAnalyzer.unescapeSQLString(ast.getChild(1).getText()); try { - CodeSource udfSrc = Class.forName(udfClassName).getProtectionDomain().getCodeSource(); + CodeSource udfSrc = + Class.forName(udfClassName, true, Utilities.getSessionSpecifiedClassLoader()) + .getProtectionDomain().getCodeSource(); if (udfSrc == null) { throw new SemanticException("Could not resolve the jar for UDF class " + udfClassName); } @@ -229,7 +273,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) } udfURI = parseURI(udfSrc.getLocation().toString(), true); } catch (ClassNotFoundException e) { - throw new SemanticException("Error retrieving udf class", e); + throw new SemanticException("Error retrieving udf class:" + e.getMessage(), e); } // create/drop function is allowed with any database currDB = Database.ALL; @@ -243,7 +287,37 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) String dbName = BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(1).getChild(0).getChild(0).getText()); currDB = new Database(dbName); break; - default: + case HiveParser.TOK_DESCTABLE: + currDB = getCanonicalDb(); + // For DESCRIBE FORMATTED/EXTENDED ast will have an additional child node with value + // "FORMATTED/EXTENDED". + isDescTableBasic = (ast.getChildCount() == 1); + break; + case HiveParser.TOK_TRUNCATETABLE: + // SENTRY-826: + // Truncate empty partitioned table should throw SemanticException only if the + // user does not have permission. + // In postAnalyze, currOutDB and currOutTbl will be added into outputHierarchy + // which will be validated in the hiveAuthzBinding.authorize method. 
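 + // Illustrative sketch (AST shape assumed for this Hive version, not taken from this patch): + // a statement like "TRUNCATE TABLE db1.tbl1" parses roughly as + // TOK_TRUNCATETABLE -> TOK_TABLE_PARTITION -> TOK_TABNAME(db1, tbl1), + // which is why the checks and the extractDatabase/extractTable calls below walk + // ast.getChild(0) and ast.getChild(0).getChild(0).getChild(0).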
+ Preconditions.checkArgument(ast.getChildCount() == 1); + // childcount is 1 for table without partition, 2 for table with partitions + Preconditions.checkArgument(ast.getChild(0).getChildCount() >= 1); + Preconditions.checkArgument(ast.getChild(0).getChild(0).getChildCount() == 1); + currOutDB = extractDatabase((ASTNode) ast.getChild(0)); + currOutTab = extractTable((ASTNode) ast.getChild(0).getChild(0).getChild(0)); + break; + case HiveParser.TOK_ALTERTABLE: + + for (Node childNode : ast.getChildren()) { + ASTNode childASTNode = (ASTNode) childNode; + if ("TOK_ALTERTABLE_SERIALIZER".equals(childASTNode.getText())) { + ASTNode serdeNode = (ASTNode)childASTNode.getChild(0); + String serdeClassName = BaseSemanticAnalyzer.unescapeSQLString(serdeNode.getText()); + setSerdeURI(serdeClassName); + } + } + + default: currDB = getCanonicalDb(); break; } @@ -258,7 +332,7 @@ private Database getCanonicalDb() { private Database extractDatabase(ASTNode ast) throws SemanticException { String tableName = BaseSemanticAnalyzer.getUnescapedName(ast); if (tableName.contains(".")) { - return new Database((tableName.split("\\."))[0]); + return new Database(tableName.split("\\.")[0]); } else { return getCanonicalDb(); } @@ -266,7 +340,7 @@ private Database extractDatabase(ASTNode ast) throws SemanticException { private Table extractTable(ASTNode ast) throws SemanticException { String tableName = BaseSemanticAnalyzer.getUnescapedName(ast); if (tableName.contains(".")) { - return new Table((tableName.split("\\."))[1]); + return new Table(tableName.split("\\.")[1]); } else { return new Table(tableName); } @@ -296,6 +370,11 @@ protected static AccessURI parseURI(String uri, boolean isLocal) try { HiveConf conf = SessionState.get().getConf(); String warehouseDir = conf.getVar(ConfVars.METASTOREWAREHOUSE); + Path warehousePath = new Path(warehouseDir); + if (warehousePath.isAbsoluteAndSchemeAuthorityNull()) { + FileSystem fs = FileSystem.get(conf); + warehouseDir = fs.makeQualified(warehousePath).toUri().toString(); + } return new AccessURI(PathUtils.parseURI(warehouseDir, uri, isLocal)); } catch (Exception e) { throw new SemanticException("Error parsing URI " + uri + ": " + @@ -335,17 +414,40 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, // We don't handle authorizing this statement return; } + + /** + * Replace DDLTask using the SentryFilterDDLTask for protection, + * such as "show column" only allow show some column that user can access to. 
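 + * For example (illustrative): a user who holds SELECT on only column c1 of table t1 should + * see just c1 from "SHOW COLUMNS IN t1".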
+ * SENTRY-847 + */ + for (int i = 0; i < rootTasks.size(); i++) { + Task task = rootTasks.get(i); + if (task instanceof DDLTask) { + SentryFilterDDLTask filterTask = + new SentryFilterDDLTask(hiveAuthzBinding, subject, stmtOperation); + filterTask.setWork((DDLWork)task.getWork()); + rootTasks.set(i, filterTask); + } + } + authorizeWithHiveBindings(context, stmtAuthObject, stmtOperation); } catch (AuthorizationException e) { executeOnFailureHooks(context, stmtOperation, e); - String permsRequired = ""; + StringBuilder permsBuilder = new StringBuilder(); for (String perm : hiveAuthzBinding.getLastQueryPrivilegeErrors()) { - permsRequired += perm + ";"; + permsBuilder.append(perm); + permsBuilder.append(";"); } + String permsRequired = permsBuilder.toString(); SessionState.get().getConf().set(HiveAuthzConf.HIVE_SENTRY_AUTH_ERRORS, permsRequired); - String msg = HiveAuthzConf.HIVE_SENTRY_PRIVILEGE_ERROR_MESSAGE + "\n Required privileges for this query: " + String msgForLog = HiveAuthzConf.HIVE_SENTRY_PRIVILEGE_ERROR_MESSAGE + + "\n Required privileges for this query: " + permsRequired; - throw new SemanticException(msg, e); + String msgForConsole = HiveAuthzConf.HIVE_SENTRY_PRIVILEGE_ERROR_MESSAGE + "\n " + + e.getMessage()+ "\n The required privileges: " + permsRequired; + // AuthorizationException is not a real exception, use the info level to record this. + LOG.info(msgForLog); + throw new SemanticException(msgForConsole, e); } finally { hiveAuthzBinding.close(); } @@ -409,6 +511,14 @@ private void authorizeWithHiveBindings(HiveSemanticAnalyzerHookContext context, LOG.debug("context.getOutputs() = " + context.getOutputs()); } + // Workaround to allow DESCRIBE
to be executed with only column-level privileges, while + // still authorizing DESCRIBE [EXTENDED|FORMATTED] as table-level. + // This is done by treating DESCRIBE
the same as SHOW COLUMNS, which only requires column + // level privs. + if (isDescTableBasic) { + stmtAuthObject = HiveAuthzPrivilegesMap.getHiveAuthzPrivileges(HiveOperation.SHOWCOLUMNS); + } + switch (stmtAuthObject.getOperationScope()) { case SERVER : @@ -426,6 +536,13 @@ private void authorizeWithHiveBindings(HiveSemanticAnalyzerHookContext context, outputHierarchy.add(dbHierarchy); getInputHierarchyFromInputs(inputHierarchy, inputs); + + if (serdeURI != null) { + List serdeUriHierarchy = new ArrayList(); + serdeUriHierarchy.add(hiveAuthzBinding.getAuthServer()); + serdeUriHierarchy.add(serdeURI); + outputHierarchy.add(serdeUriHierarchy); + } break; case TABLE: // workaround for add partitions @@ -453,6 +570,8 @@ private void authorizeWithHiveBindings(HiveSemanticAnalyzerHookContext context, inputHierarchy.add(externalAuthorizableHierarchy); } + + // workaround for DDL statements // Capture the table name in pre-analyze and include that in the output entity list if (currOutTab != null) { @@ -462,6 +581,14 @@ private void authorizeWithHiveBindings(HiveSemanticAnalyzerHookContext context, externalAuthorizableHierarchy.add(currOutTab); outputHierarchy.add(externalAuthorizableHierarchy); } + + if (serdeURI != null) { + List serdeUriHierarchy = new ArrayList(); + serdeUriHierarchy.add(hiveAuthzBinding.getAuthServer()); + serdeUriHierarchy.add(serdeURI); + outputHierarchy.add(serdeUriHierarchy); + } + break; case FUNCTION: /* The 'FUNCTION' privilege scope currently used for @@ -492,9 +619,9 @@ private void authorizeWithHiveBindings(HiveSemanticAnalyzerHookContext context, // by default allow connect access to default db Table currTbl = Table.ALL; Column currCol = Column.ALL; - if ((DEFAULT_DATABASE_NAME.equalsIgnoreCase(currDB.getName()) && + if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(currDB.getName()) && "false".equalsIgnoreCase(authzConf. 
- get(HiveAuthzConf.AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB.getVar(), "false")))) { + get(HiveAuthzConf.AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB.getVar(), "false"))) { currDB = Database.ALL; currTbl = Table.SOME; } @@ -506,32 +633,34 @@ private void authorizeWithHiveBindings(HiveSemanticAnalyzerHookContext context, inputHierarchy.add(connectHierarchy); outputHierarchy.add(connectHierarchy); break; - + case COLUMN: + for (ReadEntity readEntity: inputs) { + if (readEntity.getAccessedColumns() != null && !readEntity.getAccessedColumns().isEmpty()) { + addColumnHierarchy(inputHierarchy, readEntity); + } else { + List<DBModelAuthorizable> entityHierarchy = new ArrayList<DBModelAuthorizable>(); + entityHierarchy.add(hiveAuthzBinding.getAuthServer()); + entityHierarchy.addAll(getAuthzHierarchyFromEntity(readEntity)); + entityHierarchy.add(Column.ALL); + inputHierarchy.add(entityHierarchy); + } + } + break; default: throw new AuthorizationException("Unknown operation scope type " + stmtAuthObject.getOperationScope().toString()); } - // validate permission - hiveAuthzBinding.authorize(stmtOperation, stmtAuthObject, getCurrentSubject(context), - inputHierarchy, outputHierarchy); - } - - private boolean isUDF(ReadEntity readEntity) { - return readEntity.getType().equals(Type.FUNCTION); - } - - private void checkUDFWhiteList(String queryUDF) throws AuthorizationException { - String whiteList = authzConf.get(HiveAuthzConf.AuthzConfVars.AUTHZ_UDF_WHITELIST.getVar()); - if (whiteList == null) { - return; - } - for (String hiveUDF : Splitter.on(",").omitEmptyStrings().trimResults().split(whiteList)) { - if (queryUDF.equalsIgnoreCase(hiveUDF)) { - return; // found the given UDF in whitelist - } + HiveAuthzBinding binding = null; + try { + binding = getHiveBindingWithPrivilegeCache(hiveAuthzBinding, context.getUserName()); + } catch (SemanticException e) { + // Will use the original hiveAuthzBinding + binding = hiveAuthzBinding; } - throw new AuthorizationException("The UDF " + queryUDF + " is not found in the list of allowed UDFs"); + // validate permission + binding.authorize(stmtOperation, stmtAuthObject, getCurrentSubject(context), inputHierarchy, + outputHierarchy); } private HiveOperation getCurrentHiveStmtOp() { @@ -557,6 +686,7 @@ private List<DBModelAuthorizable> getAuthzHierarchyFromEntity(Entity entity) { objectHierarchy.add(new Table(entity.getTable().getTableName())); break; case PARTITION: + case DUMMYPARTITION: objectHierarchy.add(new Database(entity.getPartition().getTable().getDbName())); objectHierarchy.add(new Table(entity.getPartition().getTable().getTableName())); break; @@ -618,8 +748,9 @@ private void addColumnHierarchy(List<List<DBModelAuthorizable>> inputHierarchy, private void getInputHierarchyFromInputs(List<List<DBModelAuthorizable>> inputHierarchy, Set<ReadEntity> inputs) { for (ReadEntity readEntity: inputs) { - // skip the tables/view that are part of expanded view definition. - if (isChildTabForView(readEntity)) { + // skip the tables/views that are part of an expanded view definition + // skip the Hive generated dummy entities created for queries like 'select <expr>' + if (isChildTabForView(readEntity) || isDummyEntity(readEntity)) { continue; } if (readEntity.getAccessedColumns() != null && !readEntity.getAccessedColumns().isEmpty()) { @@ -678,6 +809,8 @@ public static List<String> filterShowTables( setOperationType(HiveOperationType.INFO).
build(); + HiveAuthzBinding hiveBindingWithPrivilegeCache = getHiveBindingWithPrivilegeCache(hiveAuthzBinding, userName); + for (String tableName : queryResult) { // if user has privileges on table, add to filtered list, else discard Table table = new Table(tableName); @@ -694,14 +827,51 @@ public static List filterShowTables( inputHierarchy.add(externalAuthorizableHierarchy); try { - hiveAuthzBinding.authorize(operation, tableMetaDataPrivilege, subject, + // do the authorization by new HiveAuthzBinding with PrivilegeCache + hiveBindingWithPrivilegeCache.authorize(operation, tableMetaDataPrivilege, subject, inputHierarchy, outputHierarchy); filteredResult.add(table.getName()); } catch (AuthorizationException e) { // squash the exception, user doesn't have privileges, so the table is // not added to // filtered list. - ; + } + } + return filteredResult; + } + + public static List filterShowColumns( + HiveAuthzBinding hiveAuthzBinding, List cols, + HiveOperation operation, String userName, String tableName, String dbName) + throws SemanticException { + List filteredResult = new ArrayList(); + Subject subject = new Subject(userName); + HiveAuthzPrivileges columnMetaDataPrivilege = + HiveAuthzPrivilegesMap.getHiveAuthzPrivileges(HiveOperation.SHOWCOLUMNS); + HiveAuthzBinding hiveBindingWithPrivilegeCache = getHiveBindingWithPrivilegeCache(hiveAuthzBinding, userName); + + Database database = new Database(dbName); + Table table = new Table(tableName); + for (FieldSchema col : cols) { + // if user has privileges on column, add to filtered list, else discard + List> inputHierarchy = new ArrayList>(); + List> outputHierarchy = new ArrayList>(); + List externalAuthorizableHierarchy = new ArrayList(); + externalAuthorizableHierarchy.add(hiveAuthzBinding.getAuthServer()); + externalAuthorizableHierarchy.add(database); + externalAuthorizableHierarchy.add(table); + externalAuthorizableHierarchy.add(new Column(col.getName())); + inputHierarchy.add(externalAuthorizableHierarchy); + + try { + // do the authorization by new HiveAuthzBinding with PrivilegeCache + hiveBindingWithPrivilegeCache.authorize(operation, columnMetaDataPrivilege, subject, + inputHierarchy, outputHierarchy); + filteredResult.add(col); + } catch (AuthorizationException e) { + // squash the exception, user doesn't have privileges, so the column is + // not added to + // filtered list. } } return filteredResult; @@ -712,6 +882,8 @@ public static List filterShowDatabases( HiveOperation operation, String userName) throws SemanticException { List filteredResult = new ArrayList(); Subject subject = new Subject(userName); + HiveAuthzBinding hiveBindingWithPrivilegeCache = getHiveBindingWithPrivilegeCache(hiveAuthzBinding, userName); + HiveAuthzPrivileges anyPrivilege = new HiveAuthzPrivileges.AuthzPrivilegeBuilder(). addInputObjectPriviledge(AuthorizableType.Column, EnumSet.of(DBModelAction.SELECT, DBModelAction.INSERT)). addInputObjectPriviledge(AuthorizableType.URI, EnumSet.of(DBModelAction.SELECT)). 
@@ -724,9 +896,8 @@ public static List filterShowDatabases( Database database = null; // if default is not restricted, continue - if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName) && - "false".equalsIgnoreCase( -hiveAuthzBinding.getAuthzConf().get( + if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName) && "false".equalsIgnoreCase( + hiveAuthzBinding.getAuthzConf().get( HiveAuthzConf.AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB.getVar(), "false"))) { filteredResult.add(DEFAULT_DATABASE_NAME); @@ -745,14 +916,14 @@ public static List filterShowDatabases( inputHierarchy.add(externalAuthorizableHierarchy); try { - hiveAuthzBinding.authorize(operation, anyPrivilege, subject, + // do the authorization by new HiveAuthzBinding with PrivilegeCache + hiveBindingWithPrivilegeCache.authorize(operation, anyPrivilege, subject, inputHierarchy, outputHierarchy); filteredResult.add(database.getName()); } catch (AuthorizationException e) { // squash the exception, user doesn't have privileges, so the table is // not added to // filtered list. - ; } } @@ -772,7 +943,7 @@ private boolean isChildTabForView(ReadEntity readEntity) { if (!readEntity.getType().equals(Type.TABLE) && !readEntity.getType().equals(Type.PARTITION)) { return false; } - if ((readEntity.getParents() != null) && (readEntity.getParents().size() > 0)) { + if (readEntity.getParents() != null && readEntity.getParents().size() > 0) { for (ReadEntity parentEntity : readEntity.getParents()) { if (!parentEntity.getType().equals(Type.TABLE)) { return false; @@ -784,32 +955,16 @@ private boolean isChildTabForView(ReadEntity readEntity) { } } - /** - * Returns a set of hooks specified in a configuration variable. - * - * See getHooks(HiveAuthzConf.AuthzConfVars hookConfVar, Class clazz) - * @param hookConfVar - * @return - * @throws Exception - */ - private static List getHooks(String csHooks) throws Exception { - return getHooks(csHooks, Hook.class); - } - /** * Returns the hooks specified in a configuration variable. The hooks are returned in a list in * the order they were specified in the configuration variable. * * @param hookConfVar The configuration variable specifying a comma separated list of the hook * class names. - * @param clazz The super type of the hooks. - * @return A list of the hooks cast as the type specified in clazz, in the order - * they are listed in the value of hookConfVar + * @return A list of the hooks, in the order they are listed in the value of hookConfVar * @throws Exception */ - private static List getHooks(String csHooks, - Class clazz) - throws Exception { + private static List getHooks(String csHooks) throws Exception { List hooks = new ArrayList(); if (csHooks.isEmpty()) { @@ -829,4 +984,73 @@ private static List getHooks(String csHooks, return hooks; } + + // Check if the given entity is identified as dummy by Hive compilers. 
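The filterShowTables, filterShowColumns, and filterShowDatabases methods above all follow one authorize-and-squash pattern: build an authorizable hierarchy per candidate object, ask the binding to authorize it, and treat AuthorizationException as "not visible" rather than as an error. A minimal standalone sketch of that pattern, with hypothetical stand-in types (the real code uses HiveAuthzBinding.authorize() and the Sentry DB model classes):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical stand-ins for the Sentry types used above.
class AuthorizationException extends Exception {
  AuthorizationException(String msg) { super(msg); }
}

interface Authorizer {
  // throws when the user lacks privileges on the candidate object
  void authorize(String candidate) throws AuthorizationException;
}

public class MetadataFilterSketch {

  // Keep only the candidates the user may see. The exception is squashed on
  // purpose: a failed check means "hide this item", not "fail the query",
  // just like the empty catch blocks in the filterShow* methods above.
  static List<String> filterVisible(Authorizer authorizer, List<String> candidates) {
    List<String> filtered = new ArrayList<String>();
    for (String candidate : candidates) {
      try {
        authorizer.authorize(candidate);
        filtered.add(candidate);
      } catch (AuthorizationException e) {
        // user has no privileges on this object; silently drop it
      }
    }
    return filtered;
  }

  public static void main(String[] args) {
    Authorizer onlyT1 = new Authorizer() {
      public void authorize(String candidate) throws AuthorizationException {
        if (!"t1".equals(candidate)) {
          throw new AuthorizationException("no privileges on " + candidate);
        }
      }
    };
    System.out.println(filterVisible(onlyT1, Arrays.asList("t1", "t2", "t3"))); // [t1]
  }
}
```

The per-user privilege cache built by getHiveBindingWithPrivilegeCache appears to exist precisely for this loop: the user's privileges are fetched once, so each authorize() call in the loop is answered locally rather than with one round trip per candidate table, column, or database.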
+ private boolean isDummyEntity(Entity entity) { + return entity.isDummy(); + } + + // create hiveBinding with PrivilegeCache + private static HiveAuthzBinding getHiveBindingWithPrivilegeCache(HiveAuthzBinding hiveAuthzBinding, + String userName) throws SemanticException { + // get the original HiveAuthzBinding, and get the user's privileges by AuthorizationProvider + AuthorizationProvider authProvider = hiveAuthzBinding.getCurrentAuthProvider(); + Set userPrivileges = authProvider.getPolicyEngine().getPrivileges( + authProvider.getGroupMapping().getGroups(userName), hiveAuthzBinding.getActiveRoleSet(), + hiveAuthzBinding.getAuthServer()); + + // create PrivilegeCache using user's privileges + PrivilegeCache privilegeCache = new SimplePrivilegeCache(userPrivileges); + try { + // create new instance of HiveAuthzBinding whose backend provider should be SimpleCacheProviderBackend + return new HiveAuthzBinding(HiveAuthzBinding.HiveHook.HiveServer2, hiveAuthzBinding.getHiveConf(), + hiveAuthzBinding.getAuthzConf(), privilegeCache); + } catch (Exception e) { + LOG.error("Can not create HiveAuthzBinding with privilege cache."); + throw new SemanticException(e); + } + } + + private static boolean hasPrefixMatch(List prefixList, final String str) { + for (String prefix : prefixList) { + if (str.startsWith(prefix)) { + return true; + } + } + + return false; + } + + /** + * Set the Serde URI privileges. If the URI privileges are not set, which serdeURI will be null, + * the URI authorization checks will be skipped. + */ + private void setSerdeURI(String serdeClassName) throws SemanticException { + if (!serdeURIPrivilegesEnabled) { + return; + } + + // WhiteList Serde Jar can be used by any users. WhiteList checking is + // done by comparing the Java package name. The assumption is cluster + // admin will ensure there is no Java namespace collision. + // e.g org.apache.hadoop.hive.serde2 is used by hive and cluster admin should + // ensure no custom Serde class is introduced under the same namespace. 
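As the comment above explains, whitelisting is decided by Java package prefix. A minimal sketch of those semantics, assuming the whitelist is the comma-separated value of hive.sentry.serde.whitelist (default org.apache.hadoop.hive.serde2); the class and driver names are illustrative only:

```java
import java.util.Arrays;
import java.util.List;

public class SerdeWhitelistSketch {

  // Same semantics as the hasPrefixMatch() helper above: a SerDe class is
  // whitelisted when its fully qualified name starts with any configured
  // package prefix.
  static boolean isWhitelisted(List<String> prefixes, String serdeClassName) {
    for (String prefix : prefixes) {
      if (serdeClassName.startsWith(prefix)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    // assumed default of hive.sentry.serde.whitelist
    List<String> whitelist = Arrays.asList("org.apache.hadoop.hive.serde2");
    // built-in Hive SerDe: whitelisted, so no URI privilege check is needed
    System.out.println(isWhitelisted(whitelist, "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"));
    // custom SerDe outside the namespace: the URI privilege check applies
    System.out.println(isWhitelisted(whitelist, "com.example.CustomSerDe"));
  }
}
```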
+ if (!hasPrefixMatch(serdeWhiteList, serdeClassName)) { + try { + CodeSource serdeSrc = Class.forName(serdeClassName, true, Utilities.getSessionSpecifiedClassLoader()).getProtectionDomain().getCodeSource(); + if (serdeSrc == null) { + throw new SemanticException("Could not resolve the jar for Serde class " + serdeClassName); + } + + String serdeJar = serdeSrc.getLocation().getPath(); + if (serdeJar == null || serdeJar.isEmpty()) { + throw new SemanticException("Could not find the jar for Serde class " + serdeClassName + "to validate privileges"); + } + + serdeURI = parseURI(serdeSrc.getLocation().toString(), true); + } catch (ClassNotFoundException e) { + throw new SemanticException("Error retrieving Serde class:" + e.getMessage(), e); + } + } + } } diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/HiveAuthzBindingSessionHook.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/HiveAuthzBindingSessionHook.java index 0fa4a87fe..17b900341 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/HiveAuthzBindingSessionHook.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/HiveAuthzBindingSessionHook.java @@ -57,6 +57,7 @@ public class HiveAuthzBindingSessionHook ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.varname, ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname, ConfVars.HIVE_CAPTURE_TRANSFORM_ENTITY.varname, + ConfVars.HIVERELOADABLEJARS.varname, HiveAuthzConf.HIVE_ACCESS_CONF_URL, HiveAuthzConf.HIVE_SENTRY_CONF_URL, HiveAuthzConf.HIVE_ACCESS_SUBJECT_NAME, @@ -84,7 +85,6 @@ public SentryHiveAuthorizerImpl(HiveAccessController accessController, @Override public void applyAuthorizationConfigPolicy(HiveConf conf) { - return; } } @@ -95,7 +95,7 @@ public void applyAuthorizationConfigPolicy(HiveConf conf) { * 2. Set additional config properties required for auth * set HIVE_EXTENDED_ENITITY_CAPTURE = true * set SCRATCHDIRPERMISSION = 700 - * 3. Add sensetive config parameters to the config restrict list so that they can't be overridden by users + * 3. 
Add sensitive config parameters to the config restrict list so that they can't be overridden by users */ @Override public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLException { @@ -104,7 +104,11 @@ public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLExcepti appendConfVar(sessionConf, ConfVars.SEMANTIC_ANALYZER_HOOK.varname, SEMANTIC_HOOK); - sessionConf.setVar(ConfVars.HIVE_SECURITY_COMMAND_WHITELIST, "set"); + HiveAuthzConf authzConf = HiveAuthzBindingHook.loadAuthzConf(sessionConf); + String commandWhitelist = + authzConf.get(HiveAuthzConf.HIVE_SENTRY_SECURITY_COMMAND_WHITELIST, + HiveAuthzConf.HIVE_SENTRY_SECURITY_COMMAND_WHITELIST_DEFAULT); + sessionConf.setVar(ConfVars.HIVE_SECURITY_COMMAND_WHITELIST, commandWhitelist); sessionConf.setVar(ConfVars.SCRATCHDIRPERMISSION, SCRATCH_DIR_PERMISSIONS); sessionConf.setBoolVar(ConfVars.HIVE_CAPTURE_TRANSFORM_ENTITY, true); diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java index 5898b7e27..caf32cfa3 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactory; @@ -61,7 +60,7 @@ public class SentryHiveAuthorizationTaskFactoryImpl implements HiveAuthorization private static final Logger LOG = LoggerFactory.getLogger(SentryHiveAuthorizationTaskFactoryImpl.class); - public SentryHiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { + public SentryHiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { //NOPMD } @@ -162,7 +161,7 @@ public Task createRevokeTask(ASTNode ast, HashSet createShowGrantTask(ASTNode ast, Path result PrincipalDesc principalDesc = new PrincipalDesc(principalName, type); // Partition privileges are not supported by Sentry - List cols = null; if (ast.getChildCount() > 1) { ASTNode child = (ASTNode) ast.getChild(1); if (child.getToken().getType() == HiveParser.TOK_PRIV_OBJECT_COL) { privHiveObj = analyzePrivilegeObject(child); - cols = privHiveObj.getColumns(); - }else { + } else { throw new SemanticException("Unrecognized Token: " + child.getToken().getType()); } } diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryIniPolicyFileFormatter.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryIniPolicyFileFormatter.java new file mode 100644 index 000000000..45747dfdd --- /dev/null +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryIniPolicyFileFormatter.java @@ -0,0 +1,161 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.binding.hive; + +import java.io.File; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.apache.sentry.provider.common.ProviderBackendContext; +import org.apache.sentry.provider.file.SimpleFileProviderBackend; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Charsets; +import com.google.common.base.Joiner; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.collect.Table; +import com.google.common.io.Files; + +/** + * SentryIniPolicyFileFormatter is to parse file and write data to file for sentry mapping data with + * ini format, eg: + * [groups] + * group1=role1 + * [roles] + * role1=server=server1 + */ +public class SentryIniPolicyFileFormatter implements SentryPolicyFileFormatter { + + private static final Logger LOGGER = LoggerFactory.getLogger(SentryIniPolicyFileFormatter.class); + + private static final String NL = System.getProperty("line.separator", "\n"); + + /** + * Write the sentry mapping data to ini file. + * + * @param resourcePath + * The path of the output file + * @param sentryMappingData + * The map for sentry mapping data, eg: + * for the following mapping data: + * group1=role1,role2 + * group2=role2,role3 + * role1=server=server1->db=db1 + * role2=server=server1->db=db1->table=tbl1,server=server1->db=db1->table=tbl2 + * role3=server=server1->url=hdfs://localhost/path + * + * The sentryMappingData will be inputed as: + * { + * groups={[group1={role1, role2}], group2=[role2, role3]}, + * roles={role1=[server=server1->db=db1], + * role2=[server=server1->db=db1->table=tbl1,server=server1->db=db1->table=tbl2], + * role3=[server=server1->url=hdfs://localhost/path] + * } + * } + */ + @Override + public void write(String resourcePath, Map>> sentryMappingData) + throws Exception { + File destFile = new File(resourcePath); + if (destFile.exists() && !destFile.delete()) { + throw new IllegalStateException("Unable to delete " + destFile); + } + String contents = Joiner + .on(NL) + .join( + generateSection(PolicyFileConstants.GROUPS, + sentryMappingData.get(PolicyFileConstants.GROUPS)), + generateSection(PolicyFileConstants.ROLES, + sentryMappingData.get(PolicyFileConstants.ROLES)), + ""); + LOGGER.info("Writing policy file to " + destFile + ":\n" + contents); + Files.write(contents, destFile, Charsets.UTF_8); + } + + /** + * parse the ini file and return a map with all data + * + * @param resourcePath + * The path of the input file + * @param conf + * The configuration info + * @return the result of sentry mapping data in map structure. 
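For illustration, a small self-contained sketch that builds the nested Map<String, Map<String, Set<String>>> shape this formatter writes and parses, using the example mapping data from the javadoc above; the literal "groups" and "roles" keys stand in for PolicyFileConstants.GROUPS and PolicyFileConstants.ROLES:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class MappingDataSketch {
  public static void main(String[] args) {
    // [groups] section: group name -> role names
    Map<String, Set<String>> groups = new HashMap<String, Set<String>>();
    groups.put("group1", new HashSet<String>(Arrays.asList("role1", "role2")));
    groups.put("group2", new HashSet<String>(Arrays.asList("role2", "role3")));

    // [roles] section: role name -> privilege strings
    Map<String, Set<String>> roles = new HashMap<String, Set<String>>();
    roles.put("role1", new HashSet<String>(Arrays.asList("server=server1->db=db1")));
    roles.put("role2", new HashSet<String>(Arrays.asList(
        "server=server1->db=db1->table=tbl1",
        "server=server1->db=db1->table=tbl2")));
    roles.put("role3", new HashSet<String>(Arrays.asList(
        "server=server1->url=hdfs://localhost/path")));

    // the nested map handed to write() and returned by parse(); the literal
    // keys stand in for PolicyFileConstants.GROUPS / PolicyFileConstants.ROLES
    Map<String, Map<String, Set<String>>> sentryMappingData =
        new HashMap<String, Map<String, Set<String>>>();
    sentryMappingData.put("groups", groups);
    sentryMappingData.put("roles", roles);
    System.out.println(sentryMappingData);
  }
}
```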
+ */ + @Override + public Map>> parse(String resourcePath, Configuration conf) + throws Exception { + Map>> resultMap = Maps.newHashMap(); + // SimpleFileProviderBackend is used for parse the ini file + SimpleFileProviderBackend policyFileBackend = new SimpleFileProviderBackend(conf, resourcePath); + ProviderBackendContext context = new ProviderBackendContext(); + context.setAllowPerDatabase(true); + // parse the ini file + policyFileBackend.initialize(context); + + // SimpleFileProviderBackend parsed the input file and output the data in Table format. + Table> groupRolePrivilegeTable = policyFileBackend + .getGroupRolePrivilegeTable(); + Map> groupRolesMap = Maps.newHashMap(); + Map> rolePrivilegesMap = Maps.newHashMap(); + for (String groupName : groupRolePrivilegeTable.rowKeySet()) { + for (String roleName : groupRolePrivilegeTable.columnKeySet()) { + // get the roles set for the current groupName + Set tempRoles = groupRolesMap.get(groupName); + if (tempRoles == null) { + tempRoles = Sets.newHashSet(); + } + Set privileges = groupRolePrivilegeTable.get(groupName, roleName); + // if there has privilege for [group,role], if no privilege exist, the [group, role] info + // will be discard. + if (privileges != null) { + // update [group, role] mapping data + tempRoles.add(roleName); + groupRolesMap.put(groupName, tempRoles); + // update [role, privilege] mapping data + rolePrivilegesMap.put(roleName, privileges); + } + } + } + resultMap.put(PolicyFileConstants.GROUPS, groupRolesMap); + resultMap.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + return resultMap; + } + + // generate the ini section according to the mapping data. + private String generateSection(String name, Map> mappingData) { + if (mappingData.isEmpty()) { + return ""; + } + List lines = Lists.newArrayList(); + lines.add("[" + name + "]"); + for (Map.Entry> entry : mappingData.entrySet()) { + lines.add(PolicyConstants.KV_JOINER.join(entry.getKey(), + PolicyConstants.ROLE_JOINER.join(entry.getValue()))); + } + return Joiner.on(NL).join(lines); + } + +} diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryOnFailureHookContext.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryOnFailureHookContext.java index a38065188..c101a4fa0 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryOnFailureHookContext.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryOnFailureHookContext.java @@ -38,61 +38,61 @@ public interface SentryOnFailureHookContext { /** * @return the command attempted by user */ - public String getCommand(); + String getCommand(); /** * @return the set of read entities */ - public Set getInputs(); + Set getInputs(); /** * @return the set of write entities */ - public Set getOutputs(); + Set getOutputs(); /** * @return the operation */ - public HiveOperation getHiveOp(); + HiveOperation getHiveOp(); /** * @return the user name */ - public String getUserName(); + String getUserName(); /** * @return the ip address */ - public String getIpAddress(); + String getIpAddress(); /** * @return the database object */ - public Database getDatabase(); + Database getDatabase(); /** * @return the table object */ - public Table getTable(); + Table getTable(); /** * @return the udf URI */ - public AccessURI getUdfURI(); + AccessURI getUdfURI(); /** * @return the partition URI */ - public AccessURI getPartitionURI(); + AccessURI getPartitionURI(); /** * 
@return the authorization failure exception */ - public AuthorizationException getException(); + AuthorizationException getException(); /** * @return the config */ - public Configuration getConf(); + Configuration getConf(); } diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryPolicyFileFormatFactory.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryPolicyFileFormatFactory.java new file mode 100644 index 000000000..d2c607262 --- /dev/null +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryPolicyFileFormatFactory.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.binding.hive; + +import java.lang.reflect.Constructor; + +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars; + +/** + * SentryPolicyFileFormatFactory is used to create FileFormatter for different file type according + * to the configuration, the default FileFormatter is for ini file. + */ +public class SentryPolicyFileFormatFactory { + + public static SentryPolicyFileFormatter createFileFormatter(HiveAuthzConf conf) throws Exception { + // The default formatter is org.apache.sentry.binding.hive.SentryIniPolicyFileFormatter, for ini + // file. + String policyFileFormatterName = conf.get(AuthzConfVars.AUTHZ_POLICY_FILE_FORMATTER.getVar()); + // load the policy file formatter class + Constructor policyFileFormatterConstructor = Class.forName(policyFileFormatterName) + .getDeclaredConstructor(); + policyFileFormatterConstructor.setAccessible(true); + SentryPolicyFileFormatter sentryPolicyFileFormatter = (SentryPolicyFileFormatter) policyFileFormatterConstructor + .newInstance(); + return sentryPolicyFileFormatter; + } +} diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryPolicyFileFormatter.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryPolicyFileFormatter.java new file mode 100644 index 000000000..4f465b367 --- /dev/null +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryPolicyFileFormatter.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.binding.hive; + +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; + +/** + * SentryPolicyFileFormatter is to parse file and write data to file for sentry mapping data. + */ +public interface SentryPolicyFileFormatter { + + // write the sentry mapping data to file + void write(String resourcePath, Map>> sentryMappingData) + throws Exception; + + // parse the sentry mapping data from file + Map>> parse(String resourcePath, Configuration conf) + throws Exception; + +} diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java index 30714756d..0a1d0e8d3 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java @@ -22,8 +22,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -31,7 +29,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.sentry.SentryUserException; import org.apache.sentry.binding.hive.conf.HiveAuthzConf; import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars; @@ -44,8 +41,11 @@ import org.apache.sentry.core.model.db.DBModelAuthorizable.AuthorizableType; import org.apache.sentry.core.model.db.Server; import org.apache.sentry.policy.common.PolicyEngine; +import org.apache.sentry.provider.cache.PrivilegeCache; +import org.apache.sentry.provider.cache.SimpleCacheProviderBackend; import org.apache.sentry.provider.common.AuthorizationProvider; import org.apache.sentry.provider.common.ProviderBackend; +import org.apache.sentry.provider.common.ProviderBackendContext; import org.apache.sentry.provider.db.service.thrift.TSentryRole; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +57,6 @@ public class HiveAuthzBinding { private static final Logger LOG = LoggerFactory .getLogger(HiveAuthzBinding.class); - private static final AtomicInteger queryID = new AtomicInteger(); private static final Splitter ROLE_SET_SPLITTER = Splitter.on(",").trimResults() .omitEmptyStrings(); public static final String HIVE_BINDING_TAG = "hive.authz.bindings.tag"; @@ -90,6 +89,18 @@ public HiveAuthzBinding (HiveHook hiveHook, HiveConf hiveConf, HiveAuthzConf aut authzConf.get(HiveAuthzConf.SENTRY_ACTIVE_ROLE_SET, "")).trim()); } + public HiveAuthzBinding (HiveHook hiveHook, HiveConf hiveConf, HiveAuthzConf authzConf, + PrivilegeCache privilegeCache) throws Exception { + validateHiveConfig(hiveHook, hiveConf, authzConf); + this.hiveConf 
= hiveConf; + this.authzConf = authzConf; + this.authServer = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar())); + this.authProvider = getAuthProviderWithPrivilegeCache(authzConf, authServer.getName(), privilegeCache); + this.open = true; + this.activeRoleSet = parseActiveRoleSet(hiveConf.get(HiveAuthzConf.SENTRY_ACTIVE_ROLE_SET, + authzConf.get(HiveAuthzConf.SENTRY_ACTIVE_ROLE_SET, "")).trim()); + } + private static ActiveRoleSet parseActiveRoleSet(String name) throws SentryUserException { return parseActiveRoleSet(name, null); @@ -220,6 +231,38 @@ public static AuthorizationProvider getAuthProvider(HiveConf hiveConf, HiveAuthz return (AuthorizationProvider) constrctor.newInstance(new Object[] {resourceName, policyEngine}); } + // Instantiate the authz provider using PrivilegeCache, this method is used for metadata filter function. + public static AuthorizationProvider getAuthProviderWithPrivilegeCache(HiveAuthzConf authzConf, + String serverName, PrivilegeCache privilegeCache) throws Exception { + // get the provider class and resources from the authz config + String authProviderName = authzConf.get(AuthzConfVars.AUTHZ_PROVIDER.getVar()); + String resourceName = + authzConf.get(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar()); + String policyEngineName = authzConf.get(AuthzConfVars.AUTHZ_POLICY_ENGINE.getVar()); + + LOG.debug("Using authorization provider " + authProviderName + + " with resource " + resourceName + ", policy engine " + + policyEngineName + ", provider backend SimpleCacheProviderBackend"); + + ProviderBackend providerBackend = new SimpleCacheProviderBackend(authzConf, resourceName); + ProviderBackendContext context = new ProviderBackendContext(); + context.setBindingHandle(privilegeCache); + providerBackend.initialize(context); + + // load the policy engine class + Constructor policyConstructor = + Class.forName(policyEngineName).getDeclaredConstructor(String.class, ProviderBackend.class); + policyConstructor.setAccessible(true); + PolicyEngine policyEngine = (PolicyEngine) policyConstructor. 
+ newInstance(new Object[] {serverName, providerBackend}); + + // load the authz provider class + Constructor constrctor = + Class.forName(authProviderName).getDeclaredConstructor(String.class, PolicyEngine.class); + constrctor.setAccessible(true); + return (AuthorizationProvider) constrctor.newInstance(new Object[] {resourceName, policyEngine}); + } + /** * Validate the privilege for the given operation for the given subject @@ -267,17 +310,18 @@ public void authorize(HiveOperation hiveOp, HiveAuthzPrivileges stmtAuthPrivileg } boolean found = false; - for(AuthorizableType key: requiredInputPrivileges.keySet()) { + for (Map.Entry> entry : requiredInputPrivileges.entrySet()) { + AuthorizableType key = entry.getKey(); for (List inputHierarchy : inputHierarchyList) { if (getAuthzType(inputHierarchy).equals(key)) { found = true; - if (!authProvider.hasAccess(subject, inputHierarchy, requiredInputPrivileges.get(key), activeRoleSet)) { + if (!authProvider.hasAccess(subject, inputHierarchy, entry.getValue(), activeRoleSet)) { throw new AuthorizationException("User " + subject.getName() + " does not have privileges for " + hiveOp.name()); } } } - if(!found && !(key.equals(AuthorizableType.URI)) && !(hiveOp.equals(HiveOperation.QUERY)) + if (!found && !key.equals(AuthorizableType.URI) && !(hiveOp.equals(HiveOperation.QUERY)) && !(hiveOp.equals(HiveOperation.CREATETABLE_AS_SELECT))) { //URI privileges are optional for some privileges: anyPrivilege, tableDDLAndOptionalUriPrivilege //Query can mean select/insert/analyze where all of them have different required privileges. @@ -338,6 +382,10 @@ public HiveAuthzConf getAuthzConf() { return authzConf; } + public HiveConf getHiveConf() { + return hiveConf; + } + private AuthorizableType getAuthzType (List hierarchy){ return hierarchy.get(hierarchy.size() -1).getAuthzType(); } @@ -352,4 +400,8 @@ public List getLastQueryPrivilegeErrors() { public void close() { authProvider.close(); } + + public AuthorizationProvider getCurrentAuthProvider() { + return authProvider; + } } diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzPrivileges.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzPrivileges.java index 8cd82ef5e..f164b3014 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzPrivileges.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzPrivileges.java @@ -52,7 +52,8 @@ public static enum HiveOperationScope { DATABASE, TABLE, FUNCTION, - CONNECT + CONNECT, + COLUMN } public static enum HiveExtendedOperation { diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzPrivilegesMap.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzPrivilegesMap.java index 6efeed62b..8e704921e 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzPrivilegesMap.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzPrivilegesMap.java @@ -30,12 +30,6 @@ public class HiveAuthzPrivilegesMap { private static final Map hiveAuthzStmtPrivMap = new HashMap(); static { - HiveAuthzPrivileges serverPrivilege = new HiveAuthzPrivileges.AuthzPrivilegeBuilder(). - addInputObjectPriviledge(AuthorizableType.Server, EnumSet.of(DBModelAction.ALL)). 
- setOperationScope(HiveOperationScope.SERVER). - setOperationType(HiveOperationType.DDL). - build(); - HiveAuthzPrivileges createServerPrivilege = new HiveAuthzPrivileges.AuthzPrivilegeBuilder(). addInputObjectPriviledge(AuthorizableType.Server, EnumSet.of(DBModelAction.CREATE)). setOperationScope(HiveOperationScope.SERVER). @@ -45,6 +39,7 @@ public class HiveAuthzPrivilegesMap { HiveAuthzPrivileges tableCreatePrivilege = new HiveAuthzPrivileges.AuthzPrivilegeBuilder(). addOutputObjectPriviledge(AuthorizableType.Db, EnumSet.of(DBModelAction.CREATE)). addInputObjectPriviledge(AuthorizableType.URI, EnumSet.of(DBModelAction.ALL)).//TODO: make it optional + addOutputObjectPriviledge(AuthorizableType.URI, EnumSet.of(DBModelAction.ALL)). setOperationScope(HiveOperationScope.DATABASE). setOperationType(HiveOperationType.DDL). build(); @@ -139,6 +134,13 @@ public class HiveAuthzPrivilegesMap { setOperationType(HiveOperationType.INFO). build(); + // Metadata statements which only require column-level privileges. + HiveAuthzPrivileges columnMetaDataPrivilege = new HiveAuthzPrivileges.AuthzPrivilegeBuilder(). + addInputObjectPriviledge(AuthorizableType.Column, EnumSet.of(DBModelAction.SELECT, DBModelAction.INSERT)). + setOperationScope(HiveOperationScope.COLUMN). + setOperationType(HiveOperationType.INFO). + build(); + HiveAuthzPrivileges dbImportPrivilege = new HiveAuthzPrivileges.AuthzPrivilegeBuilder(). addOutputObjectPriviledge(AuthorizableType.Db, EnumSet.of(DBModelAction.CREATE)). addInputObjectPriviledge(AuthorizableType.URI, EnumSet.of(DBModelAction.ALL)). @@ -224,7 +226,6 @@ public class HiveAuthzPrivilegesMap { hiveAuthzStmtPrivMap.put(HiveOperation.ALTERPARTITION_PROTECTMODE, alterTablePrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.ALTERPARTITION_SERDEPROPERTIES, alterTablePrivilege); - hiveAuthzStmtPrivMap.put(HiveOperation.ALTERTABLE_SERIALIZER, alterTablePrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.ALTERTABLE_MERGEFILES, alterTablePrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.ALTERTABLE_SKEWED, alterTablePrivilege); @@ -237,6 +238,7 @@ public class HiveAuthzPrivilegesMap { hiveAuthzStmtPrivMap.put(HiveOperation.ALTERTABLE_ADDPARTS, addPartitionPrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.ALTERTABLE_RENAME, alterTableRenamePrivilege); + hiveAuthzStmtPrivMap.put(HiveOperation.ALTERTABLE_SERIALIZER, alterTableAndUriPrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.ALTERTABLE_LOCATION, alterTableAndUriPrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.ALTERPARTITION_LOCATION, alterTableAndUriPrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.ALTERTBLPART_SKEWED_LOCATION, alterTableAndUriPrivilege);//TODO: Needs test case @@ -254,9 +256,11 @@ public class HiveAuthzPrivilegesMap { hiveAuthzStmtPrivMap.put(HiveOperation.CREATEFUNCTION, functionPrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.DROPFUNCTION, functionPrivilege); + // SHOWCOLUMNS + hiveAuthzStmtPrivMap.put(HiveOperation.SHOWCOLUMNS, columnMetaDataPrivilege); + // SHOWDATABASES // SHOWTABLES - hiveAuthzStmtPrivMap.put(HiveOperation.SHOWCOLUMNS, tableMetaDataPrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.SHOW_TABLESTATUS, tableMetaDataPrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.SHOW_TBLPROPERTIES, tableMetaDataPrivilege); hiveAuthzStmtPrivMap.put(HiveOperation.SHOW_CREATETABLE, tableMetaDataPrivilege); @@ -283,6 +287,7 @@ public class HiveAuthzPrivilegesMap { new HiveAuthzPrivileges.AuthzPrivilegeBuilder(). addInputObjectPriviledge(AuthorizableType.Table, EnumSet.of(DBModelAction.SELECT)). 
addInputObjectPriviledge(AuthorizableType.Column, EnumSet.of(DBModelAction.SELECT)). + addInputObjectPriviledge(AuthorizableType.URI,EnumSet.of(DBModelAction.ALL)). addOutputObjectPriviledge(AuthorizableType.Db, EnumSet.of(DBModelAction.CREATE)). setOperationScope(HiveOperationScope.DATABASE). setOperationType(HiveOperationType.DDL). diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/SentryConfigTool.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/SentryConfigTool.java index ecbd6647f..616d46c73 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/SentryConfigTool.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/SentryConfigTool.java @@ -17,7 +17,14 @@ package org.apache.sentry.binding.hive.authz; -import com.google.common.collect.Table; +import java.security.CodeSource; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Map; +import java.util.Set; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; @@ -27,6 +34,7 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.cli.Parser; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.Driver; @@ -37,34 +45,28 @@ import org.apache.log4j.LogManager; import org.apache.sentry.Command; import org.apache.sentry.binding.hive.HiveAuthzBindingHook; -import org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook; +import org.apache.sentry.binding.hive.SentryPolicyFileFormatFactory; +import org.apache.sentry.binding.hive.SentryPolicyFileFormatter; import org.apache.sentry.binding.hive.conf.HiveAuthzConf; import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars; - import org.apache.sentry.core.common.SentryConfigurationException; import org.apache.sentry.core.common.Subject; -import org.apache.sentry.core.model.db.AccessConstants; -import org.apache.sentry.core.model.db.DBModelAuthorizable; import org.apache.sentry.core.model.db.Server; -import org.apache.sentry.policy.db.DBModelAuthorizables; import org.apache.sentry.provider.common.AuthorizationProvider; -import org.apache.sentry.provider.common.ProviderBackendContext; import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; -import org.apache.sentry.provider.db.service.thrift.TSentryRole; -import org.apache.sentry.provider.file.KeyValue; -import org.apache.sentry.provider.file.PolicyFileConstants; -import org.apache.sentry.provider.file.SimpleFileProviderBackend; import org.apache.sentry.service.thrift.SentryServiceClientFactory; -import java.security.CodeSource; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.HashSet; -import java.util.Set; - +/** + * set the required system property to be read by HiveConf and AuthzConf + * + * @throws Exception + */ +// Hack, hiveConf doesn't provide a reliable way check if it found a valid +// hive-site +// load auth provider +// get the configured sentry provider +// validate policy files +// import policy files public class SentryConfigTool { private String sentrySiteFile = null; private String 
policyFile = null; @@ -72,9 +74,11 @@ public class SentryConfigTool { private String jdbcURL = null; private String user = null; private String passWord = null; + private String importPolicyFilePath = null; + private String exportPolicyFilePath = null; private boolean listPrivs = false; private boolean validate = false; - private boolean importPolicy = false; + private boolean importOverwriteRole = false; private HiveConf hiveConf = null; private HiveAuthzConf authzConf = null; private AuthorizationProvider sentryProvider = null; @@ -115,12 +119,20 @@ public void setValidate(boolean validate) { this.validate = validate; } - public boolean isImportPolicy() { - return importPolicy; + public String getImportPolicyFilePath() { + return importPolicyFilePath; + } + + public void setImportPolicyFilePath(String importPolicyFilePath) { + this.importPolicyFilePath = importPolicyFilePath; + } + + public String getExportPolicyFilePath() { + return exportPolicyFilePath; } - public void setImportPolicy(boolean importPolicy) { - this.importPolicy = importPolicy; + public void setExportPolicyFilePath(String exportPolicyFilePath) { + this.exportPolicyFilePath = exportPolicyFilePath; } public String getSentrySiteFile() { @@ -179,6 +191,14 @@ public void setListPrivs(boolean listPrivs) { this.listPrivs = listPrivs; } + public boolean isImportOverwriteRole() { + return importOverwriteRole; + } + + public void setImportOverwriteRole(boolean importOverwriteRole) { + this.importOverwriteRole = importOverwriteRole; + } + /** * set the required system property to be read by HiveConf and AuthzConf * @throws Exception @@ -200,7 +220,7 @@ public void setupConfig() throws Exception { getHiveConf().setVar(ConfVars.SEMANTIC_ANALYZER_HOOK, HiveAuthzBindingHook.class.getName()); try { - System.out.println("Hive config: " + getHiveConf().getHiveSiteLocation()); + System.out.println("Hive config: " + HiveConf.getHiveSiteLocation()); } catch (NullPointerException e) { // Hack, hiveConf doesn't provide a reliable way check if it found a valid // hive-site @@ -252,133 +272,33 @@ public void validatePolicy() throws Exception { System.out.println("No errors found in the policy file"); } - // import policy files + // import the sentry mapping data to database public void importPolicy() throws Exception { - final String requestorUserName = "hive"; - SimpleFileProviderBackend policyFileBackend; - SentryPolicyServiceClient client; - - policyFileBackend = new SimpleFileProviderBackend(getAuthzConf(), - getAuthzConf().get(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar())); - ProviderBackendContext context = new ProviderBackendContext(); - context.setAllowPerDatabase(true); - policyFileBackend.initialize(context); - client = SentryServiceClientFactory.create(getAuthzConf()); - Set roles = new HashSet(); - for (TSentryRole sentryRole : client.listRoles(requestorUserName)) { - roles.add(sentryRole.getRoleName()); - } - - Table> groupRolePrivilegeTable = - policyFileBackend.getGroupRolePrivilegeTable(); - for(String groupName : groupRolePrivilegeTable.rowKeySet()) { - for(String roleName : groupRolePrivilegeTable.columnKeySet()) { - if (!roles.contains(roleName)) { - client.createRole(requestorUserName, roleName); - System.out.println(String.format("CREATE ROLE %s;", roleName)); - roles.add(roleName); - } - - Set privileges = groupRolePrivilegeTable.get(groupName, roleName); - if (privileges == null) { - continue; - } - client.grantRoleToGroup(requestorUserName, groupName, roleName); - System.out.println(String.format("GRANT ROLE %s TO 
GROUP %s;", - roleName, groupName)); - - for (String permission : privileges) { - String server = null; - String database = null; - String table = null; - String column = null; - String uri = null; - String action = AccessConstants.ALL; - for (String authorizable : PolicyFileConstants.AUTHORIZABLE_SPLITTER. - trimResults().split(permission)) { - KeyValue kv = new KeyValue(authorizable); - DBModelAuthorizable a = DBModelAuthorizables.from(kv); - if (a == null) { - action = kv.getValue(); - continue; - } - - switch (a.getAuthzType()) { - case Server: - server = a.getName(); - break; - case Db: - database = a.getName(); - break; - case Table: - case View: - table = a.getName(); - break; - case URI: - uri = a.getName(); - break; - case Column: - column = a.getName(); - break; - default: - break; - } - } - - if (uri != null) { - System.out.println(String.format( - "# server=%s", - server)); - System.out.println(String.format( - "GRANT ALL ON URI %s TO ROLE %s;", - uri, roleName)); - - client.grantURIPrivilege(requestorUserName, roleName, server, uri); - } else if (column != null && !AccessConstants.ALL.equals(column)) { - System.out.println(String.format( - "# server=%s, database=%s", - server, database)); - System.out.println(String.format( - "GRANT %s (%s) ON TABLE %s TO ROLE %s;", - "*".equals(action) ? "ALL" : action.toUpperCase(), column, - table, roleName)); - - client.grantColumnPrivilege(requestorUserName, roleName, server, - database, table, column, action); - } else if (table != null && !AccessConstants.ALL.equals(table)) { - System.out.println(String.format( - "# server=%s, database=%s", - server, database)); - System.out.println(String.format( - "GRANT %s ON TABLE %s TO ROLE %s;", - "*".equals(action) ? "ALL" : action.toUpperCase(), table, - roleName)); - - client.grantTablePrivilege(requestorUserName, roleName, server, - database, table, action); - } else if (database != null && !AccessConstants.ALL.equals(database)) { - System.out.println(String.format( - "# server=%s", - server)); - System.out.println(String.format( - "GRANT %s ON DATABASE %s TO ROLE %s;", - "*".equals(action) ? 
"ALL" : action.toUpperCase(), - database, roleName)); - - client.grantDatabasePrivilege(requestorUserName, roleName, server, - database, action); - } else if (server != null) { - System.out.println(String.format("GRANT ALL ON SERVER %s TO ROLE %s;", - server, roleName)); - - client.grantServerPrivilege(requestorUserName, roleName, server, action); - } else { - System.out.println(String.format("No grant for permission %s", - permission)); - } - } - } - } + String requestorUserName = System.getProperty("user.name", ""); + // get the FileFormatter according to the configuration + SentryPolicyFileFormatter sentryPolicyFileFormatter = SentryPolicyFileFormatFactory + .createFileFormatter(authzConf); + // parse the input file, get the mapping data in map structure + Map>> policyFileMappingData = sentryPolicyFileFormatter.parse( + importPolicyFilePath, authzConf); + // todo: here should be an validator to check the data's value, format, hierarchy + SentryPolicyServiceClient client = SentryServiceClientFactory.create(getAuthzConf()); + // import the mapping data to database + client.importPolicy(policyFileMappingData, requestorUserName, importOverwriteRole); + } + + // export the sentry mapping data to file + public void exportPolicy() throws Exception { + String requestorUserName = System.getProperty("user.name", ""); + SentryPolicyServiceClient client = SentryServiceClientFactory.create(getAuthzConf()); + // export the sentry mapping data from database to map structure + Map>> policyFileMappingData = client + .exportPolicy(requestorUserName); + // get the FileFormatter according to the configuration + SentryPolicyFileFormatter sentryPolicyFileFormatter = SentryPolicyFileFormatFactory + .createFileFormatter(authzConf); + // write the sentry mapping data to exportPolicyFilePath with the data in map structure + sentryPolicyFileFormatter.write(exportPolicyFilePath, policyFileMappingData); } // list permissions for given user @@ -458,8 +378,9 @@ public void verifyRemoteQuery(String queryStr) throws Exception { // verify senty session hook is set private boolean isSentryEnabledOnHiveServer(Statement stmt) throws SQLException { - return HiveAuthzBindingSessionHook.class.getName().equalsIgnoreCase( - readConfig(stmt, HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname)); + String bindingString = readConfig(stmt, HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname).toUpperCase(); + return bindingString.contains("org.apache.sentry.binding.hive".toUpperCase()) + && bindingString.contains("HiveAuthzBindingSessionHook".toUpperCase()); } // read a config value using 'set' statement @@ -511,7 +432,8 @@ private void usage(Options sentryOptions) { } /** - * parse arguments + * parse arguments + * *
    *   -d,--debug                  Enable debug output
*   -e,--query <arg>            Query privilege verification, requires -u
@@ -524,7 +446,10 @@ private void usage(Options sentryOptions) {
*   -u,--user <arg>             user name
    *   -v,--validate               Validate policy file
    *   -I,--import                 Import policy file
+   *   -E,--export <arg>           Export policy file
+   *   -o,--overwrite              Overwrite the existing role data when doing the import
* </pre>
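The options documented above change shape in this patch: -I now takes a file-path argument, -E is new and also takes one, and -o is a plain boolean flag. A self-contained commons-cli sketch of that flag-versus-argument distinction (a hypothetical driver, not the tool's real entry point):

```java
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class OptionSketch {
  public static void main(String[] args) throws ParseException {
    Options options = new Options();
    // third argument 'true' means the option carries a value (a file path),
    // which is how -I and -E behave after this change
    options.addOption("I", "import", true, "Import policy file");
    options.addOption("E", "export", true, "Export policy file");
    // 'false' means a plain boolean flag, like the new -o/--overwrite
    options.addOption("o", "overwrite", false, "enable import overwrite");

    CommandLine cmd = new GnuParser().parse(options,
        new String[] { "-I", "/tmp/policy.ini", "-o" });
    System.out.println(cmd.getOptionValue("I")); // /tmp/policy.ini
    System.out.println(cmd.hasOption("o"));      // true
  }
}
```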
+ * * @param args */ private void parseArgs(String[] args) { @@ -550,9 +475,12 @@ private void parseArgs(String[] args) { "list privileges for given user, requires -u"); listPrivsOpt.setRequired(false); - Option importOpt = new Option("I", "import", false, + Option importOpt = new Option("I", "import", true, "Import policy file"); + importOpt.setRequired(false); + Option exportOpt = new Option("E", "export", true, "Export policy file"); + exportOpt.setRequired(false); // required args OptionGroup sentryOptGroup = new OptionGroup(); sentryOptGroup.addOption(helpOpt); @@ -561,6 +489,7 @@ private void parseArgs(String[] args) { sentryOptGroup.addOption(listPermsOpt); sentryOptGroup.addOption(listPrivsOpt); sentryOptGroup.addOption(importOpt); + sentryOptGroup.addOption(exportOpt); sentryOptGroup.setRequired(true); sentryOptions.addOptionGroup(sentryOptGroup); @@ -591,6 +520,10 @@ private void parseArgs(String[] args) { debugOpt.setRequired(false); sentryOptions.addOption(debugOpt); + Option overwriteOpt = new Option("o", "overwrite", false, "enable import overwrite"); + overwriteOpt.setRequired(false); + sentryOptions.addOption(overwriteOpt); + try { Parser parser = new GnuParser(); CommandLine cmd = parser.parse(sentryOptions, args); @@ -613,18 +546,22 @@ private void parseArgs(String[] args) { } else if (opt.getOpt().equals("v")) { setValidate(true); } else if (opt.getOpt().equals("I")) { - setImportPolicy(true); + setImportPolicyFilePath(opt.getValue()); + } else if (opt.getOpt().equals("E")) { + setExportPolicyFilePath(opt.getValue()); } else if (opt.getOpt().equals("h")) { usage(sentryOptions); } else if (opt.getOpt().equals("d")) { enableDebug = true; + } else if (opt.getOpt().equals("o")) { + setImportOverwriteRole(true); } } - if (isListPrivs() && (getUser() == null)) { + if (isListPrivs() && getUser() == null) { throw new ParseException("Can't use -l without -u "); } - if ((getQuery() != null) && (getUser() == null)) { + if (getQuery() != null && getUser() == null) { throw new ParseException("Must use -u with -e "); } } catch (ParseException e1) { @@ -654,10 +591,14 @@ public void run(String[] args) throws Exception { sentryTool.validatePolicy(); } - if (sentryTool.isImportPolicy()) { + if (!StringUtils.isEmpty(sentryTool.getImportPolicyFilePath())) { sentryTool.importPolicy(); } + if (!StringUtils.isEmpty(sentryTool.getExportPolicyFilePath())) { + sentryTool.exportPolicy(); + } + // list permissions for give user if (sentryTool.isListPrivs()) { sentryTool.listPrivs(); diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java index 0a3b50953..5a89af23f 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java @@ -16,18 +16,17 @@ */ package org.apache.sentry.binding.hive.conf; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.net.MalformedURLException; import java.net.URL; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + public class HiveAuthzConf extends 
Configuration { @@ -47,6 +46,43 @@ public class HiveAuthzConf extends Configuration { */ public static final String SENTRY_ACTIVE_ROLE_SET = "hive.sentry.active.role.set"; + public static final String HIVE_SENTRY_SECURITY_COMMAND_WHITELIST = + "hive.sentry.security.command.whitelist"; + public static final String HIVE_SENTRY_SECURITY_COMMAND_WHITELIST_DEFAULT = + "set,reset,reload"; + + public static final String HIVE_SENTRY_SERDE_WHITELIST = "hive.sentry.serde.whitelist"; + public static final String HIVE_SENTRY_SERDE_WHITELIST_DEFAULT = "org.apache.hadoop.hive.serde2"; + + // Disable the serde Uri privileges by default for backward compatibilities. + public static final String HIVE_SENTRY_SERDE_URI_PRIVILIEGES_ENABLED = "hive.sentry.turn.on.serde.uri.privileges"; + public static final boolean HIVE_SENTRY_SERDE_URI_PRIVILIEGES_ENABLED_DEFAULT = false; + + public static final String HIVE_UDF_WHITE_LIST = + "concat,substr,substring,space,repeat,ascii,lpad,rpad,size,round,floor,sqrt,ceil," + + "ceiling,rand,abs,pmod,ln,log2,sin,asin,cos,acos,log10,log,exp,power,pow,sign,pi," + + "degrees,radians,atan,tan,e,conv,bin,hex,unhex,base64,unbase64,encode,decode,upper," + + "lower,ucase,lcase,trim,ltrim,rtrim,length,reverse,field,find_in_set,initcap,like," + + "rlike,regexp,regexp_replace,regexp_extract,parse_url,nvl,split,str_to_map,translate" + + ",positive,negative,day,dayofmonth,month,year,hour,minute,second,from_unixtime," + + "to_date,weekofyear,last_day,date_add,date_sub,datediff,add_months,get_json_object," + + "xpath_string,xpath_boolean,xpath_number,xpath_double,xpath_float,xpath_long," + + "xpath_int,xpath_short,xpath,+,-,*,/,%,div,&,|,^,~,current_database,isnull," + + "isnotnull,if,in,and,or,=,==,<=>,!=,<>,<,<=,>,>=,not,!,between,ewah_bitmap_and," + + "ewah_bitmap_or,ewah_bitmap_empty,boolean,tinyint,smallint,int,bigint,float,double," + + "string,date,timestamp,binary,decimal,varchar,char,max,min,sum,count,avg,std,stddev," + + "stddev_pop,stddev_samp,variance,var_pop,var_samp,covar_pop,covar_samp,corr," + + "histogram_numeric,percentile_approx,collect_set,collect_list,ngrams," + + "context_ngrams,ewah_bitmap,compute_stats,percentile," + + "array,assert_true,map,struct,named_struct,create_union,case,when,hash,coalesce," + + "index,in_file,instr,locate,elt,concat_ws,sort_array," + + "array_contains,sentences,map_keys,map_values,format_number,printf,greatest,least," + + "from_utc_timestamp,to_utc_timestamp,unix_timestamp,to_unix_timestamp,explode," + + "inline,json_tuple,parse_url_tuple,posexplode,stack,lead,lag,row_number,rank," + + "dense_rank,percent_rank,cume_dist,ntile,first_value,last_value,noop,noopwithmap," + + "noopstreaming,noopwithmapstreaming,windowingtablefunction,matchpath"; + + public static final String HIVE_UDF_BLACK_LIST = "reflect,reflect2,java_method"; /** * Config setting definitions @@ -57,10 +93,12 @@ public static enum AuthzConfVars { AUTHZ_PROVIDER_RESOURCE("sentry.hive.provider.resource", ""), AUTHZ_PROVIDER_BACKEND("sentry.hive.provider.backend", "org.apache.sentry.provider.file.SimpleFileProviderBackend"), AUTHZ_POLICY_ENGINE("sentry.hive.policy.engine", "org.apache.sentry.policy.db.SimpleDBPolicyEngine"), - AUTHZ_SERVER_NAME("sentry.hive.server", "HS2"), + AUTHZ_POLICY_FILE_FORMATTER( + "sentry.hive.policy.file.formatter", + "org.apache.sentry.binding.hive.SentryIniPolicyFileFormatter"), + AUTHZ_SERVER_NAME("sentry.hive.server", ""), AUTHZ_RESTRICT_DEFAULT_DB("sentry.hive.restrict.defaultDB", "false"), SENTRY_TESTING_MODE("sentry.hive.testing.mode", "false"), - 
AUTHZ_UDF_WHITELIST("sentry.hive.udf.whitelist", HIVE_UDF_WHITE_LIST), AUTHZ_ALLOW_HIVE_IMPERSONATION("sentry.hive.allow.hive.impersonation", "false"), AUTHZ_ONFAILURE_HOOKS("sentry.hive.failure.hooks", ""), AUTHZ_METASTORE_SERVICE_USERS("sentry.metastore.service.users", null), @@ -71,10 +109,9 @@ public static enum AuthzConfVars { AUTHZ_PROVIDER_DEPRECATED("hive.sentry.provider", "org.apache.sentry.provider.file.ResourceAuthorizationProvider"), AUTHZ_PROVIDER_RESOURCE_DEPRECATED("hive.sentry.provider.resource", ""), - AUTHZ_SERVER_NAME_DEPRECATED("hive.sentry.server", "HS2"), + AUTHZ_SERVER_NAME_DEPRECATED("hive.sentry.server", ""), AUTHZ_RESTRICT_DEFAULT_DB_DEPRECATED("hive.sentry.restrict.defaultDB", "false"), SENTRY_TESTING_MODE_DEPRECATED("hive.sentry.testing.mode", "false"), - AUTHZ_UDF_WHITELIST_DEPRECATED("hive.sentry.udf.whitelist", HIVE_UDF_WHITE_LIST), AUTHZ_ALLOW_HIVE_IMPERSONATION_DEPRECATED("hive.sentry.allow.hive.impersonation", "false"), AUTHZ_ONFAILURE_HOOKS_DEPRECATED("hive.sentry.failure.hooks", ""); @@ -104,30 +141,6 @@ public static String getDefault(String varName) { } } - private static final String HIVE_UDF_WHITE_LIST = - "date,decimal,timestamp," + // SENTRY-312 - "abs,acos,and,array,array_contains,ascii,asin,assert_true,atan,avg," + - "between,bin,case,cast,ceil,ceiling,coalesce,collect_set,compute_stats,concat,concat_ws," + - "UDFConv,UDFHex,UDFSign,UDFToBoolean,UDFToByte,UDFToDouble,UDFToFloat,UDFToInteger,UDFToLong,UDFToShort,UDFToString," + - "context_ngrams,conv,corr,cos,count,covar_pop,covar_samp,create_union,date_add,date_sub," + - "datediff,day,dayofmonth,degrees,div,e,elt,ewah_bitmap,ewah_bitmap_and,ewah_bitmap_empty," + - "ewah_bitmap_or,exp,explode,field,find_in_set,floor,format_number,from_unixtime," + - "from_utc_timestamp,get_json_object,hash,hex,histogram_numeric,hour,if,in,in_file,index," + - "inline,instr,isnotnull,isnull," + // java_method is skipped - "json_tuple,lcase,length,like,ln,locate,log," + - "log10,log2,lower,lpad,ltrim,map,map_keys,map_values,max,min," + - "minute,month,named_struct,negative,ngrams,not,or,parse_url,parse_url_tuple,percentile," + - "percentile_approx,pi,pmod,positive,pow,power,printf,radians,rand," + // reflect is skipped - "regexp,regexp_extract,regexp_replace,repeat,reverse,rlike,round,rpad,rtrim,second," + - "sentences,sign,sin,size,sort_array,space,split,sqrt,stack,std," + - "stddev,stddev_pop,stddev_samp,str_to_map,struct,substr,substring,sum,tan,to_date," + - "to_utc_timestamp,translate,trim,ucase,unhex,union_map,unix_timestamp,upper,var_pop,var_samp," + - "variance,weekofyear,when,xpath,xpath_boolean,xpath_double,xpath_float,xpath_int,xpath_long," + - "xpath_number,xpath_short,xpath_string,year,base64,cume_dist, decode, dense_rank, first_value," + - "lag, last_value, lead, noop, noopwithmap, ntile, nvl, percent_rank, rank, to_unix_timestamp," + - "current_database, char, varchar, matchpath, row_number" + - "unbase64,windowingtablefunction"; - // map of current property names - > deprecated property names. // The binding layer code should work if the deprecated property names are provided, // as long as the new property names aren't also provided. 
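// (Illustrative note, not part of this change: a site file that still sets only a deprecated key, e.g. hive.sentry.testing.mode, is expected to resolve through this mapping to the current sentry.hive.testing.mode; setting both the old and the new key at once is the unsupported case described above.)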
Since the binding code @@ -141,12 +154,10 @@ public static String getDefault(String varName) { currentToDeprecatedProps.put(AuthzConfVars.AUTHZ_SERVER_NAME.getVar(), AuthzConfVars.AUTHZ_SERVER_NAME_DEPRECATED); currentToDeprecatedProps.put(AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB.getVar(), AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB_DEPRECATED); currentToDeprecatedProps.put(AuthzConfVars.SENTRY_TESTING_MODE.getVar(), AuthzConfVars.SENTRY_TESTING_MODE_DEPRECATED); - currentToDeprecatedProps.put(AuthzConfVars.AUTHZ_UDF_WHITELIST.getVar(), AuthzConfVars.AUTHZ_UDF_WHITELIST_DEPRECATED); currentToDeprecatedProps.put(AuthzConfVars.AUTHZ_ALLOW_HIVE_IMPERSONATION.getVar(), AuthzConfVars.AUTHZ_ALLOW_HIVE_IMPERSONATION_DEPRECATED); currentToDeprecatedProps.put(AuthzConfVars.AUTHZ_ONFAILURE_HOOKS.getVar(), AuthzConfVars.AUTHZ_ONFAILURE_HOOKS_DEPRECATED); }; - @SuppressWarnings("unused") private static final Logger LOG = LoggerFactory .getLogger(HiveAuthzConf.class); public static final String AUTHZ_SITE_FILE = "sentry-site.xml"; diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/AuthorizingObjectStore.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/AuthorizingObjectStore.java index 5a0c9505b..37781b91f 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/AuthorizingObjectStore.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/AuthorizingObjectStore.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.sentry.binding.hive.HiveAuthzBindingHook; import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; @@ -303,7 +302,7 @@ private List filterDatabases(List dbList) * @return * @throws MetaException */ - private List filterTables(String dbName, List tabList) + protected List filterTables(String dbName, List tabList) throws MetaException { if (needsAuthorization(getUserName())) { try { @@ -403,7 +402,7 @@ private static Set toTrimed(Set s) { return result; } - private String getNoAccessMessageForTable(String dbName, String tableName) { + protected String getNoAccessMessageForTable(String dbName, String tableName) { return NO_ACCESS_MESSAGE_TABLE + "<" + dbName + ">.<" + tableName + ">"; } diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBinding.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBinding.java index f16341ddb..14c31a4de 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBinding.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBinding.java @@ -35,11 +35,11 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent; import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent; import 
org.apache.hadoop.hive.metastore.events.PreAlterTableEvent; -import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent; import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent; @@ -47,7 +47,6 @@ import org.apache.hadoop.hive.metastore.events.PreEventContext; import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.sentry.SentryUserException; import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; @@ -79,7 +78,7 @@ public class MetastoreAuthzBinding extends MetaStorePreEventListener { /** * Build the set of object hierarchies ie fully qualified db model objects */ - private static class HierarcyBuilder { + protected static class HierarcyBuilder { private List> authHierarchy; public HierarcyBuilder() { @@ -197,7 +196,7 @@ public void onEvent(PreEventContext context) throws MetaException, authorizeAlterPartition((PreAlterPartitionEvent) context); break; case CREATE_DATABASE: - authorizeCreateDatabase((PreCreateDatabaseEvent) context); + authorizeCreateDatabase(); break; case DROP_DATABASE: authorizeDropDatabase((PreDropDatabaseEvent) context); @@ -210,7 +209,7 @@ public void onEvent(PreEventContext context) throws MetaException, } } - private void authorizeCreateDatabase(PreCreateDatabaseEvent context) + private void authorizeCreateDatabase() throws InvalidOperationException, MetaException { authorizeMetastoreAccess(HiveOperation.CREATEDATABASE, new HierarcyBuilder().addServerToOutput(getAuthServer()).build(), @@ -301,7 +300,7 @@ private void authorizeAlterTable(PreAlterTableEvent context) private void authorizeAddPartition(PreAddPartitionEvent context) throws InvalidOperationException, MetaException, NoSuchObjectException { - for (org.apache.hadoop.hive.metastore.api.Partition mapiPart : context.getPartitions()) { + for (Partition mapiPart : context.getPartitions()) { HierarcyBuilder inputBuilder = new HierarcyBuilder(); inputBuilder.addTableToOutput(getAuthServer(), mapiPart .getDbName(), mapiPart.getTableName()); @@ -311,8 +310,11 @@ private void authorizeAddPartition(PreAddPartitionEvent context) // check if we need to validate URI permissions when storage location is // non-default, ie something not under the parent table - if (mapiPart.isSetSd()) { - String partitionLocation = mapiPart.getSd().getLocation(); + String partitionLocation = null; + if (mapiPart.isSetSd()) { + partitionLocation = mapiPart.getSd().getLocation(); + } + if (!StringUtils.isEmpty(partitionLocation)) { String tableLocation = context .getHandler() .get_table(mapiPart.getDbName(), @@ -324,8 +326,8 @@ private void authorizeAddPartition(PreAddPartitionEvent context) } catch (URISyntaxException e) { throw new MetaException(e.getMessage()); } - if (!partitionLocation.equals(tableLocation) && - !partitionLocation.startsWith(tableLocation + File.separator)) { + if (!partitionLocation.equals(tableLocation) && + !partitionLocation.startsWith(tableLocation + File.separator)) { outputBuilder.addUriToOutput(getAuthServer(), uriPath, warehouseDir); } } @@ -334,7 +336,7 @@ private void authorizeAddPartition(PreAddPartitionEvent context) } } - private void authorizeDropPartition(PreDropPartitionEvent context) + protected void authorizeDropPartition(PreDropPartitionEvent context) throws 
InvalidOperationException, MetaException { authorizeMetastoreAccess( HiveOperation.ALTERTABLE_DROPPARTS, @@ -347,7 +349,7 @@ private void authorizeDropPartition(PreDropPartitionEvent context) } private void authorizeAlterPartition(PreAlterPartitionEvent context) - throws InvalidOperationException, MetaException { + throws InvalidOperationException, MetaException, NoSuchObjectException { /* * There are multiple alter partition options and it's tricky to figure out * which is attempted here. Currently all alter partition need full level @@ -360,15 +362,21 @@ private void authorizeAlterPartition(PreAlterPartitionEvent context) HierarcyBuilder outputBuilder = new HierarcyBuilder().addTableToOutput( getAuthServer(), context.getDbName(), context.getTableName()); - String partitionLocation = getSdLocation(context.getNewPartition().getSd()); + Partition partition = context.getNewPartition(); + String partitionLocation = getSdLocation(partition.getSd()); if (!StringUtils.isEmpty(partitionLocation)) { + String tableLocation = context.getHandler().get_table( + partition.getDbName(), partition.getTableName()).getSd().getLocation(); + String uriPath; try { uriPath = PathUtils.parseDFSURI(warehouseDir, partitionLocation); - } catch (URISyntaxException e) { + } catch (URISyntaxException e) { throw new MetaException(e.getMessage()); } - outputBuilder.addUriToOutput(getAuthServer(), uriPath, warehouseDir); + if (!partitionLocation.startsWith(tableLocation + File.separator)) { + outputBuilder.addUriToOutput(getAuthServer(), uriPath, warehouseDir); + } } authorizeMetastoreAccess( HiveOperation.ALTERPARTITION_LOCATION, @@ -389,7 +397,7 @@ private InvalidOperationException invalidOperationException(Exception e) { * @param outputHierarchy * @throws InvalidOperationException */ - private void authorizeMetastoreAccess(HiveOperation hiveOp, + protected void authorizeMetastoreAccess(HiveOperation hiveOp, List> inputHierarchy, List> outputHierarchy) throws InvalidOperationException { diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryHiveMetaStoreClient.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryHiveMetaStoreClient.java index 6a33ef96b..0330db95f 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryHiveMetaStoreClient.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryHiveMetaStoreClient.java @@ -42,17 +42,14 @@ public class SentryHiveMetaStoreClient extends HiveMetaStoreClient implements private HiveAuthzBinding hiveAuthzBinding; private HiveAuthzConf authzConf; - private final HiveConf hiveConf; public SentryHiveMetaStoreClient(HiveConf conf) throws MetaException { super(conf); - this.hiveConf = conf; } public SentryHiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) throws MetaException { super(conf, hookLoader); - this.hiveConf = conf; } @Override diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetaStoreFilterHook.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetaStoreFilterHook.java index 2ae4fbdc3..b551788a6 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetaStoreFilterHook.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetaStoreFilterHook.java @@ -33,25 +33,20 @@ import 
org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.sentry.binding.hive.HiveAuthzBindingHook; import org.apache.sentry.binding.hive.authz.HiveAuthzBinding; import org.apache.sentry.binding.hive.conf.HiveAuthzConf; -import com.google.common.collect.Lists; - public class SentryMetaStoreFilterHook implements MetaStoreFilterHook { static final protected Log LOG = LogFactory.getLog(SentryMetaStoreFilterHook.class); private HiveAuthzBinding hiveAuthzBinding; private HiveAuthzConf authzConf; - private final HiveConf hiveConf; - public SentryMetaStoreFilterHook(HiveConf hiveConf) { - this.hiveConf = hiveConf; + public SentryMetaStoreFilterHook(HiveConf hiveConf) { //NOPMD } @Override @@ -111,7 +106,7 @@ public Index filterIndex(Index index) throws NoSuchObjectException { @Override public List filterIndexNames(String dbName, String tblName, List indexList) { - return null; + return indexList; } @Override @@ -141,7 +136,7 @@ private List filterDb(List dbList) { /** * Invoke Hive table filtering that removes the entries which use has no * privileges to access - * @param dbList + * @param tabList * @return * @throws MetaException */ diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java index b6a9a47d8..452757e70 100644 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java +++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java @@ -56,16 +56,23 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener { private final HiveAuthzConf authzConf; private final Server server; - private List sentryPlugins = new ArrayList(); + protected List sentryPlugins = new ArrayList(); public SentryMetastorePostEventListener(Configuration config) { super(config); + if (!(config instanceof HiveConf)) { + String error = "Could not initialize Plugin - Configuration is not an instanceof HiveConf"; + LOGGER.error(error); + throw new RuntimeException(error); + } + authzConf = HiveAuthzConf.getAuthzConf((HiveConf)config); server = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar())); Iterable pluginClasses = ConfUtilties.CLASS_SPLITTER .split(config.get(ServerConfig.SENTRY_METASTORE_PLUGINS, ServerConfig.SENTRY_METASTORE_PLUGINS_DEFAULT).trim()); + try { for (String pluginClassStr : pluginClasses) { Class clazz = config.getClassByName(pluginClassStr); @@ -75,7 +82,8 @@ public SentryMetastorePostEventListener(Configuration config) { + SentryMetastoreListenerPlugin.class.getName()); } SentryMetastoreListenerPlugin plugin = (SentryMetastoreListenerPlugin) clazz - .getConstructor(Configuration.class).newInstance(config); + .getConstructor(Configuration.class, Configuration.class) + .newInstance(config, authzConf); sentryPlugins.add(plugin); } } catch (Exception e) { @@ -86,6 +94,14 @@ public SentryMetastorePostEventListener(Configuration config) { @Override public void onCreateTable (CreateTableEvent tableEvent) throws MetaException { + + // don't sync 
paths/privileges if the operation has failed + if (!tableEvent.getStatus()) { + LOGGER.debug("Skip syncing paths/privileges with Sentry server for onCreateTable event," + + " since the operation failed. \n"); + return; + } + if (tableEvent.getTable().getSd().getLocation() != null) { String authzObj = tableEvent.getTable().getDbName() + "." + tableEvent.getTable().getTableName(); @@ -94,21 +110,27 @@ public void onCreateTable (CreateTableEvent tableEvent) throws MetaException { plugin.addPath(authzObj, path); } } + // drop the privileges on the given table, in case if anything was left // behind during the drop if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_CREATE_WITH_POLICY_STORE)) { return; } - // don't sync privileges if the operation has failed - if (!tableEvent.getStatus()) { - return; - } + dropSentryTablePrivilege(tableEvent.getTable().getDbName(), tableEvent.getTable().getTableName()); } @Override public void onDropTable(DropTableEvent tableEvent) throws MetaException { + + // don't sync paths/privileges if the operation has failed + if (!tableEvent.getStatus()) { + LOGGER.debug("Skip syncing paths/privileges with Sentry server for onDropTable event," + + " since the operation failed. \n"); + return; + } + if (tableEvent.getTable().getSd().getLocation() != null) { String authzObj = tableEvent.getTable().getDbName() + "." + tableEvent.getTable().getTableName(); @@ -120,10 +142,11 @@ public void onDropTable(DropTableEvent tableEvent) throws MetaException { if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_DROP_WITH_POLICY_STORE)) { return; } - // don't sync privileges if the operation has failed + if (!tableEvent.getStatus()) { return; } + dropSentryTablePrivilege(tableEvent.getTable().getDbName(), tableEvent.getTable().getTableName()); } @@ -131,6 +154,14 @@ public void onDropTable(DropTableEvent tableEvent) throws MetaException { @Override public void onCreateDatabase(CreateDatabaseEvent dbEvent) throws MetaException { + + // don't sync paths/privileges if the operation has failed + if (!dbEvent.getStatus()) { + LOGGER.debug("Skip syncing paths/privileges with Sentry server for onCreateDatabase event," + + " since the operation failed. \n"); + return; + } + if (dbEvent.getDatabase().getLocationUri() != null) { String authzObj = dbEvent.getDatabase().getName(); String path = dbEvent.getDatabase().getLocationUri(); @@ -138,25 +169,30 @@ public void onCreateDatabase(CreateDatabaseEvent dbEvent) plugin.addPath(authzObj, path); } } - // drop the privileges on the database, incase anything left behind during + // drop the privileges on the database, in case anything left behind during // last drop db if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_CREATE_WITH_POLICY_STORE)) { return; } - // don't sync privileges if the operation has failed - if (!dbEvent.getStatus()) { - return; - } + dropSentryDbPrivileges(dbEvent.getDatabase().getName()); } /** - * Drop the privileges on the database // note that child tables will be - * dropped individually by client, so we // just need to handle the removing - * the db privileges. The table drop // should cleanup the table privileges + * Drop the privileges on the database. Note that child tables will be + * dropped individually by client, so we just need to handle removing + * the db privileges. The table drop should clean up the table privileges.
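+ * (Illustrative sequence: a DROP DATABASE ... CASCADE is expected to fire onDropTable for each child table first, each cleaning up its own table privileges, and only then this onDropDatabase handler, which drops the database-level privileges.)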
*/ @Override public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException { + + // don't sync paths/privileges if the operation has failed + if (!dbEvent.getStatus()) { + LOGGER.debug("Skip syncing paths/privileges with Sentry server for onDropDatabase event," + + " since the operation failed. \n"); + return; + } + String authzObj = dbEvent.getDatabase().getName(); for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { List tNames = dbEvent.getHandler().get_all_tables(authzObj); @@ -165,10 +201,7 @@ public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException { if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_DROP_WITH_POLICY_STORE)) { return; } - // don't sync privileges if the operation has failed - if (!dbEvent.getStatus()) { - return; - } + dropSentryDbPrivileges(dbEvent.getDatabase().getName()); } @@ -177,31 +210,33 @@ public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException { */ @Override public void onAlterTable (AlterTableEvent tableEvent) throws MetaException { - String oldTableName = null, newTableName = null; + // don't sync privileges if the operation has failed if (!tableEvent.getStatus()) { + LOGGER.debug("Skip syncing privileges with Sentry server for onAlterTable event," + + " since the operation failed. \n"); return; } - if (tableEvent.getOldTable() != null) { - oldTableName = tableEvent.getOldTable().getTableName(); - } - if (tableEvent.getNewTable() != null) { - newTableName = tableEvent.getNewTable().getTableName(); - } renameSentryTablePrivilege(tableEvent.getOldTable().getDbName(), - oldTableName, tableEvent.getOldTable().getSd().getLocation(), - tableEvent.getNewTable().getDbName(), newTableName, + tableEvent.getOldTable().getTableName(), + tableEvent.getOldTable().getSd().getLocation(), + tableEvent.getNewTable().getDbName(), + tableEvent.getNewTable().getTableName(), tableEvent.getNewTable().getSd().getLocation()); } @Override public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaException { + // don't sync privileges if the operation has failed if (!partitionEvent.getStatus()) { + LOGGER.debug("Skip syncing privileges with Sentry server for onAlterPartition event," + + " since the operation failed. \n"); return; } + String oldLoc = null, newLoc = null; if (partitionEvent.getOldPartition() != null) { oldLoc = partitionEvent.getOldPartition().getSd().getLocation(); @@ -210,7 +245,7 @@ public void onAlterPartition(AlterPartitionEvent partitionEvent) newLoc = partitionEvent.getNewPartition().getSd().getLocation(); } - if ((oldLoc != null) && (newLoc != null) && (!oldLoc.equals(newLoc))) { + if (oldLoc != null && newLoc != null && !oldLoc.equals(newLoc)) { String authzObj = partitionEvent.getOldPartition().getDbName() + "." + partitionEvent.getOldPartition().getTableName(); @@ -224,8 +259,16 @@ public void onAlterPartition(AlterPartitionEvent partitionEvent) @Override public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaException { + + // don't sync path if the operation has failed + if (!partitionEvent.getStatus()) { + LOGGER.debug("Skip syncing path with Sentry server for onAddPartition event," + + " since the operation failed. \n"); + return; + } + for (Partition part : partitionEvent.getPartitions()) { - if ((part.getSd() != null) && (part.getSd().getLocation() != null)) { + if (part.getSd() != null && part.getSd().getLocation() != null) { String authzObj = part.getDbName() + "." 
+ part.getTableName(); String path = part.getSd().getLocation(); for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { @@ -239,6 +282,14 @@ public void onAddPartition(AddPartitionEvent partitionEvent) @Override public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException { + + // don't sync path if the operation has failed + if (!partitionEvent.getStatus()) { + LOGGER.debug("Skip syncing path with Sentry server for onDropPartition event," + + " since the operation failed. \n"); + return; + } + String authzObj = partitionEvent.getTable().getDbName() + "." + partitionEvent.getTable().getTableName(); String path = partitionEvent.getPartition().getSd().getLocation(); @@ -298,6 +349,9 @@ private void dropSentryPrivileges( .getShortUserName(); SentryPolicyServiceClient sentryClient = getSentryServiceClient(); sentryClient.dropPrivileges(requestorUserName, authorizableTable); + + // Close the connection after dropping privileges is done. + sentryClient.close(); } private void renameSentryTablePrivilege(String oldDbName, String oldTabName, @@ -315,10 +369,12 @@ private void renameSentryTablePrivilege(String oldDbName, String oldTabName, if (!oldTabName.equalsIgnoreCase(newTabName) && syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_ALTER_WITH_POLICY_STORE)) { + + SentryPolicyServiceClient sentryClient = getSentryServiceClient(); + try { String requestorUserName = UserGroupInformation.getCurrentUser() .getShortUserName(); - SentryPolicyServiceClient sentryClient = getSentryServiceClient(); sentryClient.renamePrivileges(requestorUserName, oldAuthorizableTable, newAuthorizableTable); } catch (SentryUserException e) { throw new MetaException( @@ -327,6 +383,10 @@ && syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_ALTER_WITH_POLICY_STORE)) { + " Error: " + e.getMessage()); } catch (IOException e) { throw new MetaException("Failed to find local user " + e.getMessage()); + } finally { + + // Close the connection after renaming privileges is done. 
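+ // (Closing in the finally block keeps the client from leaking even when + // renamePrivileges or the local user lookup above throws.)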
+ sentryClient.close(); } } // The HDFS plugin needs to know if it's a path change (set location) @@ -338,7 +398,7 @@ && syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_ALTER_WITH_POLICY_STORE)) { private boolean syncWithPolicyStore(AuthzConfVars syncConfVar) { return "true" - .equalsIgnoreCase((authzConf.get(syncConfVar.getVar(), "true"))); + .equalsIgnoreCase(authzConf.get(syncConfVar.getVar(), "true")); } } diff --git a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestHiveAuthzBindings.java b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestHiveAuthzBindings.java index 0622b43d8..24fddf80c 100644 --- a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestHiveAuthzBindings.java +++ b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestHiveAuthzBindings.java @@ -21,7 +21,7 @@ import java.util.Arrays; import java.util.List; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; @@ -42,6 +42,7 @@ import org.apache.sentry.core.model.db.Database; import org.apache.sentry.core.model.db.Server; import org.apache.sentry.core.model.db.Table; +import org.apache.sentry.provider.common.SentryGroupNotFoundException; import org.apache.sentry.provider.file.PolicyFiles; import org.junit.After; import org.junit.Before; @@ -74,11 +75,9 @@ public class TestHiveAuthzBindings { // Tables private static final String PURCHASES_TAB = "purchases"; - private static final String PAYMENT_TAB = "payments"; // Columns private static final String AGE_COL = "age"; - private static final String NAME_COL = "name"; // Entities private List> inputTabHierarcyList = new ArrayList>(); @@ -299,7 +298,8 @@ public void testValidateCreateFunctionAppropiateURI() throws Exception { testAuth.authorize(HiveOperation.CREATEFUNCTION, createFuncPrivileges, ANALYST_SUBJECT, inputTabHierarcyList, outputTabHierarcyList); } - @Test(expected=AuthorizationException.class) + + @Test(expected = SentryGroupNotFoundException.class) public void testValidateCreateFunctionRejectionForUnknownUser() throws Exception { inputTabHierarcyList.add(Arrays.asList(new DBModelAuthorizable[] { new Server(SERVER1), new AccessURI("file:///path/to/some/lib/dir/my.jar") diff --git a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestHiveAuthzConf.java b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestHiveAuthzConf.java index 06b97e6f1..dccbbb658 100644 --- a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestHiveAuthzConf.java +++ b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestHiveAuthzConf.java @@ -39,7 +39,7 @@ public void setUp() { currentProps = Arrays.asList(new AuthzConfVars[] { AuthzConfVars.AUTHZ_PROVIDER, AuthzConfVars.AUTHZ_PROVIDER_RESOURCE, AuthzConfVars.AUTHZ_SERVER_NAME, AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB, - AuthzConfVars.SENTRY_TESTING_MODE, AuthzConfVars.AUTHZ_UDF_WHITELIST, + AuthzConfVars.SENTRY_TESTING_MODE, AuthzConfVars.AUTHZ_ALLOW_HIVE_IMPERSONATION, AuthzConfVars.AUTHZ_ONFAILURE_HOOKS }); } @@ -50,6 +50,12 @@ public void testConfig() { authzDepConf.get(AuthzConfVars.AUTHZ_PROVIDER_DEPRECATED.getVar())); Assert.assertEquals("org.apache.sentry.provider.file.fooProvider", authzConf.get(AuthzConfVars.AUTHZ_PROVIDER.getVar())); + + // Test the default value of authz server name is an empty 
string. + Assert.assertEquals("", + authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar())); + Assert.assertEquals("", + authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME_DEPRECATED.getVar())); } /** diff --git a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestSentryHiveAuthorizationTaskFactory.java b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestSentryHiveAuthorizationTaskFactory.java index a50a40dee..dfe93a573 100644 --- a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestSentryHiveAuthorizationTaskFactory.java +++ b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestSentryHiveAuthorizationTaskFactory.java @@ -17,7 +17,7 @@ */ package org.apache.sentry.binding.hive; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hive.SentryHiveConstants; diff --git a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestSentryIniPolicyFileFormatter.java b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestSentryIniPolicyFileFormatter.java new file mode 100644 index 000000000..2bfc3399b --- /dev/null +++ b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestSentryIniPolicyFileFormatter.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.sentry.binding.hive; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.Map; +import java.util.Set; + +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.junit.Test; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.io.Files; +import com.google.common.io.Resources; + +public class TestSentryIniPolicyFileFormatter { + + private static final String RESOURCE_PATH = "testImportExportPolicy.ini"; + // define the privileges + public static String PRIVILIEGE1 = "server=server1"; + public static String PRIVILIEGE2 = "server=server1->action=select->grantoption=false"; + public static String PRIVILIEGE3 = "server=server1->db=db2->action=insert->grantoption=true"; + public static String PRIVILIEGE4 = "server=server1->db=db1->table=tbl1->action=insert"; + public static String PRIVILIEGE5 = "server=server1->db=db1->table=tbl2->column=col1->action=insert"; + public static String PRIVILIEGE6 = "server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true"; + public static String PRIVILIEGE7 = "server=server1->db=db1->table=tbl4->column=col1->action=all->grantoption=true"; + public static String PRIVILIEGE8 = "server=server1->uri=hdfs://testserver:9999/path2->action=insert"; + + private Map>> policyFileMappingData1; + private Map>> policyFileMappingData2; + private Map>> policyFileMappingData3; + private Map>> policyFileMappingData4; + private Map>> policyFileMappingData5; + + private void prepareTestData() { + // test data for: + // [groups] + // group1=role1,role2,role3 + // group2=role1,role2,role3 + // group3=role1,role2,role3 + // [roles] + // role1=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8 + // role2=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8 + // role3=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8 + policyFileMappingData1 = Maps.newHashMap(); + Map> groupRolesMap = Maps.newHashMap(); + Map> rolePrivilegesMap = Maps.newHashMap(); + Set roles = Sets.newHashSet("role1", "role2", "role3"); + groupRolesMap.put("group1", roles); + groupRolesMap.put("group2", roles); + groupRolesMap.put("group3", roles); + for (String roleName : roles) { + rolePrivilegesMap.put(roleName, Sets.newHashSet(PRIVILIEGE1, PRIVILIEGE2, PRIVILIEGE3, + PRIVILIEGE4, PRIVILIEGE5, PRIVILIEGE6, PRIVILIEGE7, PRIVILIEGE8)); + } + policyFileMappingData1.put(PolicyFileConstants.GROUPS, groupRolesMap); + policyFileMappingData1.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + + // test data for: + // [groups] + // group1=role1 + // group2=role2 + // group3=role3 + // [roles] + // role1=privilege1,privilege2,privilege3 + // role2=privilege4,privilege5,privilege6 + // role3=privilege7,privilege8 + policyFileMappingData2 = Maps.newHashMap(); + groupRolesMap = Maps.newHashMap(); + rolePrivilegesMap = Maps.newHashMap(); + groupRolesMap.put("group1", Sets.newHashSet("role1")); + groupRolesMap.put("group2", Sets.newHashSet("role2")); + groupRolesMap.put("group3", Sets.newHashSet("role3")); + rolePrivilegesMap.put("role1", Sets.newHashSet(PRIVILIEGE1, PRIVILIEGE2, PRIVILIEGE3)); + rolePrivilegesMap.put("role2", Sets.newHashSet(PRIVILIEGE4, PRIVILIEGE5, PRIVILIEGE6)); + 
rolePrivilegesMap.put("role3", Sets.newHashSet(PRIVILIEGE7, PRIVILIEGE8)); + policyFileMappingData2.put(PolicyFileConstants.GROUPS, groupRolesMap); + policyFileMappingData2.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + + // test data for: + // [groups] + // group1=role1,role2 + // group2=role1,role2,role3 + // group3=role2,role3 + // [roles] + // role1=privilege1,privilege2,privilege3,privilege4 + // role2=privilege3,privilege4,privilege5,privilege6 + // role3=privilege5,privilege6,privilege7,privilege8 + policyFileMappingData3 = Maps.newHashMap(); + groupRolesMap = Maps.newHashMap(); + rolePrivilegesMap = Maps.newHashMap(); + groupRolesMap.put("group1", Sets.newHashSet("role1", "role2")); + groupRolesMap.put("group2", Sets.newHashSet("role1", "role2", "role3")); + groupRolesMap.put("group3", Sets.newHashSet("role2", "role3")); + rolePrivilegesMap.put("role1", + Sets.newHashSet(PRIVILIEGE1, PRIVILIEGE2, PRIVILIEGE3, PRIVILIEGE4)); + rolePrivilegesMap.put("role2", + Sets.newHashSet(PRIVILIEGE3, PRIVILIEGE4, PRIVILIEGE5, PRIVILIEGE6)); + rolePrivilegesMap.put("role3", + Sets.newHashSet(PRIVILIEGE5, PRIVILIEGE6, PRIVILIEGE7, PRIVILIEGE8)); + policyFileMappingData3.put(PolicyFileConstants.GROUPS, groupRolesMap); + policyFileMappingData3.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + + // test data for groups only + policyFileMappingData4 = Maps.newHashMap(); + groupRolesMap = Maps.newHashMap(); + rolePrivilegesMap = Maps.newHashMap(); + groupRolesMap.put("group1", Sets.newHashSet("role1", "role2")); + policyFileMappingData4.put(PolicyFileConstants.GROUPS, groupRolesMap); + policyFileMappingData4.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + + // test empty data + policyFileMappingData5 = Maps.newHashMap(); + groupRolesMap = Maps.newHashMap(); + rolePrivilegesMap = Maps.newHashMap(); + policyFileMappingData5.put(PolicyFileConstants.GROUPS, groupRolesMap); + policyFileMappingData5.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + } + + @Test + public void testImportExport() throws Exception { + prepareTestData(); + File baseDir = Files.createTempDir(); + String resourcePath = (new File(baseDir, RESOURCE_PATH)).getAbsolutePath(); + HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml")); + SentryIniPolicyFileFormatter iniFormatter = new SentryIniPolicyFileFormatter(); + + // test data1 + iniFormatter.write(resourcePath, policyFileMappingData1); + Map>> parsedMappingData = iniFormatter.parse(resourcePath, + authzConf); + validateSentryMappingData(parsedMappingData, policyFileMappingData1); + + // test data2 + iniFormatter.write(resourcePath, policyFileMappingData2); + parsedMappingData = iniFormatter.parse(resourcePath, authzConf); + validateSentryMappingData(parsedMappingData, policyFileMappingData2); + + // test data3 + iniFormatter.write(resourcePath, policyFileMappingData3); + parsedMappingData = iniFormatter.parse(resourcePath, authzConf); + validateSentryMappingData(parsedMappingData, policyFileMappingData3); + + // test data4 + iniFormatter.write(resourcePath, policyFileMappingData4); + parsedMappingData = iniFormatter.parse(resourcePath, authzConf); + assertTrue(parsedMappingData.get(PolicyFileConstants.GROUPS).isEmpty()); + assertTrue(parsedMappingData.get(PolicyFileConstants.ROLES).isEmpty()); + + // test data5 + iniFormatter.write(resourcePath, policyFileMappingData5); + parsedMappingData = iniFormatter.parse(resourcePath, authzConf); + assertTrue(parsedMappingData.get(PolicyFileConstants.GROUPS).isEmpty()); + 
assertTrue(parsedMappingData.get(PolicyFileConstants.ROLES).isEmpty()); + (new File(baseDir, RESOURCE_PATH)).delete(); + } + + // verify the mapping data + public void validateSentryMappingData(Map>> actualMappingData, + Map>> expectedMappingData) { + validateGroupRolesMap(actualMappingData.get(PolicyFileConstants.GROUPS), + expectedMappingData.get(PolicyFileConstants.GROUPS)); + validateRolePrivilegesMap(actualMappingData.get(PolicyFileConstants.ROLES), + expectedMappingData.get(PolicyFileConstants.ROLES)); + } + + // verify the mapping data for [group,role] + private void validateGroupRolesMap(Map> actualMap, + Map> expectedMap) { + assertEquals(expectedMap.keySet().size(), actualMap.keySet().size()); + for (String groupName : actualMap.keySet()) { + Set actualRoles = actualMap.get(groupName); + Set expectedRoles = expectedMap.get(groupName); + assertEquals(actualRoles.size(), expectedRoles.size()); + assertTrue(actualRoles.equals(expectedRoles)); + } + } + + // verify the mapping data for [role,privilege] + private void validateRolePrivilegesMap(Map> actualMap, + Map> expectedMap) { + assertEquals(expectedMap.keySet().size(), actualMap.keySet().size()); + for (String roleName : actualMap.keySet()) { + Set actualPrivileges = actualMap.get(roleName); + Set expectedPrivileges = expectedMap.get(roleName); + assertEquals(expectedPrivileges.size(), actualPrivileges.size()); + for (String actualPrivilege : actualPrivileges) { + boolean isFound = expectedPrivileges.contains(actualPrivilege); + if (!isFound) { + String withOptionPrivilege = PolicyConstants.AUTHORIZABLE_JOINER.join(actualPrivilege, + PolicyConstants.KV_JOINER.join(PolicyFileConstants.PRIVILEGE_GRANT_OPTION_NAME, + "false")); + isFound = expectedPrivileges.contains(withOptionPrivilege); + } + assertTrue(isFound); + } + } + } +} diff --git a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestURI.java b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestURI.java index cdd4e0ba1..8b716c302 100644 --- a/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestURI.java +++ b/sentry-binding/sentry-binding-hive/src/test/java/org/apache/sentry/binding/hive/TestURI.java @@ -19,6 +19,7 @@ import java.io.File; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -72,6 +73,14 @@ public void testParseURICorrectHDFSPrefix() throws SemanticException { HiveAuthzBindingHook.parseURI("hdfs:///some/path").getName()); } + @Test + public void testWarehouseDirWithoutPrefix() throws SemanticException { + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:8020"); + conf.set(ConfVars.METASTOREWAREHOUSE.varname, "/path/to/warehouse"); + Assert.assertEquals("hdfs://localhost:8020/some/path", + HiveAuthzBindingHook.parseURI("/some/path").getName()); + } + @AfterClass public static void clear() { if(baseDir != null) { diff --git a/sentry-binding/sentry-binding-hive/src/test/resources/sentry-site.xml b/sentry-binding/sentry-binding-hive/src/test/resources/sentry-site.xml index 26fdab102..bac5047e3 100644 --- a/sentry-binding/sentry-binding-hive/src/test/resources/sentry-site.xml +++ b/sentry-binding/sentry-binding-hive/src/test/resources/sentry-site.xml @@ -26,9 +26,5 @@ sentry.hive.provider.resource classpath:test-authz-provider.ini - -
sentry.hive.server - myHS2 - diff --git a/sentry-binding/sentry-binding-kafka/pom.xml b/sentry-binding/sentry-binding-kafka/pom.xml new file mode 100644 index 000000000..27422067a --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/pom.xml @@ -0,0 +1,77 @@ + + + + 4.0.0 + + + org.apache.sentry + sentry-binding + 1.7.0-incubating-SNAPSHOT + + + sentry-binding-kafka + Sentry Binding for Kafka + + + + junit + junit + test + + + org.apache.sentry + sentry-core-common + + + org.apache.sentry + sentry-core-model-kafka + + + org.apache.sentry + sentry-policy-kafka + + + org.apache.sentry + sentry-provider-common + + + org.apache.sentry + sentry-provider-file + + + org.apache.sentry + sentry-provider-db + + + org.apache.sentry + sentry-policy-common + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.kafka + kafka_2.11 + provided + + + diff --git a/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/ConvertUtil.java b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/ConvertUtil.java new file mode 100644 index 000000000..c87830815 --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/ConvertUtil.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.kafka; + +import java.util.List; + +import kafka.security.auth.Resource; + +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.model.kafka.Host; + +import com.google.common.collect.Lists; +import org.apache.sentry.core.model.kafka.KafkaAuthorizable; + +public class ConvertUtil { + + public static List convertResourceToAuthorizable(String hostname, + final Resource resource) { + List authorizables = Lists.newArrayList(); + authorizables.add(new Host(hostname)); + authorizables.add(new Authorizable() { + @Override + public String getTypeName() { + final String resourceTypeName = resource.resourceType().name(); + // Kafka's GROUP resource is referred as CONSUMERGROUP within Sentry. 
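+ // (Example, for illustration: an ACL on Resource(GROUP, "my-consumers") becomes + // a Host authorizable for the client address plus a CONSUMERGROUP authorizable named "my-consumers".)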
+ if (resourceTypeName.equalsIgnoreCase("group")) { + return KafkaAuthorizable.AuthorizableType.CONSUMERGROUP.name(); + } else { + return resourceTypeName; + } + } + + @Override + public String getName() { + return resource.name(); + } + }); + return authorizables; + } + +} diff --git a/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/authorizer/SentryKafkaAuthorizer.java b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/authorizer/SentryKafkaAuthorizer.java new file mode 100644 index 000000000..3bce6cc40 --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/authorizer/SentryKafkaAuthorizer.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.kafka.authorizer; + +import kafka.network.RequestChannel; +import kafka.security.auth.Acl; +import kafka.security.auth.Authorizer; +import kafka.security.auth.Operation; +import kafka.security.auth.Resource; +import org.apache.kafka.common.security.auth.KafkaPrincipal; +import org.apache.sentry.kafka.binding.KafkaAuthBinding; +import org.apache.sentry.kafka.binding.KafkaAuthBindingSingleton; +import org.apache.sentry.kafka.conf.KafkaAuthConf; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import scala.collection.immutable.Map; +import scala.collection.immutable.Set; + +import java.util.ArrayList; +import java.util.List; + + +public class SentryKafkaAuthorizer implements Authorizer { + + private final static Logger LOG = LoggerFactory.getLogger(SentryKafkaAuthorizer.class); + private final static String INSTANCE_NAME = KafkaAuthConf.AuthzConfVars.getDefault(KafkaAuthConf.KAFKA_SERVICE_INSTANCE_NAME); + + private KafkaAuthBinding binding; + private String kafkaServiceInstanceName = INSTANCE_NAME; + private String requestorName = KafkaAuthConf.AuthzConfVars.getDefault(KafkaAuthConf.KAFKA_SERVICE_USER_NAME); + + String sentry_site = null; + List super_users = null; + + public SentryKafkaAuthorizer() { + } + + @Override + public boolean authorize(RequestChannel.Session session, Operation operation, + Resource resource) { + LOG.debug("Authorizing Session: " + session + " for Operation: " + operation + " on Resource: " + resource); + final KafkaPrincipal user = session.principal(); + if (isSuperUser(user)) { + LOG.debug("Allowing SuperUser: " + user + " in " + session + " for Operation: " + operation + " on Resource: " + resource); + return true; + } + LOG.debug("User: " + user + " is not a SuperUser"); + return binding.authorize(session, operation, resource); +} + + @Override + public void addAcls(Set acls, final Resource resource) { + binding.addAcls(acls, resource); + } + + @Override + public boolean removeAcls(Set acls, final Resource resource) { + return 
binding.removeAcls(acls, resource); + } + + @Override + public boolean removeAcls(final Resource resource) { + return binding.removeAcls(resource); + } + + @Override + public Set getAcls(Resource resource) { + return binding.getAcls(resource); + } + + @Override + public Map> getAcls(KafkaPrincipal principal) { + return binding.getAcls(principal); + } + + @Override + public Map> getAcls() { + return binding.getAcls(); + } + + @Override + public void close() { + } + + @Override + public void configure(java.util.Map configs) { + final Object sentryKafkaSiteUrlConfig = configs.get(KafkaAuthConf.SENTRY_KAFKA_SITE_URL); + if (sentryKafkaSiteUrlConfig != null) { + this.sentry_site = sentryKafkaSiteUrlConfig.toString(); + } + final Object kafkaSuperUsersConfig = configs.get(KafkaAuthConf.KAFKA_SUPER_USERS); + if (kafkaSuperUsersConfig != null) { + getSuperUsers(kafkaSuperUsersConfig.toString()); + } + final Object kafkaServiceInstanceName = configs.get(KafkaAuthConf.KAFKA_SERVICE_INSTANCE_NAME); + if (kafkaServiceInstanceName != null) { + this.kafkaServiceInstanceName = kafkaServiceInstanceName.toString(); + } + final Object kafkaServiceUserName = configs.get(KafkaAuthConf.KAFKA_SERVICE_USER_NAME); + if (kafkaServiceUserName != null) { + this.requestorName = kafkaServiceUserName.toString(); + } + LOG.info("Configuring Sentry KafkaAuthorizer: " + sentry_site); + final KafkaAuthBindingSingleton instance = KafkaAuthBindingSingleton.getInstance(); + instance.configure(this.kafkaServiceInstanceName, this.requestorName, sentry_site); + this.binding = instance.getAuthBinding(); + } + + private void getSuperUsers(String kafkaSuperUsers) { + super_users = new ArrayList<>(); + String[] superUsers = kafkaSuperUsers.split(";"); + for (String superUser : superUsers) { + if (!superUser.isEmpty()) { + final String trimmedUser = superUser.trim(); + super_users.add(KafkaPrincipal.fromString(trimmedUser)); + LOG.debug("Adding " + trimmedUser + " to list of Kafka SuperUsers."); + } + } + } + + private boolean isSuperUser(KafkaPrincipal user) { + if (super_users != null) { + for (KafkaPrincipal superUser : super_users) { + if (superUser.equals(user)) { + return true; + } + } + } + return false; + } + + /** + * This is not used by Kafka; however, as role is a Sentry-centric entity, having some means to perform role CRUD will be required. + * This method will be used by a Sentry-Kafka CLI that will allow users to perform CRUD of roles and add roles to groups. + */ + public void addRole(String role) { + binding.addRole(role); + } + + /** + * This is not used by Kafka; however, as role is a Sentry-centric entity, having some means to add roles to groups will be required. + * This method will be used by a Sentry-Kafka CLI that will allow users to perform CRUD of roles and add roles to groups. + */ + public void addRoleToGroups(String role, java.util.Set groups) { + binding.addRoleToGroups(role, groups); + } + + /** + * This is not used by Kafka; however, as role is a Sentry-centric entity, having some means to perform role CRUD will be required. + * This method will be used by a Sentry-Kafka CLI that will allow users to perform CRUD of roles and add roles to groups.
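+ * (Illustrative use from such a CLI or a test harness: dropAllRoles() removes every role known to Sentry for the Kafka component.)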
+ */ + public void dropAllRoles() { + binding.dropAllRoles(); + } +} diff --git a/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/binding/KafkaAuthBinding.java b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/binding/KafkaAuthBinding.java new file mode 100644 index 000000000..8f4a8c484 --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/binding/KafkaAuthBinding.java @@ -0,0 +1,498 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.kafka.binding; + +import java.lang.reflect.Constructor; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import kafka.security.auth.Acl; +import kafka.security.auth.Allow; +import kafka.security.auth.Allow$; +import kafka.security.auth.Operation$; +import kafka.security.auth.ResourceType$; +import org.apache.hadoop.conf.Configuration; + +import com.google.common.collect.Sets; +import kafka.network.RequestChannel; +import kafka.security.auth.Operation; +import kafka.security.auth.Resource; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.security.auth.KafkaPrincipal; +import org.apache.sentry.SentryUserException; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.core.model.kafka.KafkaActionFactory; +import org.apache.sentry.core.model.kafka.KafkaActionFactory.KafkaAction; +import org.apache.sentry.core.model.kafka.KafkaAuthorizable; +import org.apache.sentry.kafka.ConvertUtil; +import org.apache.sentry.kafka.conf.KafkaAuthConf.AuthzConfVars; +import org.apache.sentry.policy.common.PolicyEngine; +import org.apache.sentry.provider.common.AuthorizationComponent; +import org.apache.sentry.provider.common.AuthorizationProvider; +import org.apache.sentry.provider.common.ProviderBackend; +import org.apache.sentry.provider.db.generic.SentryGenericProviderBackend; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory; +import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryRole; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import scala.Option; +import scala.Predef; +import scala.Tuple2; +import scala.collection.Iterator; +import scala.collection.JavaConversions; +import scala.collection.immutable.Map; + +public class KafkaAuthBinding { + + private static final 
Logger LOG = LoggerFactory.getLogger(KafkaAuthBinding.class); + private static final String COMPONENT_TYPE = AuthorizationComponent.KAFKA; + private static final String COMPONENT_NAME = COMPONENT_TYPE; + + private final Configuration authConf; + private final AuthorizationProvider authProvider; + private final KafkaActionFactory actionFactory = KafkaActionFactory.getInstance(); + + private ProviderBackend providerBackend; + private String instanceName; + private String requestorName; + + + public KafkaAuthBinding(String instanceName, String requestorName, Configuration authConf) throws Exception { + this.instanceName = instanceName; + this.requestorName = requestorName; + this.authConf = authConf; + this.authProvider = createAuthProvider(); + } + + /** + * Instantiate the configured authz provider + * + * @return {@link AuthorizationProvider} + */ + private AuthorizationProvider createAuthProvider() throws Exception { + /** + * get the authProvider class, policyEngine class, providerBackend class and resources from the + * kafkaAuthConf config + */ + String authProviderName = + authConf.get(AuthzConfVars.AUTHZ_PROVIDER.getVar(), + AuthzConfVars.AUTHZ_PROVIDER.getDefault()); + String resourceName = + authConf.get(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), + AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getDefault()); + String providerBackendName = + authConf.get(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), + AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getDefault()); + String policyEngineName = + authConf.get(AuthzConfVars.AUTHZ_POLICY_ENGINE.getVar(), + AuthzConfVars.AUTHZ_POLICY_ENGINE.getDefault()); + if (resourceName != null && resourceName.startsWith("classpath:")) { + String resourceFileName = resourceName.substring("classpath:".length()); + resourceName = AuthorizationProvider.class.getClassLoader().getResource(resourceFileName).getPath(); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Using authorization provider " + authProviderName + " with resource " + + resourceName + ", policy engine " + policyEngineName + ", provider backend " + + providerBackendName); + } + + // Instantiate the configured providerBackend + Constructor providerBackendConstructor = + Class.forName(providerBackendName) + .getDeclaredConstructor(Configuration.class, String.class); + providerBackendConstructor.setAccessible(true); + providerBackend = + (ProviderBackend) providerBackendConstructor.newInstance(new Object[]{authConf, + resourceName}); + if (providerBackend instanceof SentryGenericProviderBackend) { + ((SentryGenericProviderBackend) providerBackend).setComponentType(COMPONENT_TYPE); + ((SentryGenericProviderBackend) providerBackend).setServiceName(instanceName); + } + + // Instantiate the configured policyEngine + Constructor policyConstructor = + Class.forName(policyEngineName).getDeclaredConstructor(ProviderBackend.class); + policyConstructor.setAccessible(true); + PolicyEngine policyEngine = + (PolicyEngine) policyConstructor.newInstance(new Object[]{providerBackend}); + + // Instantiate the configured authProvider + Constructor constructor = + Class.forName(authProviderName).getDeclaredConstructor(Configuration.class, String.class, + PolicyEngine.class); + constructor.setAccessible(true); + return (AuthorizationProvider) constructor.newInstance(new Object[]{authConf, resourceName, + policyEngine}); + } + + /** + * Authorize access to a Kafka privilege + */ + public boolean authorize(RequestChannel.Session session, Operation operation, Resource resource) { + List authorizables = 
ConvertUtil.convertResourceToAuthorizable(session.clientAddress().getHostAddress(), resource); + Set actions = Sets.newHashSet(actionFactory.getActionByName(operation.name())); + return authProvider.hasAccess(new Subject(getName(session)), authorizables, actions, ActiveRoleSet.ALL); + } + + public void addAcls(scala.collection.immutable.Set acls, final Resource resource) { + verifyAcls(acls); + LOG.info("Adding Acl: acl->" + acls + " resource->" + resource); + + final Iterator iterator = acls.iterator(); + while (iterator.hasNext()) { + final Acl acl = iterator.next(); + final String role = getRole(acl); + if (!roleExists(role)) { + throw new KafkaException("Can not add Acl for non-existent Role: " + role); + } + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.grantPrivilege( + requestorName, role, COMPONENT_NAME, toTSentryPrivilege(acl, resource)); + return null; + } + }); + } + } + + public boolean removeAcls(scala.collection.immutable.Set acls, final Resource resource) { + verifyAcls(acls); + LOG.info("Removing Acl: acl->" + acls + " resource->" + resource); + final Iterator iterator = acls.iterator(); + while (iterator.hasNext()) { + final Acl acl = iterator.next(); + final String role = getRole(acl); + try { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.dropPrivilege( + requestorName, role, toTSentryPrivilege(acl, resource)); + return null; + } + }); + } catch (KafkaException kex) { + LOG.error("Failed to remove acls.", kex); + return false; + } + } + + return true; + } + + public void addRole(final String role) { + if (roleExists(role)) { + throw new KafkaException("Can not create an existing role, " + role + ", again."); + } + + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.createRole( + requestorName, role, COMPONENT_NAME); + return null; + } + }); + } + + public void addRoleToGroups(final String role, final java.util.Set groups) { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.addRoleToGroups( + requestorName, role, COMPONENT_NAME, groups); + return null; + } + }); + } + + public void dropAllRoles() { + final List roles = getAllRoles(); + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + for (String role : roles) { + client.dropRole(requestorName, role, COMPONENT_NAME); + } + return null; + } + }); + } + + private List getRolesforGroup(final String groupName) { + final List roles = new ArrayList<>(); + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + for (TSentryRole tSentryRole : client.listRolesByGroupName(requestorName, groupName, COMPONENT_NAME)) { + roles.add(tSentryRole.getRoleName()); + } + return null; + } + }); + + return roles; + } + + private SentryGenericServiceClient getClient() throws Exception { + return SentryGenericServiceClientFactory.create(this.authConf); + } + + public boolean removeAcls(final Resource resource) { + LOG.info("Removing Acls for Resource: resource->" + resource); + List roles = getAllRoles(); + final List tSentryPrivileges = getAllPrivileges(roles); + try { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + for (TSentryPrivilege tSentryPrivilege : tSentryPrivileges) { + if 
(isPrivilegeForResource(tSentryPrivilege, resource)) {
+            client.dropPrivilege(
+                requestorName, COMPONENT_NAME, tSentryPrivilege);
+          }
+        }
+        return null;
+      }
+    });
+    } catch (KafkaException kex) {
+      LOG.error("Failed to remove acls.", kex);
+      return false;
+    }
+
+    return true;
+  }
+
+  public scala.collection.immutable.Set<Acl> getAcls(final Resource resource) {
+    final Option<scala.collection.immutable.Set<Acl>> acls = getAcls().get(resource);
+    if (acls.nonEmpty())
+      return acls.get();
+    return new scala.collection.immutable.HashSet<Acl>();
+  }
+
+  public Map<Resource, scala.collection.immutable.Set<Acl>> getAcls(KafkaPrincipal principal) {
+    if (principal.getPrincipalType().toLowerCase().equals("group")) {
+      List<String> roles = getRolesforGroup(principal.getName());
+      return getAclsForRoles(roles);
+    } else {
+      LOG.info("Did not recognize Principal type: " + principal.getPrincipalType() + ". Returning Acls for all principals.");
+      return getAcls();
+    }
+  }
+
+  public Map<Resource, scala.collection.immutable.Set<Acl>> getAcls() {
+    final List<String> roles = getAllRoles();
+    return getAclsForRoles(roles);
+  }
+
+  /**
+   * A Command is a closure used to pass a block of code from individual
+   * functions to execute, which centralizes connection error
+   * handling. Command is parameterized on the return type of the function.
+   */
+  private interface Command<T> {
+    T run(SentryGenericServiceClient client) throws Exception;
+  }
+
+  private <T> T execute(Command<T> cmd) throws KafkaException {
+    SentryGenericServiceClient client = null;
+    try {
+      client = getClient();
+      return cmd.run(client);
+    } catch (SentryUserException ex) {
+      String msg = "Unable to execute command on sentry server: " + ex.getMessage();
+      LOG.error(msg, ex);
+      throw new KafkaException(msg, ex);
+    } catch (Exception ex) {
+      String msg = "Unable to obtain client: " + ex.getMessage();
+      LOG.error(msg, ex);
+      throw new KafkaException(msg, ex);
+    } finally {
+      if (client != null) {
+        client.close();
+      }
+    }
+  }
+
+  private TSentryPrivilege toTSentryPrivilege(Acl acl, Resource resource) {
+    final List<Authorizable> authorizables = ConvertUtil.convertResourceToAuthorizable(acl.host(), resource);
+    final List<TAuthorizable> tAuthorizables = new ArrayList<>();
+    for (Authorizable authorizable : authorizables) {
+      tAuthorizables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName()));
+    }
+    TSentryPrivilege tSentryPrivilege = new TSentryPrivilege(COMPONENT_NAME, instanceName, tAuthorizables, acl.operation().name());
+    return tSentryPrivilege;
+  }
+
+  private String getRole(Acl acl) {
+    return acl.principal().getName();
+  }
+
+  private boolean isPrivilegeForResource(TSentryPrivilege tSentryPrivilege, Resource resource) {
+    final java.util.Iterator<TAuthorizable> authorizablesIterator = tSentryPrivilege.getAuthorizablesIterator();
+    while (authorizablesIterator.hasNext()) {
+      TAuthorizable tAuthorizable = authorizablesIterator.next();
+      if (tAuthorizable.getType().equals(resource.resourceType().name())) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private List<TSentryPrivilege> getAllPrivileges(final List<String> roles) {
+    final List<TSentryPrivilege> tSentryPrivileges = new ArrayList<>();
+    execute(new Command<Void>() {
+      @Override
+      public Void run(SentryGenericServiceClient client) throws Exception {
+        for (String role : roles) {
+          tSentryPrivileges.addAll(client.listPrivilegesByRoleName(
+              requestorName, role, COMPONENT_NAME, instanceName));
+        }
+        return null;
+      }
+    });
+
+    return tSentryPrivileges;
+  }
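
The Command interface above is a closure pattern: execute() owns client creation, error translation, and cleanup, so each operation only supplies its body. A minimal, self-contained sketch of the same idiom, using a stand-in client rather than Sentry's real SentryGenericServiceClient:

    import java.util.Arrays;
    import java.util.List;

    public class CommandSketch {
      // Stand-in for a remote client that must be opened and closed per call.
      static class FakeClient implements AutoCloseable {
        List<String> listRoles() {
          return Arrays.asList("admin", "analyst");
        }
        @Override
        public void close() {
          // release connection resources here
        }
      }

      // The closure type, parameterized on the operation's result type.
      interface Command<T> {
        T run(FakeClient client) throws Exception;
      }

      // Centralizes lifecycle and error handling, mirroring execute() above.
      static <T> T execute(Command<T> cmd) {
        try (FakeClient client = new FakeClient()) {
          return cmd.run(client);
        } catch (Exception ex) {
          throw new RuntimeException("Unable to execute command: " + ex.getMessage(), ex);
        }
      }

      public static void main(String[] args) {
        List<String> roles = execute(new Command<List<String>>() {
          @Override
          public List<String> run(FakeClient client) {
            return client.listRoles(); // only the operation body varies per call site
          }
        });
        System.out.println(roles); // [admin, analyst]
      }
    }
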
+  private List<String> getAllRoles() {
+    final List<String> roles = new ArrayList<>();
+    execute(new Command<Void>() {
+      @Override
+      public Void run(SentryGenericServiceClient client) throws Exception {
+        for (TSentryRole tSentryRole : client.listAllRoles(requestorName, COMPONENT_NAME)) {
+          roles.add(tSentryRole.getRoleName());
+        }
+        return null;
+      }
+    });
+
+    return roles;
+  }
+
+  private Map<Resource, scala.collection.immutable.Set<Acl>> getAclsForRoles(final List<String> roles) {
+    return scala.collection.JavaConverters.mapAsScalaMapConverter(
+        rolePrivilegesToResourceAcls(getRoleToPrivileges(roles)))
+        .asScala().toMap(Predef.<Tuple2<Resource, scala.collection.immutable.Set<Acl>>>conforms());
+  }
+
+  private java.util.Map<Resource, scala.collection.immutable.Set<Acl>> rolePrivilegesToResourceAcls(java.util.Map<String, scala.collection.immutable.Set<TSentryPrivilege>> rolePrivilegesMap) {
+    final java.util.Map<Resource, scala.collection.immutable.Set<Acl>> resourceAclsMap = new HashMap<>();
+    for (String role : rolePrivilegesMap.keySet()) {
+      scala.collection.immutable.Set<TSentryPrivilege> privileges = rolePrivilegesMap.get(role);
+      final Iterator<TSentryPrivilege> iterator = privileges.iterator();
+      while (iterator.hasNext()) {
+        TSentryPrivilege privilege = iterator.next();
+        final List<TAuthorizable> authorizables = privilege.getAuthorizables();
+        String host = null;
+        String operation = privilege.getAction();
+        for (TAuthorizable tAuthorizable : authorizables) {
+          if (tAuthorizable.getType().equals(KafkaAuthorizable.AuthorizableType.HOST.name())) {
+            host = tAuthorizable.getName();
+          } else {
+            Resource resource = new Resource(ResourceType$.MODULE$.fromString(tAuthorizable.getType()), tAuthorizable.getName());
+            if (operation.equals("*")) {
+              operation = "All";
+            }
+            Acl acl = new Acl(new KafkaPrincipal("role", role), Allow$.MODULE$, host, Operation$.MODULE$.fromString(operation));
+            Set<Acl> newAclsJava = new HashSet<Acl>();
+            newAclsJava.add(acl);
+            addExistingAclsForResource(resourceAclsMap, resource, newAclsJava);
+            final scala.collection.mutable.Set<Acl> aclScala = JavaConversions.asScalaSet(newAclsJava);
+            resourceAclsMap.put(resource, aclScala.toSet());
+          }
+        }
+      }
+    }
+
+    return resourceAclsMap;
+  }
+
+  private java.util.Map<String, scala.collection.immutable.Set<TSentryPrivilege>> getRoleToPrivileges(final List<String> roles) {
+    final java.util.Map<String, scala.collection.immutable.Set<TSentryPrivilege>> rolePrivilegesMap = new HashMap<>();
+    execute(new Command<Void>() {
+      @Override
+      public Void run(SentryGenericServiceClient client) throws Exception {
+        for (String role : roles) {
+          final Set<TSentryPrivilege> rolePrivileges = client.listPrivilegesByRoleName(
+              requestorName, role, COMPONENT_NAME, instanceName);
+          final scala.collection.immutable.Set<TSentryPrivilege> rolePrivilegesScala =
+              scala.collection.JavaConverters.asScalaSetConverter(rolePrivileges).asScala().toSet();
+          rolePrivilegesMap.put(role, rolePrivilegesScala);
+        }
+        return null;
+      }
+    });
+
+    return rolePrivilegesMap;
+  }
+
+  private void addExistingAclsForResource(java.util.Map<Resource, scala.collection.immutable.Set<Acl>> resourceAclsMap, Resource resource, java.util.Set<Acl> newAclsJava) {
+    final scala.collection.immutable.Set<Acl> existingAcls = resourceAclsMap.get(resource);
+    if (existingAcls != null) {
+      final Iterator<Acl> aclsIter = existingAcls.iterator();
+      while (aclsIter.hasNext()) {
+        Acl curAcl = aclsIter.next();
+        newAclsJava.add(curAcl);
+      }
+    }
+  }
+
+  private boolean roleExists(String role) {
+    return getAllRoles().contains(role);
+  }
+
+  private void verifyAcls(scala.collection.immutable.Set<Acl> acls) {
+    final Iterator<Acl> iterator = acls.iterator();
+    while (iterator.hasNext()) {
+      final Acl acl = iterator.next();
+      assert acl.principal().getPrincipalType().toLowerCase().equals("role") : "Only Acls with KafkaPrincipal of type \"role\" are supported.";
+      assert acl.permissionType().name().equals(Allow.name()) : "Only Acls with permission of type \"Allow\" are supported.";
+    }
+  }
+
+  /*
+   * For SSL sessions, Kafka creates user names with "CN=" prepended to the user name.
+   * Sentry uses "=" as the separator when parsing key-value pairs, so the "CN="
+   * prefix has to be stripped off.
+   */
+  private String getName(RequestChannel.Session session) {
+    final String principalName = session.principal().getName();
+    int start = principalName.indexOf("CN=");
+    if (start >= 0) {
+      String tmpName, name = "";
+      tmpName = principalName.substring(start + 3);
+      int end = tmpName.indexOf(",");
+      if (end > 0) {
+        name = tmpName.substring(0, end);
+      } else {
+        name = tmpName;
+      }
+      return name;
+    } else {
+      return principalName;
+    }
+  }
+}
diff --git a/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/binding/KafkaAuthBindingSingleton.java b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/binding/KafkaAuthBindingSingleton.java
new file mode 100644
index 000000000..a0007a3e3
--- /dev/null
+++ b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/binding/KafkaAuthBindingSingleton.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.kafka.binding;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+
+import org.apache.sentry.kafka.conf.KafkaAuthConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Strings;
+
+public class KafkaAuthBindingSingleton {
+  private static Logger log = LoggerFactory.getLogger(KafkaAuthBindingSingleton.class);
+
+  // Lazy init holder class idiom to avoid DCL
+  private static class KafkaAuthBindingSingletonHolder {
+    static final KafkaAuthBindingSingleton instance = new KafkaAuthBindingSingleton();
+  }
+
+  private static KafkaAuthConf kafkaAuthConf = null;
+
+  private KafkaAuthBinding binding;
+
+  private KafkaAuthBindingSingleton() {
+  }
+
+  private KafkaAuthConf loadAuthzConf(String sentry_site) {
+    if (Strings.isNullOrEmpty(sentry_site)) {
+      throw new IllegalArgumentException("Configuration key " + KafkaAuthConf.SENTRY_KAFKA_SITE_URL
+          + " value '" + sentry_site + "' is invalid.");
+    }
+
+    KafkaAuthConf kafkaAuthConf = null;
+    try {
+      kafkaAuthConf = new KafkaAuthConf(new URL(sentry_site));
+    } catch (MalformedURLException e) {
+      throw new IllegalArgumentException("Configuration key " + KafkaAuthConf.SENTRY_KAFKA_SITE_URL
+          + " specifies a malformed URL '" + sentry_site + "'", e);
+    }
+    return kafkaAuthConf;
+  }
+
+  public void configure(String instanceName, String requestorName, String sentry_site) {
+    try {
+      kafkaAuthConf = loadAuthzConf(sentry_site);
+      binding = new KafkaAuthBinding(instanceName, requestorName, kafkaAuthConf);
+      log.info("KafkaAuthBinding created successfully");
+    } catch (Exception ex) {
+      log.error("Unable to create KafkaAuthBinding", ex);
+      throw new RuntimeException("Unable to create KafkaAuthBinding: " + ex.getMessage(), ex);
+    }
+  }
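
The nested holder class above is the standard initialization-on-demand idiom: the holder is not loaded until getInstance() first references it, and the JVM serializes class initialization, so no synchronized block or double-checked locking (DCL) is needed. A self-contained sketch of the idiom in isolation:

    public class LazySingleton {
      private LazySingleton() {
        System.out.println("constructed on first use");
      }

      // Not loaded until getInstance() references it; class initialization
      // is thread-safe by JVM guarantee, without explicit locking.
      private static class Holder {
        static final LazySingleton INSTANCE = new LazySingleton();
      }

      public static LazySingleton getInstance() {
        return Holder.INSTANCE;
      }

      public static void main(String[] args) {
        System.out.println("before first use");
        LazySingleton a = getInstance();
        LazySingleton b = getInstance();
        System.out.println("same instance: " + (a == b)); // true
      }
    }
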
+  public static KafkaAuthBindingSingleton getInstance() {
+    return KafkaAuthBindingSingletonHolder.instance;
+  }
+
+  public KafkaAuthBinding getAuthBinding() {
+    if (binding == null) {
+      throw new RuntimeException("KafkaAuthBindingSingleton not configured yet.");
+    }
+    return binding;
+  }
+
+  public KafkaAuthConf getKafkaAuthConf() {
+    if (binding == null) {
+      throw new RuntimeException("KafkaAuthBindingSingleton not configured yet.");
+    }
+    return kafkaAuthConf;
+  }
+}
diff --git a/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/conf/KafkaAuthConf.java b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/conf/KafkaAuthConf.java
new file mode 100644
index 000000000..e0d767ec3
--- /dev/null
+++ b/sentry-binding/sentry-binding-kafka/src/main/java/org/apache/sentry/kafka/conf/KafkaAuthConf.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.sentry.kafka.conf;
+
+import java.net.URL;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.policy.kafka.SimpleKafkaPolicyEngine;
+import org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider;
+import org.apache.sentry.provider.db.generic.SentryGenericProviderBackend;
+
+public class KafkaAuthConf extends Configuration {
+  /**
+   * Configuration key used in kafka.properties to point at sentry-site.xml
+   */
+  public static final String SENTRY_KAFKA_SITE_URL = "sentry.kafka.site.url";
+  public static final String AUTHZ_SITE_FILE = "sentry-site.xml";
+  public static final String KAFKA_SUPER_USERS = "kafka.superusers";
+  public static final String KAFKA_SERVICE_INSTANCE_NAME = "sentry.kafka.service.instance";
+  public static final String KAFKA_SERVICE_USER_NAME = "sentry.kafka.service.user.name";
+
+  /**
+   * Config setting definitions
+   */
+  public static enum AuthzConfVars {
+    AUTHZ_PROVIDER("sentry.kafka.provider", HadoopGroupResourceAuthorizationProvider.class.getName()),
+    AUTHZ_PROVIDER_RESOURCE("sentry.kafka.provider.resource", ""),
+    AUTHZ_PROVIDER_BACKEND("sentry.kafka.provider.backend", SentryGenericProviderBackend.class.getName()),
+    AUTHZ_POLICY_ENGINE("sentry.kafka.policy.engine", SimpleKafkaPolicyEngine.class.getName()),
+    AUTHZ_INSTANCE_NAME(KAFKA_SERVICE_INSTANCE_NAME, "kafka"),
+    AUTHZ_SERVICE_USER_NAME(KAFKA_SERVICE_USER_NAME, "kafka");
+
+    private final String varName;
+    private final String defaultVal;
+
+    AuthzConfVars(String varName, String defaultVal) {
+      this.varName = varName;
+      this.defaultVal = defaultVal;
+    }
+
+    public String getVar() {
+      return varName;
+    }
+
+    public String getDefault() {
+      return defaultVal;
+    }
+
+    public static String getDefault(String varName) {
+      for (AuthzConfVars oneVar : AuthzConfVars.values()) {
+        if (oneVar.getVar().equalsIgnoreCase(varName)) {
+          return oneVar.getDefault();
+        }
+      }
+      return null;
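
Together with the overridden get(String) further below, this default table means callers never hard-code fallbacks: an unset key resolves to its AuthzConfVars default. A short usage sketch (the site-file path is a hypothetical example, not part of this patch):

    import java.net.URL;

    import org.apache.sentry.kafka.conf.KafkaAuthConf;
    import org.apache.sentry.kafka.conf.KafkaAuthConf.AuthzConfVars;

    public class KafkaAuthConfDemo {
      public static void main(String[] args) throws Exception {
        // Hypothetical location; any URL the broker can read works.
        KafkaAuthConf conf = new KafkaAuthConf(new URL("file:///etc/kafka/sentry-site.xml"));

        // Key absent from the site file: falls back to the enum default,
        // here HadoopGroupResourceAuthorizationProvider.
        String provider = conf.get(AuthzConfVars.AUTHZ_PROVIDER.getVar());

        // Key with no AuthzConfVars entry: plain Configuration behavior (null if unset).
        String superUsers = conf.get(KafkaAuthConf.KAFKA_SUPER_USERS);

        System.out.println(provider + " / " + superUsers);
      }
    }
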
} + } + + public KafkaAuthConf(URL kafkaAuthzSiteURL) { + super(true); + addResource(kafkaAuthzSiteURL); + } + + @Override + public String get(String varName) { + return get(varName, AuthzConfVars.getDefault(varName)); + } +} diff --git a/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/MockGroupMappingServiceProvider.java b/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/MockGroupMappingServiceProvider.java new file mode 100644 index 000000000..48f0d3df4 --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/MockGroupMappingServiceProvider.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.kafka; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.security.GroupMappingServiceProvider; + +import com.google.common.collect.Lists; + +public class MockGroupMappingServiceProvider implements GroupMappingServiceProvider { + + public MockGroupMappingServiceProvider() { + } + + @Override + public List getGroups(String user) throws IOException { + return Lists.newArrayList(user); + } + + @Override + public void cacheGroupsRefresh() throws IOException { + } + + @Override + public void cacheGroupsAdd(List groups) throws IOException { + } + +} diff --git a/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/authorizer/ConvertUtilTest.java b/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/authorizer/ConvertUtilTest.java new file mode 100644 index 000000000..e08d44212 --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/authorizer/ConvertUtilTest.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.kafka.authorizer; + +import junit.framework.Assert; +import kafka.security.auth.Resource; +import kafka.security.auth.Resource$; +import kafka.security.auth.ResourceType$; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.model.kafka.KafkaAuthorizable; +import org.apache.sentry.kafka.ConvertUtil; +import org.junit.Test; + +import java.util.List; + +public class ConvertUtilTest { + + @Test + public void testCluster() { + String hostname = "localhost"; + String clusterName = Resource$.MODULE$.ClusterResourceName(); + Resource clusterResource = new Resource(ResourceType$.MODULE$.fromString("cluster"), clusterName); + List authorizables = ConvertUtil.convertResourceToAuthorizable(hostname, clusterResource); + for (Authorizable auth : authorizables) { + if (auth.getTypeName().equalsIgnoreCase(KafkaAuthorizable.AuthorizableType.CLUSTER.name())) { + Assert.assertEquals(auth.getName(), clusterName); + } else if (auth.getTypeName().equalsIgnoreCase(KafkaAuthorizable.AuthorizableType.HOST.name())) { + Assert.assertEquals(auth.getName(), hostname); + } else { + Assert.fail("Unexpected type found: " + auth.getTypeName()); + } + } + Assert.assertEquals(authorizables.size(), 2); + } + + @Test + public void testTopic() { + String hostname = "localhost"; + String topicName = "t1"; + Resource topicResource = new Resource(ResourceType$.MODULE$.fromString("topic"), topicName); + List authorizables = ConvertUtil.convertResourceToAuthorizable(hostname, topicResource); + for (Authorizable auth : authorizables) { + if (auth.getTypeName().equalsIgnoreCase(KafkaAuthorizable.AuthorizableType.TOPIC.name())) { + Assert.assertEquals(auth.getName(), topicName); + } else if (auth.getTypeName().equalsIgnoreCase(KafkaAuthorizable.AuthorizableType.HOST.name())) { + Assert.assertEquals(auth.getName(), hostname); + } else { + Assert.fail("Unexpected type found: " + auth.getTypeName()); + } + } + Assert.assertEquals(authorizables.size(), 2); + } + + @Test + public void testConsumerGroup() { + String hostname = "localhost"; + String consumerGroup = "g1"; + Resource consumerGroupResource = new Resource(ResourceType$.MODULE$.fromString("group"), consumerGroup); + List authorizables = ConvertUtil.convertResourceToAuthorizable(hostname, consumerGroupResource); + for (Authorizable auth : authorizables) { + if (auth.getTypeName().equalsIgnoreCase(KafkaAuthorizable.AuthorizableType.CONSUMERGROUP.name())) { + Assert.assertEquals(auth.getName(),consumerGroup); + } else if (auth.getTypeName().equalsIgnoreCase(KafkaAuthorizable.AuthorizableType.HOST.name())) { + Assert.assertEquals(auth.getName(),hostname); + } else { + Assert.fail("Unexpected type found: " + auth.getTypeName()); + } + } + Assert.assertEquals(authorizables.size(), 2); + } +} diff --git a/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/authorizer/SentryKafkaAuthorizerTest.java b/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/authorizer/SentryKafkaAuthorizerTest.java new file mode 100644 index 000000000..f40d8c2d0 --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/src/test/java/org/apache/sentry/kafka/authorizer/SentryKafkaAuthorizerTest.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.kafka.authorizer; + +import kafka.network.RequestChannel; +import kafka.security.auth.Operation$; +import kafka.security.auth.Resource; +import kafka.security.auth.Resource$; +import kafka.security.auth.ResourceType$; +import kafka.server.KafkaConfig; +import org.apache.kafka.common.security.auth.KafkaPrincipal; +import org.apache.sentry.kafka.conf.KafkaAuthConf; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Properties; + +public class SentryKafkaAuthorizerTest { + + private SentryKafkaAuthorizer authorizer; + private InetAddress testHostName1; + private InetAddress testHostName2; + private String resourceName; + private Resource clusterResource; + private Resource topic1Resource; + private KafkaConfig config; + + public SentryKafkaAuthorizerTest() throws UnknownHostException { + authorizer = new SentryKafkaAuthorizer(); + testHostName1 = InetAddress.getByAddress("host1", new byte[] {1, 2, 3, 4}); + testHostName2 = InetAddress.getByAddress("host2", new byte[] {2, 3, 4, 5}); + resourceName = Resource$.MODULE$.ClusterResourceName(); + clusterResource = new Resource(ResourceType$.MODULE$.fromString("cluster"), resourceName); + topic1Resource = new Resource(ResourceType$.MODULE$.fromString("topic"), "t1"); + } + + @Before + public void setUp() { + Properties props = new Properties(); + String sentry_site_path = SentryKafkaAuthorizerTest.class.getClassLoader().getResource(KafkaAuthConf.AUTHZ_SITE_FILE).getPath(); + // Kafka check this prop when creating a config instance + props.put("zookeeper.connect", "test"); + props.put("sentry.kafka.site.url", "file://" + sentry_site_path); + + config = KafkaConfig.fromProps(props); + authorizer.configure(config.originals()); + } + + @Test + public void testAdmin() { + + KafkaPrincipal admin = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "admin"); + RequestChannel.Session host1Session = new RequestChannel.Session(admin, testHostName1); + RequestChannel.Session host2Session = new RequestChannel.Session(admin, testHostName2); + + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Create"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Describe"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("ClusterAction"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Read"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Write"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Create"), topic1Resource)); + Assert.assertTrue("Test 
failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Delete"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Alter"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Describe"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("ClusterAction"),topic1Resource)); + + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Create"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Describe"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("ClusterAction"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Read"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Write"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Create"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Delete"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Alter"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Describe"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("ClusterAction"), topic1Resource)); + } + + @Test + public void testSubAdmin() { + KafkaPrincipal admin = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "subadmin"); + RequestChannel.Session host1Session = new RequestChannel.Session(admin, testHostName1); + RequestChannel.Session host2Session = new RequestChannel.Session(admin, testHostName2); + + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Create"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Describe"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("ClusterAction"), clusterResource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Read"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Write"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Create"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Delete"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Alter"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("Describe"), topic1Resource)); + Assert.assertTrue("Test failed.", authorizer.authorize(host1Session, Operation$.MODULE$.fromString("ClusterAction"),topic1Resource)); + + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, 
Operation$.MODULE$.fromString("Create"), clusterResource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Describe"), clusterResource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("ClusterAction"), clusterResource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Read"), topic1Resource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Write"), topic1Resource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Create"), topic1Resource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Delete"), topic1Resource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Alter"), topic1Resource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("Describe"), topic1Resource)); + Assert.assertFalse("Test failed.", authorizer.authorize(host2Session, Operation$.MODULE$.fromString("ClusterAction"), topic1Resource)); + + } +} diff --git a/sentry-binding/sentry-binding-kafka/src/test/resources/core-site.xml b/sentry-binding/sentry-binding-kafka/src/test/resources/core-site.xml new file mode 100644 index 000000000..61a046391 --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/src/test/resources/core-site.xml @@ -0,0 +1,26 @@ + + + + + + + hadoop.security.group.mapping + org.apache.sentry.kafka.MockGroupMappingServiceProvider + + + diff --git a/sentry-binding/sentry-binding-kafka/src/test/resources/log4j.properties b/sentry-binding/sentry-binding-kafka/src/test/resources/log4j.properties new file mode 100644 index 000000000..d42c02c1e --- /dev/null +++ b/sentry-binding/sentry-binding-kafka/src/test/resources/log4j.properties @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/sentry-binding/sentry-binding-kafka/src/test/resources/log4j.properties b/sentry-binding/sentry-binding-kafka/src/test/resources/log4j.properties
new file mode 100644
index 000000000..d42c02c1e
--- /dev/null
+++ b/sentry-binding/sentry-binding-kafka/src/test/resources/log4j.properties
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+sentry.root.logger=DEBUG,console
+log4j.rootLogger=${sentry.root.logger}
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.out
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n
+
+log4j.logger.kafka.utils.Logging=WARN
+log4j.logger.org.apache.kafka=WARN
+log4j.logger.org.apache.sentry=DEBUG
+log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.I0Itec.zkclient=WARN
+log4j.logger.org.apache.hadoop=WARN
+log4j.category.DataNucleus=OFF
diff --git a/sentry-binding/sentry-binding-kafka/src/test/resources/sentry-site.xml b/sentry-binding/sentry-binding-kafka/src/test/resources/sentry-site.xml
new file mode 100644
index 000000000..69ce5a781
--- /dev/null
+++ b/sentry-binding/sentry-binding-kafka/src/test/resources/sentry-site.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>sentry.kafka.provider</name>
+    <value>org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider</value>
+  </property>
+  <property>
+    <name>hadoop.security.group.mapping</name>
+    <value>test</value>
+  </property>
+  <property>
+    <name>sentry.kafka.provider.resource</name>
+    <value>classpath:test-authz-provider.ini</value>
+  </property>
+  <property>
+    <name>sentry.kafka.policy.engine</name>
+    <value>org.apache.sentry.policy.kafka.SimpleKafkaPolicyEngine</value>
+  </property>
+  <property>
+    <name>sentry.kafka.provider.backend</name>
+    <value>org.apache.sentry.provider.file.SimpleFileProviderBackend</value>
+  </property>
+</configuration>
diff --git a/sentry-binding/sentry-binding-kafka/src/test/resources/test-authz-provider.ini b/sentry-binding/sentry-binding-kafka/src/test/resources/test-authz-provider.ini
new file mode 100644
index 000000000..520e1d032
--- /dev/null
+++ b/sentry-binding/sentry-binding-kafka/src/test/resources/test-authz-provider.ini
@@ -0,0 +1,38 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Each entry maps a group (as resolved by hadoop.security.group.mapping) to roles.
+[groups]
+admin = admin_all
+subadmin = admin_host1
+consumer0 = consumer_t1_all
+consumer1 = consumer_t1_host1
+consumer2 = consumer_t2_host2
+producer0 = producer_t1_all
+producer1 = producer_t1_host1
+producer2 = producer_t2_host2
+consumer_producer0 = consumer_producer_t1
+
+# Each role is a privilege: authorizable=value parts chained with "->", ending in an action.
+[roles]
+admin_all = host=*->action=all
+admin_host1 = host=1.2.3.4->action=all
+consumer_t1_all = host=*->topic=t1->action=read
+consumer_t1_host1 = host=host1->topic=t1->action=read
+consumer_t2_host2 = host=host2->topic=t2->action=read
+producer_t1_all = host=*->topic=t1->action=write
+producer_t1_host1 = host=host1->topic=t1->action=write
+producer_t2_host2 = host=host2->topic=t2->action=write
+consumer_producer_t1 = host=host1->topic=t1->action=all
diff --git a/sentry-binding/sentry-binding-solr/pom.xml b/sentry-binding/sentry-binding-solr/pom.xml
index 4e785e737..e8e3013ac 100644
--- a/sentry-binding/sentry-binding-solr/pom.xml
+++ b/sentry-binding/sentry-binding-solr/pom.xml
@@ -22,7 +22,7 @@ limitations under the License.
org.apache.sentry sentry-binding - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-binding-solr @@ -77,7 +77,7 @@ limitations under the License. org.apache.maven.plugins maven-jar-plugin - 2.2 + 2.4 diff --git a/sentry-binding/sentry-binding-solr/src/main/java/org/apache/sentry/binding/solr/authz/SolrAuthzBinding.java b/sentry-binding/sentry-binding-solr/src/main/java/org/apache/sentry/binding/solr/authz/SolrAuthzBinding.java index 373ee8c7f..a6d6c8b3e 100644 --- a/sentry-binding/sentry-binding-solr/src/main/java/org/apache/sentry/binding/solr/authz/SolrAuthzBinding.java +++ b/sentry-binding/sentry-binding-solr/src/main/java/org/apache/sentry/binding/solr/authz/SolrAuthzBinding.java @@ -16,33 +16,46 @@ */ package org.apache.sentry.binding.solr.authz; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.sentry.core.model.search.SearchConstants.SENTRY_SEARCH_SERVICE_DEFAULT; +import static org.apache.sentry.core.model.search.SearchConstants.SENTRY_SEARCH_SERVICE_KEY; +import static org.apache.sentry.core.model.search.SearchModelAuthorizable.AuthorizableType.Collection; + import java.io.File; import java.io.IOException; import java.lang.reflect.Constructor; import java.util.Arrays; +import java.util.List; import java.util.Set; import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; import org.apache.sentry.SentryUserException; import org.apache.sentry.binding.solr.conf.SolrAuthzConf; import org.apache.sentry.binding.solr.conf.SolrAuthzConf.AuthzConfVars; +import org.apache.sentry.core.common.Action; import org.apache.sentry.core.common.ActiveRoleSet; import org.apache.sentry.core.common.Subject; import org.apache.sentry.core.model.search.Collection; import org.apache.sentry.core.model.search.SearchModelAction; import org.apache.sentry.policy.common.PolicyEngine; +import org.apache.sentry.provider.common.AuthorizationComponent; import org.apache.sentry.provider.common.AuthorizationProvider; import org.apache.sentry.provider.common.GroupMappingService; +import org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider; import org.apache.sentry.provider.common.ProviderBackend; -import org.apache.sentry.provider.db.generic.service.thrift.SearchPolicyServiceClient; -import org.apache.sentry.provider.db.generic.service.thrift.SearchProviderBackend; +import org.apache.sentry.provider.db.generic.SentryGenericProviderBackend; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory; +import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Strings; +import com.google.common.collect.Lists; public class SolrAuthzBinding { private static final Logger LOG = LoggerFactory @@ -84,21 +97,39 @@ private AuthorizationProvider getAuthProvider() throws Exception { authzConf.get(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar()); String policyEngineName = authzConf.get(AuthzConfVars.AUTHZ_POLICY_ENGINE.getVar()); + String serviceName = 
authzConf.get(SENTRY_SEARCH_SERVICE_KEY, SENTRY_SEARCH_SERVICE_DEFAULT); LOG.debug("Using authorization provider " + authProviderName + " with resource " + resourceName + ", policy engine " + policyEngineName + ", provider backend " + providerBackendName); // load the provider backend class + if (kerberosEnabledProp.equalsIgnoreCase("true")) { + initKerberos(keytabProp, principalProp); + } else { + // set configuration so that group mappings are properly setup even if + // we don't use kerberos, for testing + UserGroupInformation.setConfiguration(authzConf); + } + + // the SearchProviderBackend is deleted in SENTRY-828, this is for the compatible with the + // previous Sentry. + if ("org.apache.sentry.provider.db.generic.service.thrift.SearchProviderBackend" + .equals(providerBackendName)) { + providerBackendName = SentryGenericProviderBackend.class.getName(); + } Constructor providerBackendConstructor = Class.forName(providerBackendName).getDeclaredConstructor(Configuration.class, String.class); providerBackendConstructor.setAccessible(true); - if (kerberosEnabledProp.equalsIgnoreCase("true")) { - initKerberos(keytabProp, principalProp); - } providerBackend = (ProviderBackend) providerBackendConstructor.newInstance(new Object[] {authzConf, resourceName}); + if (providerBackend instanceof SentryGenericProviderBackend) { + ((SentryGenericProviderBackend) providerBackend) + .setComponentType(AuthorizationComponent.Search); + ((SentryGenericProviderBackend) providerBackend).setServiceName(serviceName); + } + // load the policy engine class Constructor policyConstructor = Class.forName(policyEngineName).getDeclaredConstructor(ProviderBackend.class); @@ -106,6 +137,12 @@ private AuthorizationProvider getAuthProvider() throws Exception { PolicyEngine policyEngine = (PolicyEngine) policyConstructor.newInstance(new Object[] {providerBackend}); + // if unset, set the hadoop auth provider to use new groups, so we don't + // conflict with the group mappings that may already be set up + if (authzConf.get(HadoopGroupResourceAuthorizationProvider.USE_NEW_GROUPS) == null) { + authzConf.setBoolean(HadoopGroupResourceAuthorizationProvider.USE_NEW_GROUPS ,true); + } + // load the authz provider class Constructor constrctor = Class.forName(authProviderName).getDeclaredConstructor(Configuration.class, String.class, PolicyEngine.class); @@ -191,7 +228,7 @@ public void initKerberos(String keytabFile, String principal) { } synchronized (SolrAuthzBinding.class) { if (kerberosInit == null) { - kerberosInit = new Boolean(true); + kerberosInit = Boolean.TRUE; final String authVal = authzConf.get(HADOOP_SECURITY_AUTHENTICATION); final String kerberos = "kerberos"; if (authVal != null && !authVal.equals(kerberos)) { @@ -221,11 +258,11 @@ public void initKerberos(String keytabFile, String principal) { * If the binding uses the searchProviderBackend, it can sync privilege with Sentry Service */ public boolean isSyncEnabled() { - return (providerBackend instanceof SearchProviderBackend); + return providerBackend instanceof SentryGenericProviderBackend; } - public SearchPolicyServiceClient getClient() throws Exception { - return new SearchPolicyServiceClient(authzConf); + public SentryGenericServiceClient getClient() throws Exception { + return SentryGenericServiceClientFactory.create(authzConf); } /** @@ -237,10 +274,19 @@ public void deleteCollectionPrivilege(String collection) throws SentrySolrAuthor if (!isSyncEnabled()) { return; } - SearchPolicyServiceClient client = null; + SentryGenericServiceClient client = null; 
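
With SENTRY-828 the Search-specific client is gone; a collection privilege is now expressed as a component-generic TSentryPrivilege, as the rest of this hunk shows. Because the change is scattered across +/- lines, here is a consolidated sketch of the resulting call sequence (the helper name is hypothetical; fields, imports, and closing of the client are assumed to follow this patch and the Kafka binding above):

    // Sketch only: drop all privileges on one Solr collection via the generic client.
    private void dropAllPrivilegesForCollection(String collection) throws Exception {
      SentryGenericServiceClient client = SentryGenericServiceClientFactory.create(authzConf);
      try {
        TSentryPrivilege priv = new TSentryPrivilege();
        priv.setComponent(AuthorizationComponent.Search);
        priv.setServiceName(authzConf.get(SENTRY_SEARCH_SERVICE_KEY, SENTRY_SEARCH_SERVICE_DEFAULT));
        priv.setAction(Action.ALL);
        priv.setGrantOption(TSentryGrantOption.UNSET);
        priv.setAuthorizables(Lists.newArrayList(new TAuthorizable(Collection.name(), collection)));
        // requestor is the Solr service subject, as in deleteCollectionPrivilege below
        client.dropPrivilege(bindingSubject.getName(), AuthorizationComponent.Search, priv);
      } finally {
        client.close();
      }
    }
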
try { client = getClient(); - client.dropCollectionPrivilege(collection, bindingSubject.getName()); + TSentryPrivilege tPrivilege = new TSentryPrivilege(); + tPrivilege.setComponent(AuthorizationComponent.Search); + tPrivilege.setServiceName(authzConf.get(SENTRY_SEARCH_SERVICE_KEY, + SENTRY_SEARCH_SERVICE_DEFAULT)); + tPrivilege.setAction(Action.ALL); + tPrivilege.setGrantOption(TSentryGrantOption.UNSET); + List authorizables = Lists.newArrayList(new TAuthorizable(Collection.name(), + collection)); + tPrivilege.setAuthorizables(authorizables); + client.dropPrivilege(bindingSubject.getName(), AuthorizationComponent.Search, tPrivilege); } catch (SentryUserException ex) { throw new SentrySolrAuthorizationException("User " + bindingSubject.getName() + " can't delete privileges for collection " + collection); diff --git a/sentry-binding/sentry-binding-solr/src/test/java/org/apache/sentry/binding/solr/TestSolrAuthzBinding.java b/sentry-binding/sentry-binding-solr/src/test/java/org/apache/sentry/binding/solr/TestSolrAuthzBinding.java index 1bc01a2d8..182aa39c7 100644 --- a/sentry-binding/sentry-binding-solr/src/test/java/org/apache/sentry/binding/solr/TestSolrAuthzBinding.java +++ b/sentry-binding/sentry-binding-solr/src/test/java/org/apache/sentry/binding/solr/TestSolrAuthzBinding.java @@ -16,22 +16,26 @@ */ package org.apache.sentry.binding.solr; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.File; import java.io.FileNotFoundException; +import java.io.IOException; import java.lang.reflect.InvocationTargetException; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; +import java.util.List; import java.util.Set; import java.util.UUID; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.sentry.binding.solr.authz.SentrySolrAuthorizationException; import org.apache.sentry.binding.solr.authz.SolrAuthzBinding; import org.apache.sentry.binding.solr.conf.SolrAuthzConf; @@ -39,6 +43,7 @@ import org.apache.sentry.core.common.Subject; import org.apache.sentry.core.model.search.Collection; import org.apache.sentry.core.model.search.SearchModelAction; +import org.apache.sentry.provider.common.SentryGroupNotFoundException; import org.apache.sentry.provider.file.PolicyFiles; import org.junit.After; import org.junit.Before; @@ -174,17 +179,40 @@ public void testGroupMapping() throws Exception { new SolrAuthzConf(Resources.getResource("sentry-site.xml")); setUsableAuthzConf(solrAuthzConf); SolrAuthzBinding binding = new SolrAuthzBinding(solrAuthzConf); - Set emptyList = Collections.emptySet(); // check non-existant users - assertEquals(binding.getGroups(null), emptyList); - assertEquals(binding.getGroups("nonExistantUser"), emptyList); + try { + binding.getGroups(null); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } + try { + binding.getGroups("nonExistantUser"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } // check group names don't map to user names - assertEquals(binding.getGroups("corporal"), emptyList); - assertEquals(binding.getGroups("sergeant"), emptyList); - 
assertEquals(binding.getGroups("general"), emptyList); - assertEquals(binding.getGroups("othergeneralgroup"), emptyList); + try { + binding.getGroups("corporal"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } + try { + binding.getGroups("sergeant"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } + try { + binding.getGroups("general"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } + try { + binding.getGroups("othergeneralgroup"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } // check valid group names assertEquals(binding.getGroups("corporal1"), Sets.newHashSet("corporal")); @@ -203,19 +231,27 @@ public void testGetRoles() throws Exception { SolrAuthzBinding binding = new SolrAuthzBinding(solrAuthzConf); Set emptySet = Collections.emptySet(); - // check non-existant users - assertEquals(binding.getRoles(null), emptySet); - assertEquals(binding.getRoles("nonExistantUser"), emptySet); - // check user with undefined group assertEquals(binding.getRoles("undefinedGroupUser"), emptySet); // check group with undefined role assertEquals(binding.getRoles("undefinedRoleUser"), emptySet); // check role names don't map in the other direction - assertEquals(binding.getRoles("corporal_role"), emptySet); - assertEquals(binding.getRoles("sergeant_role"), emptySet); - assertEquals(binding.getRoles("general_role"), emptySet); + try { + binding.getRoles("corporal_role"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } + try { + binding.getRoles("sergeant_role"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } + try { + binding.getRoles("general_role"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } // check valid users assertEquals(binding.getRoles("corporal1"), Sets.newHashSet("corporal_role")); @@ -256,7 +292,11 @@ public void testNoUser() throws Exception { new SolrAuthzConf(Resources.getResource("sentry-site.xml")); setUsableAuthzConf(solrAuthzConf); SolrAuthzBinding binding = new SolrAuthzBinding(solrAuthzConf); - expectAuthException(binding, new Subject("bogus"), infoCollection, querySet); + try { + binding.authorizeCollection(new Subject("bogus"), infoCollection, querySet); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } } /** @@ -344,7 +384,6 @@ public void testResourceWithSchemeNotSet() throws Exception { + System.currentTimeMillis()).getAbsolutePath()); String resourceOnHDFS = "/hdfs" + File.separator + UUID.randomUUID() + File.separator + "test-authz-provider.ini"; try { - Path src = new Path(baseDir.getPath(), RESOURCE_PATH); // Copy resource to HDFSS dfsCluster.getFileSystem().copyFromLocalFile(false, new Path(baseDir.getPath(), RESOURCE_PATH), @@ -359,4 +398,34 @@ public void testResourceWithSchemeNotSet() throws Exception { } } } + + @Test + public void testCustomGroupMapping() throws Exception { + SolrAuthzConf solrAuthzConf = + new SolrAuthzConf(Resources.getResource("sentry-site.xml")); + setUsableAuthzConf(solrAuthzConf); + solrAuthzConf.set(AuthzConfVars.AUTHZ_PROVIDER.getVar(), "org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider"); + solrAuthzConf.set("hadoop.security.group.mapping", + 
FoobarGroupMappingServiceProvider.class.getName()); + SolrAuthzBinding binding = new SolrAuthzBinding(solrAuthzConf); + final String user = "userTestSolrAuthzBinding"; + assertEquals(1, binding.getGroups(user).size()); + assertTrue(binding.getGroups(user).contains("foobar")); + } + + /** + * GroupMappingServiceProvider that returns "foobar" for any group + */ + private static class FoobarGroupMappingServiceProvider implements GroupMappingServiceProvider { + @Override + public List getGroups(String user) throws IOException { + return Arrays.asList("foobar"); + } + + @Override + public void cacheGroupsRefresh() throws IOException {} + + @Override + public void cacheGroupsAdd(List groups) throws IOException {} + } } diff --git a/sentry-binding/sentry-binding-sqoop/pom.xml b/sentry-binding/sentry-binding-sqoop/pom.xml new file mode 100644 index 000000000..20cbda037 --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/pom.xml @@ -0,0 +1,80 @@ + + + + 4.0.0 + + + org.apache.sentry + sentry-binding + 1.7.0-incubating-SNAPSHOT + + + sentry-binding-sqoop + Sentry Binding for Sqoop + + + + junit + junit + test + + + org.apache.sentry + sentry-core-common + + + org.apache.sentry + sentry-core-model-sqoop + + + org.apache.sentry + sentry-provider-common + + + org.apache.sentry + sentry-provider-file + + + org.apache.sentry + sentry-provider-db + + + org.apache.sentry + sentry-policy-common + + + org.apache.sentry + sentry-policy-sqoop + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.sqoop + sqoop-common + + + org.apache.sqoop + sqoop-security + + + + diff --git a/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/PrincipalDesc.java b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/PrincipalDesc.java new file mode 100644 index 000000000..cc9096c4e --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/PrincipalDesc.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.sentry.sqoop;
+
+public class PrincipalDesc {
+  public static enum PrincipalType {
+    USER,
+    ROLE,
+    GROUP;
+  }
+
+  private String name;
+  private PrincipalType type;
+
+  public PrincipalDesc(String name, String type) {
+    this.name = name;
+    this.type = fromStr(type);
+  }
+
+  private PrincipalType fromStr(String str) {
+    return Enum.valueOf(PrincipalType.class, str.toUpperCase());
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public PrincipalType getType() {
+    return type;
+  }
+
+  public static PrincipalDesc fromStr(String name, String type) {
+    return new PrincipalDesc(name, type);
+  }
+}
diff --git a/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/SentrySqoopError.java b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/SentrySqoopError.java
new file mode 100644
index 000000000..b86c59f37
--- /dev/null
+++ b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/SentrySqoopError.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.sqoop;
+
+public class SentrySqoopError {
+  public static final String SHOW_GRANT_NOT_SUPPORTED_FOR_PRINCIPAL =
+      "Sentry only supports showing roles by group; not supported for principal type ";
+  public static final String AUTHORIZE_CHECK_NOT_SUPPORT_FOR_PRINCIPAL =
+      "Sentry only supports authorization checks for a user principal; not supported for principal type ";
+  public static final String SHOW_PRIVILEGE_NOT_SUPPORTED_FOR_PRINCIPAL =
+      "Sentry only supports showing privileges by role; not supported for principal type ";
+  public static final String GRANT_REVOKE_PRIVILEGE_NOT_SUPPORT_FOR_PRINCIPAL =
+      "Sentry only supports granting/revoking privileges to/from a role; not supported for principal type ";
+  public static final String GRANT_REVOKE_ROLE_NOT_SUPPORT_FOR_PRINCIPAL =
+      "Sentry only supports granting/revoking roles to/from a group; not supported for principal type ";
+  public static final String NOT_IMPLEMENT_YET =
+      "Sentry does not yet implement ";
+}
diff --git a/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAccessController.java b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAccessController.java
new file mode 100644
index 000000000..3d115e8ca
--- /dev/null
+++ b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAccessController.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.sqoop.authz; + +import java.util.List; + +import org.apache.log4j.Logger; +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.sqoop.PrincipalDesc; +import org.apache.sentry.sqoop.PrincipalDesc.PrincipalType; +import org.apache.sentry.sqoop.SentrySqoopError; +import org.apache.sentry.sqoop.binding.SqoopAuthBinding; +import org.apache.sentry.sqoop.binding.SqoopAuthBindingSingleton; +import org.apache.sqoop.common.SqoopException; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.apache.sqoop.security.AuthorizationAccessController; +import org.apache.sqoop.security.SecurityError; + +public class SentryAccessController extends AuthorizationAccessController { + private static final Logger LOG = Logger.getLogger(SentryAccessController.class); + private final SqoopAuthBinding binding; + + public SentryAccessController() throws Exception { + this.binding = SqoopAuthBindingSingleton.getInstance().getAuthBinding(); + } + + private Subject getSubject() { + return new Subject(SentryAuthorizationHander.getAuthenticator().getUserName()); + } + + @Override + public void createRole(MRole role) throws SqoopException { + binding.createRole(getSubject(), role.getName()); + } + + @Override + public void dropRole(MRole role) throws SqoopException { + binding.dropRole(getSubject(), role.getName()); + } + + @Override + public List<MRole> getAllRoles() throws SqoopException { + return binding.listAllRoles(getSubject()); + } + + @Override + public List<MPrincipal> getPrincipalsByRole(MRole role) throws SqoopException { + /** + * Sentry does not implement this function yet + */ + throw new SqoopException(SecurityError.AUTH_0014, SentrySqoopError.NOT_IMPLEMENT_YET); + } + + @Override + public List<MPrivilege> getPrivilegesByPrincipal(MPrincipal principal, + MResource resource) throws SqoopException { + /** + * Sentry only supports listing privileges by role + */ + PrincipalDesc principalDesc = PrincipalDesc.fromStr(principal.getName(), principal.getType()); + if (principalDesc.getType() != PrincipalType.ROLE) { + throw new SqoopException(SecurityError.AUTH_0014, + SentrySqoopError.SHOW_PRIVILEGE_NOT_SUPPORTED_FOR_PRINCIPAL + + principalDesc.getType().name()); + } + return binding.listPrivilegeByRole(getSubject(), principalDesc.getName(), resource); + } + + @Override + public List<MRole> getRolesByPrincipal(MPrincipal principal) throws SqoopException { + /** + * Sentry only supports listing roles by group + */ + PrincipalDesc principalDesc = PrincipalDesc.fromStr(principal.getName(), principal.getType()); + if (principalDesc.getType() != PrincipalType.GROUP) { + throw new SqoopException(SecurityError.AUTH_0014, + SentrySqoopError.SHOW_GRANT_NOT_SUPPORTED_FOR_PRINCIPAL + + principalDesc.getType().name()); + } + return binding.listRolesByGroup(getSubject(), principalDesc.getName()); + } + + @Override + public
void grantPrivileges(List principals, List privileges) + throws SqoopException { + for (MPrincipal principal : principals) { + PrincipalDesc principalDesc = PrincipalDesc.fromStr(principal.getName(), principal.getType()); + if (principalDesc.getType() != PrincipalType.ROLE) { + throw new SqoopException(SecurityError.AUTH_0014, + SentrySqoopError.GRANT_REVOKE_PRIVILEGE_NOT_SUPPORT_FOR_PRINCIPAL + + principalDesc.getType().name()); + } + + for (MPrivilege privilege : privileges) { + if (LOG.isDebugEnabled()) { + LOG.debug("Going to grant privilege : " + privilege + + " to principal: " + principal); + } + binding.grantPrivilege(getSubject(), principal.getName(), privilege); + } + } + } + + @Override + public void grantRole(List principals, List roles) + throws SqoopException { + for (MPrincipal principal : principals) { + PrincipalDesc principalDesc = PrincipalDesc.fromStr(principal.getName(), principal.getType()); + if (principalDesc.getType() != PrincipalType.GROUP) { + throw new SqoopException(SecurityError.AUTH_0014, + SentrySqoopError.GRANT_REVOKE_ROLE_NOT_SUPPORT_FOR_PRINCIPAL + + principalDesc.getType().name()); + } + for (MRole role : roles) { + if (LOG.isDebugEnabled()) { + LOG.debug("Going to grant role : " + role.getName() + + " to principal: " + principal); + } + binding.grantGroupToRole(getSubject(), principal.getName(), role); + } + } + } + + @Override + public void removeResource(MResource resource) throws SqoopException { + binding.dropPrivilege(resource); + } + + @Override + public void revokePrivileges(List principals, List privileges) + throws SqoopException { + for (MPrincipal principal : principals) { + PrincipalDesc principalDesc = PrincipalDesc.fromStr(principal.getName(), principal.getType()); + if (principalDesc.getType() != PrincipalType.ROLE) { + throw new SqoopException(SecurityError.AUTH_0014, + SentrySqoopError.GRANT_REVOKE_PRIVILEGE_NOT_SUPPORT_FOR_PRINCIPAL + + principalDesc.getType().name()); + } + + for (MPrivilege privilege : privileges) { + if (LOG.isDebugEnabled()) { + LOG.debug("Going to revoke privilege : " + privilege + + " from principal: " + principal); + } + binding.revokePrivilege(getSubject(), principal.getName(), privilege); + } + } + } + + @Override + public void revokeRole(List principals, List roles) + throws SqoopException { + for (MPrincipal principal : principals) { + PrincipalDesc principalDesc = PrincipalDesc.fromStr(principal.getName(), principal.getType()); + if (principalDesc.getType() != PrincipalType.GROUP) { + throw new SqoopException(SecurityError.AUTH_0014, + SentrySqoopError.GRANT_REVOKE_ROLE_NOT_SUPPORT_FOR_PRINCIPAL + + principalDesc.getType().name()); + } + for (MRole role : roles) { + if (LOG.isDebugEnabled()) { + LOG.debug("Going to revoke role : " + role.getName() + + " from principal: " + principal); + } + binding.revokeGroupfromRole(getSubject(), principal.getName(), role); + } + } + } + + @Override + public void updateResource(MResource srcResource, MResource dstResource) + throws SqoopException { + binding.renamePrivilege(getSubject(), srcResource, dstResource); + } +} diff --git a/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAuthorizationHander.java b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAuthorizationHander.java new file mode 100644 index 000000000..93bf3f304 --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAuthorizationHander.java @@ -0,0 +1,117 @@ +/* + * Licensed to the 
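A hedged, test-style sketch of the principal-type guards above, assuming the Sqoop 1.99.x model constructors (MPrincipal(String, TYPE), MResource(String, TYPE), MPrivilege(MResource, String, boolean)); the names are hypothetical:

    import java.util.Arrays;
    import org.apache.sqoop.common.SqoopException;
    import org.apache.sqoop.model.MPrincipal;
    import org.apache.sqoop.model.MPrivilege;
    import org.apache.sqoop.model.MResource;

    SentryAccessController controller = new SentryAccessController();
    MResource connector = new MResource("generic-jdbc-connector", MResource.TYPE.CONNECTOR);
    MPrivilege read = new MPrivilege(connector, "read", false);
    MPrincipal user = new MPrincipal("bob", MPrincipal.TYPE.USER);
    try {
      // privileges may only be granted to ROLE principals
      controller.grantPrivileges(Arrays.asList(user), Arrays.asList(read));
    } catch (SqoopException expected) {
      // SecurityError.AUTH_0014 with GRANT_REVOKE_PRIVILEGE_NOT_SUPPORT_FOR_PRINCIPAL
    }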
Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.sqoop.authz; + +import java.util.List; + +import org.apache.sqoop.common.SqoopException; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.apache.sqoop.security.AuthenticationProvider; +import org.apache.sqoop.security.authorization.DefaultAuthorizationHandler; + +public class SentryAuthorizationHander extends DefaultAuthorizationHandler { + private static AuthenticationProvider authenticator; + + public static AuthenticationProvider getAuthenticator() { + if (authenticator == null) { + throw new RuntimeException("authenticator can't be null"); + } + return authenticator; + } + @Override + public void doInitialize(AuthenticationProvider authenticationProvider, String serverName) + throws ClassNotFoundException, IllegalAccessException, + InstantiationException { + super.doInitialize(authenticationProvider, serverName); + authenticator = authenticationProvider; + } + + @Override + public void checkPrivileges(MPrincipal principal, List privileges) + throws SqoopException { + authorizationValidator.checkPrivileges(principal, privileges); + } + + @Override + public void createRole(MRole role) throws SqoopException { + authorizationAccessController.createRole(role); + } + + @Override + public void dropRole(MRole role) throws SqoopException { + authorizationAccessController.dropRole(role); + } + + @Override + public List getAllRoles() throws SqoopException { + return authorizationAccessController.getAllRoles(); + } + + @Override + public List getPrincipalsByRole(MRole role) throws SqoopException { + return authorizationAccessController.getPrincipalsByRole(role); + } + + @Override + public List getPrivilegesByPrincipal(MPrincipal principal, + MResource resource) throws SqoopException { + return authorizationAccessController.getPrivilegesByPrincipal(principal, resource); + } + + @Override + public List getRolesByPrincipal(MPrincipal principal) throws SqoopException { + return authorizationAccessController.getRolesByPrincipal(principal); + } + + @Override + public void grantPrivileges(List principals, List privileges) + throws SqoopException { + authorizationAccessController.grantPrivileges(principals, privileges); + } + + @Override + public void grantRole(List principals, List roles) + throws SqoopException { + authorizationAccessController.grantRole(principals, roles); + } + + @Override + public void removeResource(MResource resource) throws SqoopException { + authorizationAccessController.removeResource(resource); + } + + @Override + public void revokePrivileges(List principals, List privileges) + throws SqoopException { + 
authorizationAccessController.revokePrivileges(principals, privileges); + } + + @Override + public void revokeRole(List principals, List roles) + throws SqoopException { + authorizationAccessController.revokeRole(principals, roles); + } + + @Override + public void updateResource(MResource srcResource, MResource dstResource) + throws SqoopException { + authorizationAccessController.updateResource(srcResource, dstResource); + } +} diff --git a/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAuthorizationValidator.java b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAuthorizationValidator.java new file mode 100644 index 000000000..51f3f2970 --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/authz/SentryAuthorizationValidator.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.sqoop.authz; + +import java.util.List; + +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.sqoop.PrincipalDesc; +import org.apache.sentry.sqoop.PrincipalDesc.PrincipalType; +import org.apache.sentry.sqoop.SentrySqoopError; +import org.apache.sentry.sqoop.binding.SqoopAuthBinding; +import org.apache.sentry.sqoop.binding.SqoopAuthBindingSingleton; +import org.apache.sqoop.common.SqoopException; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.security.AuthorizationValidator; +import org.apache.sqoop.security.SecurityError; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SentryAuthorizationValidator extends AuthorizationValidator { + private static final Logger LOG = LoggerFactory.getLogger(SentryAuthorizationValidator.class); + private final SqoopAuthBinding binding; + + public SentryAuthorizationValidator() throws Exception { + this.binding = SqoopAuthBindingSingleton.getInstance().getAuthBinding(); + } + + @Override + public void checkPrivileges(MPrincipal principal, List privileges) throws SqoopException { + if (privileges == null || privileges.isEmpty()) { + return; + } + PrincipalDesc principalDesc = new PrincipalDesc(principal.getName(), principal.getType()); + if (principalDesc.getType() != PrincipalType.USER) { + throw new SqoopException(SecurityError.AUTH_0014,SentrySqoopError.AUTHORIZE_CHECK_NOT_SUPPORT_FOR_PRINCIPAL); + } + for (MPrivilege privilege : privileges) { + if (LOG.isDebugEnabled()) { + LOG.debug("Going to authorize check on privilege : " + privilege + + " for principal: " + principal); + } + if (!binding.authorize(new Subject(principalDesc.getName()), privilege)) { + throw new SqoopException(SecurityError.AUTH_0014, "User " + principalDesc.getName() + + " does not have privileges 
for : " + privilege.toString()); + } + } + } +} diff --git a/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/binding/SqoopAuthBinding.java b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/binding/SqoopAuthBinding.java new file mode 100644 index 000000000..84560316f --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/binding/SqoopAuthBinding.java @@ -0,0 +1,411 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.sqoop.binding; + +import java.lang.reflect.Constructor; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.sentry.SentryUserException; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.core.model.sqoop.Server; +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sentry.core.model.sqoop.SqoopActionFactory; +import org.apache.sentry.policy.common.PolicyEngine; +import org.apache.sentry.provider.common.AuthorizationComponent; +import org.apache.sentry.provider.common.AuthorizationProvider; +import org.apache.sentry.provider.common.ProviderBackend; +import org.apache.sentry.provider.db.generic.SentryGenericProviderBackend; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory; +import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryRole; +import org.apache.sentry.sqoop.conf.SqoopAuthConf.AuthzConfVars; +import org.apache.sqoop.common.SqoopException; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.apache.sqoop.security.SecurityError; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; + +public class SqoopAuthBinding { + private static final Logger LOG = LoggerFactory.getLogger(SqoopAuthBinding.class); + private static final String COMPONENT_TYPE = AuthorizationComponent.SQOOP; + + private final Configuration authConf; + private final AuthorizationProvider authProvider; + private final Server sqoopServer; + private final Subject bindingSubject; + private ProviderBackend providerBackend; + + private 
final SqoopActionFactory actionFactory = new SqoopActionFactory(); + + public SqoopAuthBinding(Configuration authConf, String serverName) throws Exception { + this.authConf = authConf; + this.authConf.set(AuthzConfVars.AUTHZ_SERVER_NAME.getVar(), serverName); + this.sqoopServer = new Server(serverName); + this.authProvider = createAuthProvider(); + /** The user the Sqoop server runs as is used as the binding subject */ + this.bindingSubject = new Subject(UserGroupInformation.getCurrentUser() + .getShortUserName()); + } + + /** + * Instantiate the configured authz provider + * @return {@link AuthorizationProvider} + */ + private AuthorizationProvider createAuthProvider() throws Exception { + /** + * Get the authProvider, policyEngine and providerBackend classes plus the policy resource from the Sqoop authz config + */ + String authProviderName = authConf.get(AuthzConfVars.AUTHZ_PROVIDER.getVar(), AuthzConfVars.AUTHZ_PROVIDER.getDefault()); + String resourceName = authConf.get(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getDefault()); + String providerBackendName = authConf.get(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getDefault()); + String policyEngineName = authConf.get(AuthzConfVars.AUTHZ_POLICY_ENGINE.getVar(), AuthzConfVars.AUTHZ_POLICY_ENGINE.getDefault()); + String serviceName = authConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar()); + if (LOG.isDebugEnabled()) { + LOG.debug("Using authorization provider " + authProviderName + " with resource " + resourceName + ", policy engine " + policyEngineName + ", provider backend " + providerBackendName); + } + + // SqoopProviderBackend was deleted in SENTRY-828; this mapping keeps configurations + // written for earlier Sentry releases working. + if ("org.apache.sentry.sqoop.binding.SqoopProviderBackend".equals(providerBackendName)) { + providerBackendName = SentryGenericProviderBackend.class.getName(); + } + + //Instantiate the configured providerBackend + Constructor<?> providerBackendConstructor = Class.forName(providerBackendName) + .getDeclaredConstructor(Configuration.class, String.class); + providerBackendConstructor.setAccessible(true); + providerBackend = (ProviderBackend) providerBackendConstructor.newInstance(new Object[] { + authConf, resourceName }); + if (providerBackend instanceof SentryGenericProviderBackend) { + ((SentryGenericProviderBackend) providerBackend).setComponentType(COMPONENT_TYPE); + ((SentryGenericProviderBackend) providerBackend).setServiceName(serviceName); + } + + //Instantiate the configured policyEngine + Constructor<?> policyConstructor = + Class.forName(policyEngineName).getDeclaredConstructor(String.class, ProviderBackend.class); + policyConstructor.setAccessible(true); + PolicyEngine policyEngine = + (PolicyEngine) policyConstructor.newInstance(new Object[] {sqoopServer.getName(), providerBackend}); + + //Instantiate the configured authProvider + Constructor<?> constructor = + Class.forName(authProviderName).getDeclaredConstructor(Configuration.class, String.class, PolicyEngine.class); + constructor.setAccessible(true); + return (AuthorizationProvider) constructor.newInstance(new Object[] {authConf, resourceName, policyEngine}); + } + + /** + * Authorize access to a Sqoop resource for a given action + * @param subject the requesting user + * @param privilege the privilege (resource plus action) being checked + * @return true if access is granted, false otherwise + */ + public boolean authorize(Subject subject, MPrivilege privilege) { + List<Authorizable> authorizables = toAuthorizable(privilege.getResource()); + if (!hasServerInclude(authorizables)) { + authorizables.add(0,
sqoopServer); + } + return authProvider.hasAccess(subject, + authorizables, + Sets.newHashSet(actionFactory.getActionByName(privilege.getAction())), ActiveRoleSet.ALL); + } + + public boolean hasServerInclude(List authorizables) { + for (Authorizable authorizable : authorizables) { + if (authorizable.getTypeName().equalsIgnoreCase(sqoopServer.getTypeName())) { + return true; + } + } + return false; + } + + /** + * The Sentry-296(generate client for connection pooling) has already finished development and reviewed by now. When it + * was committed to master, the getClient method was needed to refactor using the connection pool + */ + private SentryGenericServiceClient getClient() throws Exception { + return SentryGenericServiceClientFactory.create(authConf); + } + + public void createRole(final Subject subject, final String role) throws SqoopException { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.createRole(subject.getName(), role, COMPONENT_TYPE); + return null; + } + }); + } + + public void dropRole(final Subject subject, final String role) throws SqoopException { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.dropRole(subject.getName(), role, COMPONENT_TYPE); + return null; + } + }); + } + + public List listAllRoles(final Subject subject) throws SqoopException { + Set tSentryRoles = execute(new Command>() { + @Override + public Set run(SentryGenericServiceClient client) + throws Exception { + return client.listAllRoles(subject.getName(), COMPONENT_TYPE); + } + }); + + List roles = Lists.newArrayList(); + for (TSentryRole tRole : tSentryRoles) { + roles.add(new MRole(tRole.getRoleName())); + } + return roles; + } + + public List listRolesByGroup(final Subject subject, final String groupName) throws SqoopException { + Set tSentryRoles = execute(new Command>() { + @Override + public Set run(SentryGenericServiceClient client) + throws Exception { + return client.listRolesByGroupName(subject.getName(), groupName, COMPONENT_TYPE); + } + }); + + List roles = Lists.newArrayList(); + for (TSentryRole tSentryRole : tSentryRoles) { + roles.add(new MRole(tSentryRole.getRoleName())); + } + return roles; + } + + public List listPrivilegeByRole(final Subject subject, final String role, final MResource resource) throws SqoopException { + Set tSentryPrivileges = execute(new Command>() { + @Override + public Set run(SentryGenericServiceClient client) + throws Exception { + if (resource == null) { + return client.listPrivilegesByRoleName(subject.getName(), role, COMPONENT_TYPE, sqoopServer.getName()); + } else if (resource.getType().equalsIgnoreCase(MResource.TYPE.SERVER.name())) { + return client.listPrivilegesByRoleName(subject.getName(), role, COMPONENT_TYPE, resource.getName()); + } else { + return client.listPrivilegesByRoleName(subject.getName(), role, COMPONENT_TYPE, sqoopServer.getName(), toAuthorizable(resource)); + } + } + }); + + List privileges = Lists.newArrayList(); + for (TSentryPrivilege tSentryPrivilege : tSentryPrivileges) { + privileges.add(toSqoopPrivilege(tSentryPrivilege)); + } + return privileges; + } + + public void grantPrivilege(final Subject subject, final String role, final MPrivilege privilege) throws SqoopException { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.grantPrivilege(subject.getName(), role, COMPONENT_TYPE, toTSentryPrivilege(privilege)); + return 
null; + } + }); + } + + public void revokePrivilege(final Subject subject, final String role, final MPrivilege privilege) throws SqoopException { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.revokePrivilege(subject.getName(), role, COMPONENT_TYPE, toTSentryPrivilege(privilege)); + return null; + } + }); + } + + public void grantGroupToRole(final Subject subject, final String group, final MRole role) throws SqoopException { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.addRoleToGroups(subject.getName(), role.getName(), COMPONENT_TYPE, Sets.newHashSet(group)); + return null; + } + }); + } + + public void revokeGroupfromRole(final Subject subject, final String group, final MRole role) throws SqoopException { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.deleteRoleToGroups(subject.getName(), role.getName(), COMPONENT_TYPE, Sets.newHashSet(group)); + return null; + } + }); + } + + public void renamePrivilege(final Subject subject, final MResource srcResource, final MResource dstResource) throws SqoopException { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + client.renamePrivilege(subject.getName(), COMPONENT_TYPE, sqoopServer.getName(), + toAuthorizable(srcResource), toAuthorizable(dstResource)); + return null; + } + }); + } + + public void dropPrivilege(final MResource resource) throws SqoopException { + execute(new Command() { + @Override + public Void run(SentryGenericServiceClient client) throws Exception { + TSentryPrivilege privilege = new TSentryPrivilege(); + privilege.setComponent(COMPONENT_TYPE); + privilege.setServiceName(sqoopServer.getName()); + privilege.setAuthorizables(toTSentryAuthorizable(resource)); + privilege.setAction(SqoopActionConstant.ALL); + client.dropPrivilege(bindingSubject.getName(), COMPONENT_TYPE, privilege); + return null; + } + }); + } + + private MPrivilege toSqoopPrivilege(TSentryPrivilege tPrivilege) { + //construct a sqoop resource + boolean grantOption = false; + if (tPrivilege.getGrantOption() == TSentryGrantOption.TRUE) { + grantOption = true; + } + //construct a sqoop privilege + return new MPrivilege( + toSqoopResource(tPrivilege.getAuthorizables()), + tPrivilege.getAction().equalsIgnoreCase(SqoopActionConstant.ALL) ? SqoopActionConstant.ALL_NAME + : tPrivilege.getAction(), grantOption); + } + + private MResource toSqoopResource(List authorizables) { + if (authorizables == null || authorizables.isEmpty()) { + //server resource + return new MResource(sqoopServer.getName(), MResource.TYPE.SERVER); + } else { + //currently Sqoop only has one-level hierarchy authorizable resource + return new MResource(authorizables.get(0).getName(), authorizables.get(0).getType()); + } + } + + /** + * construct a Sentry privilege to call by the thrift API + * @param privilege + * @return {@link TSentryPrivilege} + */ + private TSentryPrivilege toTSentryPrivilege(MPrivilege privilege) { + TSentryPrivilege tSentryPrivilege = new TSentryPrivilege(); + tSentryPrivilege.setComponent(COMPONENT_TYPE); + tSentryPrivilege.setServiceName(sqoopServer.getName()); + tSentryPrivilege.setAction(privilege.getAction().equalsIgnoreCase( + SqoopActionConstant.ALL_NAME) ? 
SqoopActionConstant.ALL : privilege + .getAction()); + if (privilege.isWith_grant_option()) { + tSentryPrivilege.setGrantOption(TSentryGrantOption.TRUE); + } else { + tSentryPrivilege.setGrantOption(TSentryGrantOption.FALSE); + } + tSentryPrivilege.setAuthorizables(toTSentryAuthorizable(privilege.getResource())); + return tSentryPrivilege; + } + + + private List<TAuthorizable> toTSentryAuthorizable(MResource resource) { + List<TAuthorizable> tAuthorizables = Lists.newArrayList(); + /** + * Sqoop currently supports granting privileges on the server object, but the server name must match + * the value of org.apache.sqoop.security.authorization.server_name in sqoop.properties. + */ + if (resource.getType().equalsIgnoreCase(MResource.TYPE.SERVER.name())) { + if (!resource.getName().equalsIgnoreCase(sqoopServer.getName())) { + throw new IllegalArgumentException( resource.getName() + " must be equal to " + sqoopServer.getName() + "\n" + " Sqoop currently supports grant/revoke privileges on the server object, but the server name must match the value " + "of org.apache.sqoop.security.authorization.server_name in sqoop.properties"); + } + } else { + tAuthorizables.add(new TAuthorizable(resource.getType(), resource.getName())); + } + return tAuthorizables; + } + + private List<Authorizable> toAuthorizable(final MResource resource) { + List<Authorizable> authorizables = Lists.newArrayList(); + if (resource == null) { + return authorizables; + } + authorizables.add(new Authorizable() { + @Override + public String getTypeName() { + return resource.getType(); + } + + @Override + public String getName() { + return resource.getName(); + } + }); + return authorizables; + } + + /** + * A Command is a closure that passes a block of code to execute(), which + * centralizes client creation, cleanup and connection error handling. + * Command is parameterized on the return type of the block. + */ + private interface Command<T> { + T run(SentryGenericServiceClient client) throws Exception; + } + + private <T> T execute(Command<T> cmd) throws SqoopException { + SentryGenericServiceClient client = null; + try { + client = getClient(); + return cmd.run(client); + } catch (SentryUserException ex) { + String msg = "Unable to execute command on Sentry server: " + ex.getMessage(); + LOG.error(msg, ex); + throw new SqoopException(SecurityError.AUTH_0014, msg, ex); + } catch (Exception ex) { + String msg = "Unable to obtain client: " + ex.getMessage(); + LOG.error(msg, ex); + throw new SqoopException(SecurityError.AUTH_0014, msg, ex); + } finally { + if (client != null) { + client.close(); + } + } + } +} diff --git a/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/binding/SqoopAuthBindingSingleton.java b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/binding/SqoopAuthBindingSingleton.java new file mode 100644 index 000000000..39e001fdf --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/binding/SqoopAuthBindingSingleton.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
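The Command/execute pair above centralizes client creation, error translation and cleanup; a sketch of the value-returning shape, as used by listAllRoles, with Command parameterized on the result type (the subject is hypothetical):

    final Subject subject = new Subject("sqoop");
    Set<TSentryRole> tRoles = execute(new Command<Set<TSentryRole>>() {
      @Override
      public Set<TSentryRole> run(SentryGenericServiceClient client) throws Exception {
        return client.listAllRoles(subject.getName(), COMPONENT_TYPE);
      }
    });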
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.sqoop.binding; + +import java.net.MalformedURLException; +import java.net.URL; + +import org.apache.sentry.sqoop.conf.SqoopAuthConf; +import org.apache.sentry.sqoop.conf.SqoopAuthConf.AuthzConfVars; +import org.apache.sqoop.core.SqoopConfiguration; +import org.apache.sqoop.security.SecurityConstants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Strings; + +public class SqoopAuthBindingSingleton { + private static final Logger log = LoggerFactory.getLogger(SqoopAuthBindingSingleton.class); + + // Initialization-on-demand holder idiom; avoids double-checked locking (DCL) + private static class SqoopAuthBindingSingletonHolder { + static final SqoopAuthBindingSingleton instance = new SqoopAuthBindingSingleton(); + } + + private SqoopAuthBinding binding; + + private SqoopAuthBindingSingleton() { + SqoopAuthBinding tmpBinding = null; + try { + String serverName = SqoopConfiguration.getInstance().getContext().getString(SecurityConstants.SERVER_NAME); + if (Strings.isNullOrEmpty(serverName)) { + throw new IllegalArgumentException(SecurityConstants.SERVER_NAME + " can't be null or empty"); + } + SqoopAuthConf conf = loadAuthzConf(); + validateSentrySqoopConfig(conf); + tmpBinding = new SqoopAuthBinding(conf, serverName.trim()); + log.info("SqoopAuthBinding created successfully"); + } catch (Exception ex) { + log.error("Unable to create SqoopAuthBinding", ex); + throw new RuntimeException("Unable to create SqoopAuthBinding: " + ex.getMessage(), ex); + } + binding = tmpBinding; + } + + private SqoopAuthConf loadAuthzConf() { + String sentry_site = SqoopConfiguration.getInstance().getContext() + .getString(SqoopAuthConf.SENTRY_SQOOP_SITE_URL); + if (Strings.isNullOrEmpty(sentry_site)) { + throw new IllegalArgumentException("Configuration key " + SqoopAuthConf.SENTRY_SQOOP_SITE_URL + " value '" + sentry_site + "' is invalid."); + } + + SqoopAuthConf sqoopAuthConf = null; + try { + sqoopAuthConf = new SqoopAuthConf(new URL(sentry_site)); + } catch (MalformedURLException e) { + throw new IllegalArgumentException("Configuration key " + SqoopAuthConf.SENTRY_SQOOP_SITE_URL + " specifies a malformed URL '" + sentry_site + "'", e); + } + return sqoopAuthConf; + } + + private void validateSentrySqoopConfig(SqoopAuthConf conf) { + boolean isTestingMode = Boolean.parseBoolean(conf.get(AuthzConfVars.AUTHZ_TESTING_MODE.getVar(), AuthzConfVars.AUTHZ_TESTING_MODE.getDefault())); + String authentication = SqoopConfiguration.getInstance().getContext() + .getString(SecurityConstants.AUTHENTICATION_TYPE, SecurityConstants.TYPE.SIMPLE.name()); + String kerberos = SecurityConstants.TYPE.KERBEROS.name(); + if (!isTestingMode && !kerberos.equalsIgnoreCase(authentication)) { + throw new IllegalArgumentException(SecurityConstants.AUTHENTICATION_TYPE + " must be kerberos when testing mode is disabled"); + } + } + + public static SqoopAuthBindingSingleton getInstance() { + return SqoopAuthBindingSingletonHolder.instance; + } + + public SqoopAuthBinding getAuthBinding() { + return binding; + } +} diff --git
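SqoopAuthBindingSingleton relies on the initialization-on-demand holder idiom: the JVM initializes the nested holder class exactly once, on first access, so getInstance() is thread-safe without explicit locking. A self-contained sketch of the idiom:

    public final class LazySingleton {
      private LazySingleton() {}

      private static class Holder {
        // class initialization is serialized by the JVM (JLS 12.4)
        static final LazySingleton INSTANCE = new LazySingleton();
      }

      public static LazySingleton getInstance() {
        return Holder.INSTANCE; // first call triggers Holder initialization
      }
    }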
a/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/conf/SqoopAuthConf.java b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/conf/SqoopAuthConf.java new file mode 100644 index 000000000..097e7f70a --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/main/java/org/apache/sentry/sqoop/conf/SqoopAuthConf.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.sqoop.conf; + +import java.net.URL; + +import org.apache.hadoop.conf.Configuration; + +public class SqoopAuthConf extends Configuration { + /** + * Configuration key used in sqoop.properties to point at sentry-site.xml + */ + public static final String SENTRY_SQOOP_SITE_URL = "sentry.sqoop.site.url"; + /** + * Config setting definitions + */ + public static enum AuthzConfVars { + AUTHZ_PROVIDER("sentry.sqoop.provider","org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider"), + AUTHZ_PROVIDER_RESOURCE("sentry.sqoop.provider.resource", ""), + AUTHZ_PROVIDER_BACKEND( + "sentry.sqoop.provider.backend", + "org.apache.sentry.provider.db.generic.SentryGenericProviderBackend"), + AUTHZ_POLICY_ENGINE("sentry.sqoop.policy.engine","org.apache.sentry.policy.sqoop.SimpleSqoopPolicyEngine"), + AUTHZ_SERVER_NAME("sentry.sqoop.name", ""), + AUTHZ_TESTING_MODE("sentry.sqoop.testing.mode", "false"); + + private final String varName; + private final String defaultVal; + + AuthzConfVars(String varName, String defaultVal) { + this.varName = varName; + this.defaultVal = defaultVal; + } + + public String getVar() { + return varName; + } + + public String getDefault() { + return defaultVal; + } + + public static String getDefault(String varName) { + for (AuthzConfVars oneVar : AuthzConfVars.values()) { + if (oneVar.getVar().equalsIgnoreCase(varName)) { + return oneVar.getDefault(); + } + } + return null; + } + } + + public static final String AUTHZ_SITE_FILE = "sentry-site.xml"; + + public SqoopAuthConf(URL sqoopAuthzSiteURL) { + super(true); + addResource(sqoopAuthzSiteURL); + } + + @Override + public String get(String varName) { + return get(varName, AuthzConfVars.getDefault(varName)); + } +} diff --git a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFileConstants.java b/sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/MockAuthenticationProvider.java similarity index 69% rename from sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFileConstants.java rename to sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/MockAuthenticationProvider.java index b2bc531ad..0cd9fc6b4 100644 --- 
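A short sketch of the fallback behavior of SqoopAuthConf above: the overridden get(String) resolves unset keys to the AuthzConfVars defaults, so a sparse sentry-site.xml still yields a complete provider configuration:

    SqoopAuthConf conf = new SqoopAuthConf(Resources.getResource("sentry-site.xml"));
    // when the key is absent, get() falls back to the enum default,
    // org.apache.sentry.provider.db.generic.SentryGenericProviderBackend
    String backend = conf.get(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar());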
a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFileConstants.java +++ b/sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/MockAuthenticationProvider.java @@ -14,15 +14,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.sentry.provider.file; +package org.apache.sentry.sqoop; -import org.apache.sentry.provider.common.ProviderConstants; +import org.apache.sqoop.security.AuthenticationProvider; -public class PolicyFileConstants extends ProviderConstants { +public class MockAuthenticationProvider extends AuthenticationProvider { - public static final String DATABASES = "databases"; - public static final String GROUPS = "groups"; - public static final String ROLES = "roles"; - public static final String USERS = "users"; + @Override + public String[] getGroupNames() { + return new String[]{""}; + } + @Override + public String getUserName() { + return ""; + } } diff --git a/sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/TestSentryAuthorizationHander.java b/sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/TestSentryAuthorizationHander.java new file mode 100644 index 000000000..c7cca54fc --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/TestSentryAuthorizationHander.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.sqoop; + +import static org.junit.Assert.fail; + +import java.io.File; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.provider.file.PolicyFiles; +import org.apache.sentry.sqoop.conf.SqoopAuthConf; +import org.apache.sentry.sqoop.conf.SqoopAuthConf.AuthzConfVars; +import org.apache.sqoop.security.SecurityFactory; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.io.Files; +import com.google.common.io.Resources; + +public class TestSentryAuthorizationHander { + private static final String RESOURCE_PATH = "test-authz-provider.ini"; + private SqoopAuthConf authzConf; + private File baseDir; + + @Before + public void setup() throws Exception { + baseDir = Files.createTempDir(); + PolicyFiles.copyToDir(baseDir, RESOURCE_PATH); + authzConf = new SqoopAuthConf(Resources.getResource("sentry-site.xml")); + authzConf.set(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), new File(baseDir, RESOURCE_PATH).getPath()); + } + + @After + public void teardown() { + if(baseDir != null) { + FileUtils.deleteQuietly(baseDir); + } + } + + /** + * Test that incorrect specification of classes for + * AUTHZ_ACCESS_CONTROLLER and AUTHZ_ACCESS_VALIDATOR + * correctly throw ClassNotFoundExceptions + */ + @Test + public void testClassNotFound() throws Exception { + try { + SecurityFactory.getAuthorizationAccessController("org.apache.sentry.sqoop.authz.BogusSentryAccessController"); + fail("Exception should have been thrown"); + } catch (Exception ex) { + } + + try { + SecurityFactory.getAuthorizationValidator("org.apache.sentry.sqoop.authz.BogusSentryAuthorizationValidator"); + fail("Exception should have been thrown"); + } catch (Exception ex) { + } + } +} diff --git a/sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/TestSqoopAuthConf.java b/sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/TestSqoopAuthConf.java new file mode 100644 index 000000000..e4991e1be --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/test/java/org/apache/sentry/sqoop/TestSqoopAuthConf.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
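For contrast, the happy path of the same factory calls, assuming this binding is on the Sqoop server classpath and that SecurityFactory returns the instantiated controller and validator as in Sqoop 1.99.x:

    AuthorizationAccessController controller = SecurityFactory
        .getAuthorizationAccessController("org.apache.sentry.sqoop.authz.SentryAccessController");
    AuthorizationValidator validator = SecurityFactory
        .getAuthorizationValidator("org.apache.sentry.sqoop.authz.SentryAuthorizationValidator");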
+ */ +package org.apache.sentry.sqoop; + +import java.util.Arrays; +import java.util.List; + +import org.apache.sentry.sqoop.conf.SqoopAuthConf; +import org.apache.sentry.sqoop.conf.SqoopAuthConf.AuthzConfVars; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.io.Resources; + +public class TestSqoopAuthConf { + private static SqoopAuthConf authAllConf; + private static SqoopAuthConf authNoConf; + private static List currentProps; + + @BeforeClass + public static void setup() throws Exception { + authAllConf = new SqoopAuthConf(Resources.getResource("sentry-site.xml")); + authNoConf = new SqoopAuthConf(Resources.getResource("no-configure-sentry-site.xml")); + currentProps = Arrays.asList(new AuthzConfVars[]{ + AuthzConfVars.AUTHZ_PROVIDER, AuthzConfVars.AUTHZ_PROVIDER_BACKEND, + AuthzConfVars.AUTHZ_POLICY_ENGINE, AuthzConfVars.AUTHZ_PROVIDER_RESOURCE + }); + } + + @Test + public void testPropertiesHaveConfigured() { + Assert.assertEquals("org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider", + authAllConf.get(AuthzConfVars.AUTHZ_PROVIDER.getVar())); + Assert.assertEquals("classpath:test-authz-provider.ini", + authAllConf.get(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar())); + Assert.assertEquals("org.apache.sentry.policy.sqoop.SimpleSqoopPolicyEngine", + authAllConf.get(AuthzConfVars.AUTHZ_POLICY_ENGINE.getVar())); + Assert.assertEquals("true", authAllConf.get(AuthzConfVars.AUTHZ_TESTING_MODE.getVar())); + } + + @Test + public void testPropertiesNoConfigured() { + for (AuthzConfVars currentVar : currentProps) { + Assert.assertEquals(currentVar.getDefault(), authNoConf.get(currentVar.getVar())); + } + } +} diff --git a/sentry-binding/sentry-binding-sqoop/src/test/resources/no-configure-sentry-site.xml b/sentry-binding/sentry-binding-sqoop/src/test/resources/no-configure-sentry-site.xml new file mode 100644 index 000000000..f64271256 --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/test/resources/no-configure-sentry-site.xml @@ -0,0 +1,22 @@ + + + + + + + diff --git a/sentry-binding/sentry-binding-sqoop/src/test/resources/sentry-site.xml b/sentry-binding/sentry-binding-sqoop/src/test/resources/sentry-site.xml new file mode 100644 index 000000000..2c9898079 --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/test/resources/sentry-site.xml @@ -0,0 +1,38 @@ + + + + + + + sentry.sqoop.provider + org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider + + + sentry.sqoop.provider.resource + classpath:test-authz-provider.ini + + + sentry.sqoop.policy.engine + org.apache.sentry.policy.sqoop.SimpleSqoopPolicyEngine + + + sentry.sqoop.testing.mode + true + + + diff --git a/sentry-binding/sentry-binding-sqoop/src/test/resources/test-authz-provider.ini b/sentry-binding/sentry-binding-sqoop/src/test/resources/test-authz-provider.ini new file mode 100644 index 000000000..dc11b4b35 --- /dev/null +++ b/sentry-binding/sentry-binding-sqoop/src/test/resources/test-authz-provider.ini @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
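The sentry-site.xml above is located through sqoop.properties; a hedged sketch of that wiring, with the org.apache.sqoop.security.* key names taken from Sqoop 1.99.x and worth verifying against the release in use:

    # Sentry requires kerberos outside testing mode (see validateSentrySqoopConfig)
    org.apache.sqoop.security.authentication.type=KERBEROS
    org.apache.sqoop.security.authorization.handler=org.apache.sentry.sqoop.authz.SentryAuthorizationHander
    org.apache.sqoop.security.authorization.access_controller=org.apache.sentry.sqoop.authz.SentryAccessController
    org.apache.sqoop.security.authorization.validator=org.apache.sentry.sqoop.authz.SentryAuthorizationValidator
    org.apache.sqoop.security.authorization.server_name=server1
    # key defined by SqoopAuthConf.SENTRY_SQOOP_SITE_URL
    sentry.sqoop.site.url=file:///etc/sqoop/conf/sentry-site.xml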
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +[groups] +developer = jdbc_connector_role, hdfs_connector_role,kafka_connector_role,kite_connector_role,\ + jobs_analyst_role,links_analyst_role +analyst = jobs_analyst_role,links_analyst_role +connectors_operator = jdbc_connector_role, hdfs_connector_role,kafka_connector_role,kite_connector_role +jobs_analyst = jobs_analyst_role +job1_2_operator = job1_role,job2_role +links_analyst = links_analyst_role +link1_2_operator = link1_role,link2_role +admin = admin_role + +[roles] +admin_role = server=server1->action=* +jdbc_connector_role = server=server1->connector=generic-jdbc-connector->action=read +hdfs_connector_role = server=server1->connector=hdfs-connector->action=read +kafka_connector_role = server=server1->connector=kafka-connector->action=read +kite_connector_role = server=server1->connector=kite-connector->action=read +jobs_analyst_role = server=server1->job=*->action=* +job1_role = server=server1->job=job1->action=read +job2_role = server=server1->job=job2->action=read +links_analyst_role = server=server1->link=*->action=* +link1_role = server=server1->link=link1->action=read +link2_role = server=server1->link=link2->action=read \ No newline at end of file diff --git a/sentry-core/pom.xml b/sentry-core/pom.xml index 707534e36..06d92dea8 100644 --- a/sentry-core/pom.xml +++ b/sentry-core/pom.xml @@ -21,11 +21,11 @@ limitations under the License. org.apache.sentry sentry - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-core - Sentry core + Sentry Core pom @@ -34,6 +34,7 @@ limitations under the License. sentry-core-model-indexer sentry-core-model-search sentry-core-model-sqoop + sentry-core-model-kafka diff --git a/sentry-core/sentry-core-common/pom.xml b/sentry-core/sentry-core-common/pom.xml index feff0304d..21a167745 100644 --- a/sentry-core/sentry-core-common/pom.xml +++ b/sentry-core/sentry-core-common/pom.xml @@ -21,7 +21,7 @@ limitations under the License. 
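Each role in the test-authz-provider.ini above is a comma-separated list of privilege strings of the form server=<server>[-><resource-type>=<name>]->action=<action>, where the resource type is connector, link, or job and * wildcards both names and actions; for example:

    [roles]
    # read-only access to a single connector on server1
    jdbc_connector_role = server=server1->connector=generic-jdbc-connector->action=read
    # full control over every job on server1
    jobs_analyst_role = server=server1->job=*->action=*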
org.apache.sentry sentry-core - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-core-common diff --git a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/Command.java b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/Command.java index 528f7d750..5af4cadd1 100644 --- a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/Command.java +++ b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/Command.java @@ -19,5 +19,5 @@ public interface Command { - public void run(String[] args) throws Exception; + void run(String[] args) throws Exception; } diff --git a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/SentryMain.java b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/SentryMain.java index 7b1b6ace4..1ccf7decb 100644 --- a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/SentryMain.java +++ b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/SentryMain.java @@ -27,6 +27,8 @@ import org.slf4j.LoggerFactory; import com.google.common.collect.ImmutableMap; +import java.io.FileInputStream; +import java.util.Properties; public class SentryMain { private static final String HELP_SHORT = "h"; @@ -57,8 +59,26 @@ public static void main(String[] args) CommandLine commandLine = parser.parse(options, args, true); String log4jconf = commandLine.getOptionValue(LOG4J_CONF); - if ((log4jconf != null)&&(log4jconf.length() > 0)) { - PropertyConfigurator.configure(log4jconf); + if (log4jconf != null && log4jconf.length() > 0) { + Properties log4jProperties = new Properties(); + + // Firstly load log properties from properties file + FileInputStream istream = new FileInputStream(log4jconf); + log4jProperties.load(istream); + istream.close(); + + // Set the log level of DataNucleus.Query to INFO only if it is not set in the + // properties file + if (!log4jProperties.containsKey("log4j.category.DataNucleus.Query")) { + log4jProperties.setProperty("log4j.category.DataNucleus.Query", "INFO"); + + // Enable debug log for DataNucleus.Query only when log.threshold is TRACE + if (log4jProperties.getProperty("log.threshold").equalsIgnoreCase("TRACE")) { + log4jProperties.setProperty("log4j.category.DataNucleus.Query", "DEBUG"); + } + } + + PropertyConfigurator.configure(log4jProperties); Logger sentryLogger = LoggerFactory.getLogger(SentryMain.class); sentryLogger.info("Configuring log4j to use [" + log4jconf + "]"); } @@ -101,9 +121,10 @@ private static void printVersion() { private static void printHelp(Options options, String msg) { String sentry = "sentry"; - if(msg != null) + if (msg != null) { sentry = msg + sentry; + } (new HelpFormatter()).printHelp(sentry, options); System.exit(1); } -} \ No newline at end of file +} diff --git a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/Action.java b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/Action.java index 1479e5c1b..77c91d23b 100644 --- a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/Action.java +++ b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/Action.java @@ -17,6 +17,6 @@ package org.apache.sentry.core.common; public interface Action { - public static final String ALL = "*"; - public String getValue(); + String ALL = "*"; + String getValue(); } diff --git a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/Authorizable.java 
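One caveat in the DataNucleus.Query block of SentryMain above: Properties.getProperty returns null when log.threshold is missing, so the TRACE comparison throws a NullPointerException for log4j files that set neither key. A null-safe sketch of the same logic:

    if (!log4jProperties.containsKey("log4j.category.DataNucleus.Query")) {
      log4jProperties.setProperty("log4j.category.DataNucleus.Query", "INFO");
      // constant-first comparison tolerates a missing log.threshold entry
      if ("TRACE".equalsIgnoreCase(log4jProperties.getProperty("log.threshold"))) {
        log4jProperties.setProperty("log4j.category.DataNucleus.Query", "DEBUG");
      }
    }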
b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/Authorizable.java index 352323796..d49a53d3b 100644 --- a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/Authorizable.java +++ b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/Authorizable.java @@ -17,7 +17,7 @@ package org.apache.sentry.core.common; public interface Authorizable { - public String getName(); + String getName(); - public String getTypeName(); + String getTypeName(); } diff --git a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/BitFieldAction.java b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/BitFieldAction.java index 5aa0f83d0..ce0e4fbcd 100644 --- a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/BitFieldAction.java +++ b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/BitFieldAction.java @@ -55,7 +55,7 @@ public boolean equals(Object obj) { return false; } BitFieldAction that = (BitFieldAction)obj; - return (code == that.code) && (name.equalsIgnoreCase(that.name)); + return code == that.code && name.equalsIgnoreCase(that.name); } @Override diff --git a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/utils/PathUtils.java b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/utils/PathUtils.java index 6cb599c71..c7002e0e4 100644 --- a/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/utils/PathUtils.java +++ b/sentry-core/sentry-core-common/src/main/java/org/apache/sentry/core/common/utils/PathUtils.java @@ -42,11 +42,10 @@ public static boolean impliesURI(URI privilegeURI, URI requestURI) throws URISyn return false; } // ensure that either both schemes are null or equal - if (privilegeURI.getScheme() == null) { - if (requestURI.getScheme() != null) { - return false; - } - } else if (!privilegeURI.getScheme().equals(requestURI.getScheme())) { + if (privilegeURI.getScheme() == null && requestURI.getScheme() != null) { + return false; + } + if (privilegeURI.getScheme() != null && !privilegeURI.getScheme().equals(requestURI.getScheme())) { return false; } // request path does not contain relative parts /a/../b && diff --git a/sentry-core/sentry-core-common/src/test/java/org/apache/sentry/core/common/utils/TestPathUtils.java b/sentry-core/sentry-core-common/src/test/java/org/apache/sentry/core/common/utils/TestPathUtils.java index 4166c7757..d7d5e0ad8 100644 --- a/sentry-core/sentry-core-common/src/test/java/org/apache/sentry/core/common/utils/TestPathUtils.java +++ b/sentry-core/sentry-core-common/src/test/java/org/apache/sentry/core/common/utils/TestPathUtils.java @@ -16,9 +16,9 @@ */ package org.apache.sentry.core.common.utils; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.net.URI; diff --git a/sentry-core/sentry-core-model-db/pom.xml b/sentry-core/sentry-core-model-db/pom.xml index 43ce4b854..902b129a6 100644 --- a/sentry-core/sentry-core-model-db/pom.xml +++ b/sentry-core/sentry-core-model-db/pom.xml @@ -21,7 +21,7 @@ limitations under the License. 
org.apache.sentry sentry-core - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-core-model-db diff --git a/sentry-core/sentry-core-model-db/src/main/java/org/apache/sentry/core/model/db/Column.java b/sentry-core/sentry-core-model-db/src/main/java/org/apache/sentry/core/model/db/Column.java index 89aabfc7e..305fd1f5a 100644 --- a/sentry-core/sentry-core-model-db/src/main/java/org/apache/sentry/core/model/db/Column.java +++ b/sentry-core/sentry-core-model-db/src/main/java/org/apache/sentry/core/model/db/Column.java @@ -23,6 +23,8 @@ public class Column implements DBModelAuthorizable { */ public static final Column ALL = new Column(AccessConstants.ALL); + public static final Column SOME = new Column(AccessConstants.SOME); + private final String name; public Column(String name) { diff --git a/sentry-core/sentry-core-model-db/src/main/java/org/apache/sentry/core/model/db/DBModelAuthorizable.java b/sentry-core/sentry-core-model-db/src/main/java/org/apache/sentry/core/model/db/DBModelAuthorizable.java index 4d74356d7..4ce01b2c4 100644 --- a/sentry-core/sentry-core-model-db/src/main/java/org/apache/sentry/core/model/db/DBModelAuthorizable.java +++ b/sentry-core/sentry-core-model-db/src/main/java/org/apache/sentry/core/model/db/DBModelAuthorizable.java @@ -29,5 +29,5 @@ public enum AuthorizableType { URI }; - public AuthorizableType getAuthzType(); + AuthorizableType getAuthzType(); } diff --git a/sentry-core/sentry-core-model-indexer/pom.xml b/sentry-core/sentry-core-model-indexer/pom.xml index 76108c198..68069f4a4 100644 --- a/sentry-core/sentry-core-model-indexer/pom.xml +++ b/sentry-core/sentry-core-model-indexer/pom.xml @@ -21,7 +21,7 @@ limitations under the License. org.apache.sentry sentry-core - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-core-model-indexer diff --git a/sentry-core/sentry-core-model-indexer/src/main/java/org/apache/sentry/core/model/indexer/IndexerModelAuthorizable.java b/sentry-core/sentry-core-model-indexer/src/main/java/org/apache/sentry/core/model/indexer/IndexerModelAuthorizable.java index d92a5c870..b3a387323 100644 --- a/sentry-core/sentry-core-model-indexer/src/main/java/org/apache/sentry/core/model/indexer/IndexerModelAuthorizable.java +++ b/sentry-core/sentry-core-model-indexer/src/main/java/org/apache/sentry/core/model/indexer/IndexerModelAuthorizable.java @@ -24,5 +24,5 @@ public enum AuthorizableType { Indexer }; - public AuthorizableType getAuthzType(); + AuthorizableType getAuthzType(); } diff --git a/sentry-core/sentry-core-model-indexer/src/test/java/org/apache/sentry/core/indexer/TestIndexer.java b/sentry-core/sentry-core-model-indexer/src/test/java/org/apache/sentry/core/indexer/TestIndexer.java index 843fd823a..06b8de813 100644 --- a/sentry-core/sentry-core-model-indexer/src/test/java/org/apache/sentry/core/indexer/TestIndexer.java +++ b/sentry-core/sentry-core-model-indexer/src/test/java/org/apache/sentry/core/indexer/TestIndexer.java @@ -16,7 +16,7 @@ * limitations under the License. 
 */
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.sentry.core.model.indexer.Indexer;
 import org.junit.Test;
diff --git a/sentry-core/sentry-core-model-indexer/src/test/java/org/apache/sentry/core/indexer/TestIndexerBitFieldAction.java b/sentry-core/sentry-core-model-indexer/src/test/java/org/apache/sentry/core/indexer/TestIndexerBitFieldAction.java
index a490cd89a..532f9ec63 100644
--- a/sentry-core/sentry-core-model-indexer/src/test/java/org/apache/sentry/core/indexer/TestIndexerBitFieldAction.java
+++ b/sentry-core/sentry-core-model-indexer/src/test/java/org/apache/sentry/core/indexer/TestIndexerBitFieldAction.java
@@ -17,8 +17,6 @@
  */
 package org.apache.sentry.core.indexer;
 
-import java.util.List;
-
 import org.apache.sentry.core.model.indexer.IndexerActionFactory;
 import org.apache.sentry.core.model.indexer.IndexerActionFactory.IndexerAction;
 import org.apache.sentry.core.model.indexer.IndexerActionFactory.IndexerBitFieldAction;
@@ -27,9 +25,9 @@
 
 import com.google.common.collect.Lists;
 
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertEquals;
 
 public class TestIndexerBitFieldAction {
   IndexerActionFactory actionFactory = new IndexerActionFactory();
diff --git a/sentry-core/sentry-core-model-kafka/pom.xml b/sentry-core/sentry-core-model-kafka/pom.xml
new file mode 100644
index 000000000..cadd4ac8e
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/pom.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry-core</artifactId>
+    <version>1.7.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-core-model-kafka</artifactId>
+  <name>Sentry Core Model Kafka</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-core-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+</project>
diff --git a/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Cluster.java b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Cluster.java
new file mode 100644
index 000000000..edf36c82b
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Cluster.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.core.model.kafka;
+
+/**
+ * Represents the Cluster authorizable in the Kafka model.
+ */
+public class Cluster implements KafkaAuthorizable {
+  public static final String NAME = "kafka-cluster";
+
+  /**
+   * Get the type of Kafka's cluster authorizable.
+   *
+   * @return Type of Kafka's cluster authorizable.
+   */
+  @Override
+  public AuthorizableType getAuthzType() {
+    return AuthorizableType.CLUSTER;
+  }
+
+  /**
+   * Get the name of Kafka's cluster.
+   *
+   * @return Name of Kafka's cluster.
+   */
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  /**
+   * Get the type name of Kafka's cluster authorizable.
+   *
+   * @return Type name of Kafka's cluster authorizable.
+   */
+  @Override
+  public String getTypeName() {
+    return getAuthzType().name();
+  }
+}
diff --git a/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/ConsumerGroup.java b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/ConsumerGroup.java
new file mode 100644
index 000000000..5fc4e8c25
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/ConsumerGroup.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.core.model.kafka;
+/**
+ * Represents the ConsumerGroup authorizable in the Kafka model.
+ */
+public class ConsumerGroup implements KafkaAuthorizable {
+  private String name;
+
+  /**
+   * Create a ConsumerGroup authorizable with the given name.
+   *
+   * @param name Name of a consumer group in a Kafka cluster.
+   */
+  public ConsumerGroup(String name) {
+    this.name = name;
+  }
+
+  /**
+   * Get the type of Kafka's consumer-group authorizable.
+   *
+   * @return Type of Kafka's consumer-group authorizable.
+   */
+  @Override
+  public AuthorizableType getAuthzType() {
+    return AuthorizableType.CONSUMERGROUP;
+  }
+
+  /**
+   * Get the name of Kafka's consumer group.
+   *
+   * @return Name of Kafka's consumer group.
+   */
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Get the type name of Kafka's consumer-group authorizable.
+   *
+   * @return Type name of Kafka's consumer-group authorizable.
+   */
+  @Override
+  public String getTypeName() {
+    return getAuthzType().name();
+  }
+}
diff --git a/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Host.java b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Host.java
new file mode 100644
index 000000000..48a18f69b
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Host.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.core.model.kafka;
+
+/**
+ * Represents the Host authorizable in the Kafka model.
+ */
+public class Host implements KafkaAuthorizable {
+  private String name;
+
+  /**
+   * Create a Host authorizable from a given string representation of a host.
+   *
+   * @param name String representation of the host.
+   */
+  public Host(String name) {
+    this.name = name;
+  }
+
+  /**
+   * Get the authorizable type of the Host authorizable.
+   *
+   * @return Type of the Host authorizable.
+   */
+  @Override
+  public AuthorizableType getAuthzType() {
+    return AuthorizableType.HOST;
+  }
+
+  /**
+   * Get the name of Kafka's host authorizable.
+   *
+   * @return Name of Kafka's host authorizable.
+   */
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Get the type name of Kafka's host authorizable.
+   *
+   * @return Type name of Kafka's host authorizable.
+   */
+  @Override
+  public String getTypeName() {
+    return getAuthzType().name();
+  }
+
+}
diff --git a/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaActionConstant.java b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaActionConstant.java
new file mode 100644
index 000000000..17d7fb74c
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaActionConstant.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.core.model.kafka;
+
+/**
+ * Actions supported by Kafka on its authorizable resources.
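+ * Action names are matched case-insensitively by KafkaActionFactory, and ALL is
+ * the bitwise union of every other action.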
+ */
+public class KafkaActionConstant {
+
+  public static final String ALL = "ALL";
+  public static final String READ = "read";
+  public static final String WRITE = "write";
+  public static final String CREATE = "create";
+  public static final String DELETE = "delete";
+  public static final String ALTER = "alter";
+  public static final String DESCRIBE = "describe";
+  public static final String CLUSTER_ACTION = "clusteraction";
+
+  public static final String actionName = "action";
+}
diff --git a/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaActionFactory.java b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaActionFactory.java
new file mode 100644
index 000000000..fc3bf7aa9
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaActionFactory.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.sentry.core.model.kafka;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.sentry.core.common.BitFieldAction;
+import org.apache.sentry.core.common.BitFieldActionFactory;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Factory for creating actions supported by Kafka.
+ */
+public class KafkaActionFactory extends BitFieldActionFactory {
+  private static KafkaActionFactory instance;
+  private KafkaActionFactory() {}
+
+  /**
+   * Get the instance of KafkaActionFactory, which is a singleton.
+   *
+   * @return Instance of KafkaActionFactory.
+   */
+  public static KafkaActionFactory getInstance() {
+    // Lazily initialized without synchronization; the factory is stateless,
+    // so a racing initialization is harmless.
+    if (instance == null) {
+      instance = new KafkaActionFactory();
+    }
+
+    return instance;
+  }
+
+  /**
+   * Types of actions supported by Kafka.
+   */
+  public enum KafkaActionType {
+    READ(KafkaActionConstant.READ, 1),
+    WRITE(KafkaActionConstant.WRITE, 2),
+    CREATE(KafkaActionConstant.CREATE, 4),
+    DELETE(KafkaActionConstant.DELETE, 8),
+    ALTER(KafkaActionConstant.ALTER, 16),
+    DESCRIBE(KafkaActionConstant.DESCRIBE, 32),
+    CLUSTER_ACTION(KafkaActionConstant.CLUSTER_ACTION, 64),
+    ALL(KafkaActionConstant.ALL, READ.getCode() | WRITE.getCode() | CREATE.getCode()
+        | DELETE.getCode() | ALTER.getCode() | DESCRIBE.getCode() | CLUSTER_ACTION.getCode());
+
+    private String name;
+    private int code;
+
+    /**
+     * Create a Kafka action type with the given name and code.
+     *
+     * @param name Name of the Kafka action.
+     * @param code Integer representation of the Kafka action's code.
+     */
+    KafkaActionType(String name, int code) {
+      this.name = name;
+      this.code = code;
+    }
+
+    /**
+     * Get the code for this Kafka action.
+     *
+     * @return Code for this Kafka action.
+     */
+    public int getCode() {
+      return code;
+    }
+
+    /**
+     * Get the name of this Kafka action.
+     *
+     * @return Name of this Kafka action.
+     */
+    public String getName() {
+      return name;
+    }
+
+    /**
+     * Check whether a Kafka action type with {@code name} as its string representation exists.
+     *
+     * @param name String representation of a Kafka action type.
+     * @return True if such an action type exists.
+     */
+    static boolean hasActionType(String name) {
+      for (KafkaActionType action : KafkaActionType.values()) {
+        if (action.name.equalsIgnoreCase(name)) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    /**
+     * Get the Kafka action type with the given name.
+     *
+     * @param name String representation of a Kafka action type.
+     * @return The matching action type, or null if none exists.
+     */
+    static KafkaActionType getActionByName(String name) {
+      for (KafkaActionType action : KafkaActionType.values()) {
+        if (action.name.equalsIgnoreCase(name)) {
+          return action;
+        }
+      }
+      return null; // no action type with the provided name
+    }
+
+    /**
+     * Get the Kafka action types represented by the given code.
+     *
+     * @param code Integer representation of one or more Kafka action codes.
+     * @return List of action types whose bits are set in the code; empty if none match.
+     */
+    static List<KafkaActionType> getActionByCode(int code) {
+      List<KafkaActionType> actions = Lists.newArrayList();
+      for (KafkaActionType action : KafkaActionType.values()) {
+        if ((action.code & code) == action.code && action != KafkaActionType.ALL) {
+          // KafkaActionType.ALL should not be returned in the list
+          actions.add(action);
+        }
+      }
+      if (actions.isEmpty()) {
+        return Arrays.asList();
+      }
+      return actions;
+    }
+  }
+
+  /**
+   * A Kafka action, backed by a field of action bits.
+   */
+  public static class KafkaAction extends BitFieldAction {
+    /**
+     * Create a Kafka action with the given name.
+     *
+     * @param name Name of the Kafka action.
+     */
+    public KafkaAction(String name) {
+      this(KafkaActionType.getActionByName(name));
+    }
+
+    /**
+     * Create a Kafka action from the given Kafka action type.
+     *
+     * @param actionType Type of Kafka action for which the action has to be created.
+     */
+    public KafkaAction(KafkaActionType actionType) {
+      super(actionType.name(), actionType.getCode());
+    }
+  }
+
+  /**
+   * Get the Kafka actions represented by the given action code.
+   *
+   * @param actionCode Integer code for the required Kafka actions.
+   * @return List of Kafka actions represented by the code.
+   */
+  @Override
+  public List<KafkaAction> getActionsByCode(int actionCode) {
+    List<KafkaAction> actions = Lists.newArrayList();
+    for (KafkaActionType action : KafkaActionType.getActionByCode(actionCode)) {
+      actions.add(new KafkaAction(action));
+    }
+    return actions;
+  }
+
+  /**
+   * Get the Kafka action with the given name.
+   *
+   * @param name Name of the required action; "*" is treated as ALL.
+   * @return The Kafka action, or null if the name matches no action type.
+   */
+  @Override
+  public KafkaAction getActionByName(String name) {
+    if (name.equalsIgnoreCase("*")) {
+      return new KafkaAction("ALL");
+    }
+    return KafkaActionType.hasActionType(name) ? new KafkaAction(name) : null;
+  }
+}
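For context, a minimal sketch of how the constants, the enum, and the two factory overrides compose. This is illustrative code only, not part of the patch; the class name KafkaActionExample is made up, and the snippet assumes it lives in the same package as the factory:

package org.apache.sentry.core.model.kafka;

public class KafkaActionExample {
  public static void main(String[] args) {
    KafkaActionFactory factory = KafkaActionFactory.getInstance();
    // Lookups are case-insensitive; "*" is an alias for ALL.
    KafkaActionFactory.KafkaAction read = factory.getActionByName(KafkaActionConstant.READ);
    KafkaActionFactory.KafkaAction all = factory.getActionByName("*");
    // ALL's code is the bitwise union of every action bit, so it implies each action.
    System.out.println(all.implies(read)); // true
    // Decoding ALL's code yields the seven concrete actions; ALL itself is excluded.
    System.out.println(factory.getActionsByCode(all.getActionCode()).size()); // 7
  }
}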
diff --git a/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaAuthorizable.java b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaAuthorizable.java
new file mode 100644
index 000000000..18600f18d
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/KafkaAuthorizable.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.core.model.kafka;
+
+import org.apache.sentry.core.common.Authorizable;
+
+/**
+ * This interface represents an authorizable resource in the Kafka component.
+ * It is used in conjunction with the generic authorization model (SENTRY-398).
+ *
+ * Authorizables are mapped to Kafka resources as follows.
+ *
+ * CLUSTER -> The Kafka cluster resource; users are required to have access to this resource
+ *            in order to perform cluster-level actions such as creating or deleting topics.
+ *
+ * HOST -> Kafka authorizes requests based on the host they come from. Host is not a
+ *         resource in Kafka itself, but every Kafka ACL carries a host, so Sentry
+ *         treats Host as a Kafka resource in order to provide host-based authorization.
+ *
+ * TOPIC -> The Kafka topic resource; users are required to have access to this resource in
+ *          order to perform topic-level actions such as reading from or writing to a topic.
+ *
+ * CONSUMERGROUP -> The Kafka consumer-group resource; users are required to have access to this
+ *                  resource in order to perform consumer-group-level actions such as joining a
+ *                  group or querying a group's offset for a partition.
+ */
+public interface KafkaAuthorizable extends Authorizable {
+  /**
+   * Types of resources that Kafka supports authorization on.
+   */
+  public enum AuthorizableType {
+    CLUSTER,
+    HOST,
+    TOPIC,
+    CONSUMERGROUP
+  };
+
+  /**
+   * Get the type of this Kafka authorizable.
+   * @return Type of this Kafka authorizable.
+   */
+  public AuthorizableType getAuthzType(); // NOPMD - TODO(sdp) Remove before merge
+}
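How a Kafka binding would combine these authorizables into a privilege scope is outside this patch; the sketch below only illustrates the interface itself. The class name and the Host-then-resource ordering are assumptions, not something the patch defines:

package org.apache.sentry.core.model.kafka;

import java.util.Arrays;
import java.util.List;

public class KafkaAuthorizableExample {
  public static void main(String[] args) {
    // A hypothetical topic-level privilege scoped to a single host.
    List<KafkaAuthorizable> privilege = Arrays.asList(new Host("host1"), new Topic("t1"));
    for (KafkaAuthorizable authorizable : privilege) {
      // Prints HOST=host1 and then TOPIC=t1.
      System.out.println(authorizable.getTypeName() + "=" + authorizable.getName());
    }
  }
}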
diff --git a/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Topic.java b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Topic.java
new file mode 100644
index 000000000..2b7c05ea4
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/main/java/org/apache/sentry/core/model/kafka/Topic.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.core.model.kafka;
+/**
+ * Represents the Topic authorizable in the Kafka model.
+ */
+public class Topic implements KafkaAuthorizable {
+  private String name;
+
+  /**
+   * Create a Topic authorizable with the given name.
+   *
+   * @param name Name of a Kafka topic.
+   */
+  public Topic(String name) {
+    this.name = name;
+  }
+
+  /**
+   * Get the type of Kafka's topic authorizable.
+   *
+   * @return Type of Kafka's topic authorizable.
+   */
+  @Override
+  public AuthorizableType getAuthzType() {
+    return AuthorizableType.TOPIC;
+  }
+
+  /**
+   * Get the name of Kafka's topic.
+   *
+   * @return Name of Kafka's topic.
+   */
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Get the type name of Kafka's topic authorizable.
+   *
+   * @return Type name of Kafka's topic authorizable.
+   */
+  @Override
+  public String getTypeName() {
+    return getAuthzType().name();
+  }
+}
diff --git a/sentry-core/sentry-core-model-kafka/src/test/java/org/apache/sentry/core/model/kafka/TestKafkaAction.java b/sentry-core/sentry-core-model-kafka/src/test/java/org/apache/sentry/core/model/kafka/TestKafkaAction.java
new file mode 100644
index 000000000..dcab5d526
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/test/java/org/apache/sentry/core/model/kafka/TestKafkaAction.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.sentry.core.model.kafka;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.sentry.core.model.kafka.KafkaActionFactory.KafkaAction;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Test that KafkaActionFactory creates the expected Kafka action instances.
+ */ +public class TestKafkaAction { + private KafkaActionFactory factory = KafkaActionFactory.getInstance(); + + @Test + public void testImpliesAction() { + KafkaAction readAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.READ); + KafkaAction writeAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.WRITE); + KafkaAction createAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.CREATE); + KafkaAction deleteAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.DELETE); + KafkaAction alterAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.ALTER); + KafkaAction describeAction = + (KafkaAction) factory.getActionByName(KafkaActionConstant.DESCRIBE); + KafkaAction adminAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.CLUSTER_ACTION); + KafkaAction allAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.ALL); + + assertTrue(allAction.implies(readAction)); + assertTrue(allAction.implies(writeAction)); + assertTrue(allAction.implies(createAction)); + assertTrue(allAction.implies(deleteAction)); + assertTrue(allAction.implies(alterAction)); + assertTrue(allAction.implies(describeAction)); + assertTrue(allAction.implies(adminAction)); + assertTrue(allAction.implies(allAction)); + + assertTrue(readAction.implies(readAction)); + assertFalse(readAction.implies(writeAction)); + assertFalse(readAction.implies(createAction)); + assertFalse(readAction.implies(deleteAction)); + assertFalse(readAction.implies(alterAction)); + assertFalse(readAction.implies(describeAction)); + assertFalse(readAction.implies(adminAction)); + assertFalse(readAction.implies(allAction)); + + assertFalse(writeAction.implies(readAction)); + assertTrue(writeAction.implies(writeAction)); + assertFalse(writeAction.implies(createAction)); + assertFalse(writeAction.implies(deleteAction)); + assertFalse(writeAction.implies(alterAction)); + assertFalse(writeAction.implies(describeAction)); + assertFalse(writeAction.implies(adminAction)); + assertFalse(writeAction.implies(allAction)); + + assertFalse(createAction.implies(readAction)); + assertFalse(createAction.implies(writeAction)); + assertTrue(createAction.implies(createAction)); + assertFalse(createAction.implies(deleteAction)); + assertFalse(createAction.implies(alterAction)); + assertFalse(createAction.implies(describeAction)); + assertFalse(createAction.implies(adminAction)); + assertFalse(createAction.implies(allAction)); + + assertFalse(deleteAction.implies(readAction)); + assertFalse(deleteAction.implies(writeAction)); + assertFalse(deleteAction.implies(createAction)); + assertTrue(deleteAction.implies(deleteAction)); + assertFalse(deleteAction.implies(alterAction)); + assertFalse(deleteAction.implies(describeAction)); + assertFalse(deleteAction.implies(adminAction)); + assertFalse(deleteAction.implies(allAction)); + + assertFalse(alterAction.implies(readAction)); + assertFalse(alterAction.implies(writeAction)); + assertFalse(alterAction.implies(createAction)); + assertFalse(alterAction.implies(deleteAction)); + assertTrue(alterAction.implies(alterAction)); + assertFalse(alterAction.implies(describeAction)); + assertFalse(alterAction.implies(adminAction)); + assertFalse(alterAction.implies(allAction)); + + assertFalse(describeAction.implies(readAction)); + assertFalse(describeAction.implies(writeAction)); + assertFalse(describeAction.implies(createAction)); + assertFalse(describeAction.implies(deleteAction)); + assertFalse(describeAction.implies(alterAction)); + 
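+    // implies() is a bitwise containment check: a single-bit action such as DESCRIBE is covered only by itself and by ALL.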
assertTrue(describeAction.implies(describeAction)); + assertFalse(describeAction.implies(adminAction)); + assertFalse(describeAction.implies(allAction)); + + assertFalse(adminAction.implies(readAction)); + assertFalse(adminAction.implies(writeAction)); + assertFalse(adminAction.implies(createAction)); + assertFalse(adminAction.implies(deleteAction)); + assertFalse(adminAction.implies(alterAction)); + assertFalse(adminAction.implies(describeAction)); + assertTrue(adminAction.implies(adminAction)); + assertFalse(adminAction.implies(allAction)); + } + + @Test + public void testGetActionByName() throws Exception { + KafkaAction readAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.READ); + KafkaAction writeAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.WRITE); + KafkaAction createAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.CREATE); + KafkaAction deleteAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.DELETE); + KafkaAction alterAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.ALTER); + KafkaAction describeAction = + (KafkaAction) factory.getActionByName(KafkaActionConstant.DESCRIBE); + KafkaAction adminAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.CLUSTER_ACTION); + KafkaAction allAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.ALL); + + assertTrue(readAction.equals(new KafkaAction(KafkaActionConstant.READ))); + assertTrue(writeAction.equals(new KafkaAction(KafkaActionConstant.WRITE))); + assertTrue(createAction.equals(new KafkaAction(KafkaActionConstant.CREATE))); + assertTrue(deleteAction.equals(new KafkaAction(KafkaActionConstant.DELETE))); + assertTrue(alterAction.equals(new KafkaAction(KafkaActionConstant.ALTER))); + assertTrue(describeAction.equals(new KafkaAction(KafkaActionConstant.DESCRIBE))); + assertTrue(adminAction.equals(new KafkaAction(KafkaActionConstant.CLUSTER_ACTION))); + assertTrue(allAction.equals(new KafkaAction(KafkaActionConstant.ALL))); + } + + @Test + public void testGetActionsByCode() throws Exception { + KafkaAction readAction = new KafkaAction(KafkaActionConstant.READ); + KafkaAction writeAction = new KafkaAction(KafkaActionConstant.WRITE); + KafkaAction createAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.CREATE); + KafkaAction deleteAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.DELETE); + KafkaAction alterAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.ALTER); + KafkaAction describeAction = + (KafkaAction) factory.getActionByName(KafkaActionConstant.DESCRIBE); + KafkaAction adminAction = (KafkaAction) factory.getActionByName(KafkaActionConstant.CLUSTER_ACTION); + KafkaAction allAction = new KafkaAction(KafkaActionConstant.ALL); + + assertEquals(Lists.newArrayList(readAction), + factory.getActionsByCode(readAction.getActionCode())); + assertEquals(Lists.newArrayList(writeAction), + factory.getActionsByCode(writeAction.getActionCode())); + assertEquals(Lists.newArrayList(createAction), + factory.getActionsByCode(createAction.getActionCode())); + assertEquals(Lists.newArrayList(deleteAction), + factory.getActionsByCode(deleteAction.getActionCode())); + assertEquals(Lists.newArrayList(alterAction), + factory.getActionsByCode(alterAction.getActionCode())); + assertEquals(Lists.newArrayList(describeAction), + factory.getActionsByCode(describeAction.getActionCode())); + assertEquals(Lists.newArrayList(adminAction), + factory.getActionsByCode(adminAction.getActionCode())); + 
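+    // getActionsByCode(ALL) expands to the seven concrete actions; KafkaActionType.getActionByCode excludes ALL itself.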
+    assertEquals(Lists.newArrayList(readAction, writeAction, createAction, deleteAction,
+        alterAction, describeAction, adminAction),
+        factory.getActionsByCode(allAction.getActionCode()));
+  }
+
+  @Test
+  public void testGetActionForInvalidName() {
+    assertEquals("Expected no Kafka action for an invalid name.", null, factory.getActionByName("INVALID"));
+  }
+
+  @Test
+  public void testGetActionForInvalidCode() {
+    assertEquals("Expected no Kafka actions for an invalid code.", 0, factory.getActionsByCode(0).size());
+  }
+}
diff --git a/sentry-core/sentry-core-model-kafka/src/test/java/org/apache/sentry/core/model/kafka/TestKafkaAuthorizable.java b/sentry-core/sentry-core-model-kafka/src/test/java/org/apache/sentry/core/model/kafka/TestKafkaAuthorizable.java
new file mode 100644
index 000000000..04316f289
--- /dev/null
+++ b/sentry-core/sentry-core-model-kafka/src/test/java/org/apache/sentry/core/model/kafka/TestKafkaAuthorizable.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.core.model.kafka;
+
+import org.apache.sentry.core.model.kafka.KafkaAuthorizable.AuthorizableType;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test that the proper KafkaAuthorizable is created for each Kafka resource.
+ */
+public class TestKafkaAuthorizable {
+
+  @Test
+  public void testName() throws Exception {
+    String name = "simple";
+    Host host = new Host(name);
+    Assert.assertEquals(name, host.getName());
+
+    Cluster cluster = new Cluster();
+    Assert.assertEquals(Cluster.NAME, cluster.getName());
+
+    Topic topic = new Topic(name);
+    Assert.assertEquals(name, topic.getName());
+
+    ConsumerGroup consumerGroup = new ConsumerGroup(name);
+    Assert.assertEquals(name, consumerGroup.getName());
+  }
+
+  @Test
+  public void testAuthType() throws Exception {
+    Host host = new Host("host1");
+    Assert.assertEquals(AuthorizableType.HOST, host.getAuthzType());
+
+    Cluster cluster = new Cluster();
+    Assert.assertEquals(AuthorizableType.CLUSTER, cluster.getAuthzType());
+
+    Topic topic = new Topic("topic1");
+    Assert.assertEquals(AuthorizableType.TOPIC, topic.getAuthzType());
+
+    ConsumerGroup consumerGroup = new ConsumerGroup("consumerGroup1");
+    Assert.assertEquals(AuthorizableType.CONSUMERGROUP, consumerGroup.getAuthzType());
+  }
+}
diff --git a/sentry-core/sentry-core-model-search/pom.xml b/sentry-core/sentry-core-model-search/pom.xml
index 3c4aaab32..5f0adc393 100644
--- a/sentry-core/sentry-core-model-search/pom.xml
+++ b/sentry-core/sentry-core-model-search/pom.xml
@@ -21,7 +21,7 @@ limitations under the License.
     <groupId>org.apache.sentry</groupId>
     <artifactId>sentry-core</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
+    <version>1.7.0-incubating-SNAPSHOT</version>
   </parent>
 
   <artifactId>sentry-core-model-search</artifactId>
diff --git a/sentry-core/sentry-core-model-search/src/main/java/org/apache/sentry/core/model/search/SearchConstants.java b/sentry-core/sentry-core-model-search/src/main/java/org/apache/sentry/core/model/search/SearchConstants.java
index 16b919527..9f76bda61 100644
--- a/sentry-core/sentry-core-model-search/src/main/java/org/apache/sentry/core/model/search/SearchConstants.java
+++ b/sentry-core/sentry-core-model-search/src/main/java/org/apache/sentry/core/model/search/SearchConstants.java
@@ -22,10 +22,10 @@ public class SearchConstants {
   public static final String QUERY = "query";
   public static final String UPDATE = "update";
   /**
-   * The property of sentry.search.cluster was used to distinguish itself from multiple search clusters. For example, there are two
-   * search clusters: cluster1 and cluster2 implemented authorization via sentry, and it must set the value of
-   * sentry.search.cluster=cluster1 or cluster2 to communicate with sentry service for authorization
+   * The property sentry.search.service is used to distinguish between multiple search services. For example, if two
+   * search services, service1 and service2, implement authorization via Sentry, each must set the value of
+   * sentry.search.service (to service1 or service2) to communicate with the Sentry service for authorization
    */
-  public static final String SENTRY_SEARCH_CLUSTER_KEY = "sentry.search.cluster";
-  public static final String SENTRY_SEARCH_CLUSTER_DEFAULT = "clutser1";
+  public static final String SENTRY_SEARCH_SERVICE_KEY = "sentry.search.service";
+  public static final String SENTRY_SEARCH_SERVICE_DEFAULT = "service1";
 }
diff --git a/sentry-core/sentry-core-model-search/src/main/java/org/apache/sentry/core/model/search/SearchModelAuthorizable.java b/sentry-core/sentry-core-model-search/src/main/java/org/apache/sentry/core/model/search/SearchModelAuthorizable.java
index d6a9d54a0..5a55963d4 100644
--- a/sentry-core/sentry-core-model-search/src/main/java/org/apache/sentry/core/model/search/SearchModelAuthorizable.java
+++ b/sentry-core/sentry-core-model-search/src/main/java/org/apache/sentry/core/model/search/SearchModelAuthorizable.java
@@ -25,5 +25,5 @@ public enum AuthorizableType {
   Field
 };
 
-  public AuthorizableType getAuthzType();
+  AuthorizableType getAuthzType();
 }
diff --git a/sentry-core/sentry-core-model-search/src/test/java/org/apache/sentry/core/search/TestCollection.java b/sentry-core/sentry-core-model-search/src/test/java/org/apache/sentry/core/search/TestCollection.java
index 1bf70696f..231140163 100644
--- a/sentry-core/sentry-core-model-search/src/test/java/org/apache/sentry/core/search/TestCollection.java
+++ b/sentry-core/sentry-core-model-search/src/test/java/org/apache/sentry/core/search/TestCollection.java
@@ -16,7 +16,7 @@
  * limitations under the License.
*/ -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.core.model.search.Collection; import org.junit.Test; diff --git a/sentry-core/sentry-core-model-search/src/test/java/org/apache/sentry/core/search/TestSearchBitFieldAction.java b/sentry-core/sentry-core-model-search/src/test/java/org/apache/sentry/core/search/TestSearchBitFieldAction.java index 0ae49d661..0056f4085 100644 --- a/sentry-core/sentry-core-model-search/src/test/java/org/apache/sentry/core/search/TestSearchBitFieldAction.java +++ b/sentry-core/sentry-core-model-search/src/test/java/org/apache/sentry/core/search/TestSearchBitFieldAction.java @@ -17,8 +17,6 @@ */ package org.apache.sentry.core.search; -import java.util.List; - import org.apache.sentry.core.model.search.SearchActionFactory; import org.apache.sentry.core.model.search.SearchActionFactory.SearchAction; import org.apache.sentry.core.model.search.SearchActionFactory.SearchBitFieldAction; @@ -27,9 +25,9 @@ import com.google.common.collect.Lists; -import static junit.framework.Assert.assertTrue; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertEquals; public class TestSearchBitFieldAction { SearchActionFactory actionFactory = new SearchActionFactory(); diff --git a/sentry-core/sentry-core-model-sqoop/pom.xml b/sentry-core/sentry-core-model-sqoop/pom.xml index 3626190d8..b5000590a 100644 --- a/sentry-core/sentry-core-model-sqoop/pom.xml +++ b/sentry-core/sentry-core-model-sqoop/pom.xml @@ -21,7 +21,7 @@ limitations under the License. org.apache.sentry sentry-core - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-core-model-sqoop diff --git a/sentry-core/sentry-core-model-sqoop/src/main/java/org/apache/sentry/core/model/sqoop/SqoopActionFactory.java b/sentry-core/sentry-core-model-sqoop/src/main/java/org/apache/sentry/core/model/sqoop/SqoopActionFactory.java index c1f33ecc8..e7ba5f171 100644 --- a/sentry-core/sentry-core-model-sqoop/src/main/java/org/apache/sentry/core/model/sqoop/SqoopActionFactory.java +++ b/sentry-core/sentry-core-model-sqoop/src/main/java/org/apache/sentry/core/model/sqoop/SqoopActionFactory.java @@ -56,8 +56,7 @@ static SqoopActionType getActionByName(String name) { static List getActionByCode(int code) { List actions = Lists.newArrayList(); for (SqoopActionType action : SqoopActionType.values()) { - if (((action.code & code) == action.code ) && - (action != SqoopActionType.ALL)) { + if ((action.code & code) == action.code && action != SqoopActionType.ALL) { //SqoopActionType.ALL action should not return in the list actions.add(action); } diff --git a/sentry-core/sentry-core-model-sqoop/src/main/java/org/apache/sentry/core/model/sqoop/SqoopAuthorizable.java b/sentry-core/sentry-core-model-sqoop/src/main/java/org/apache/sentry/core/model/sqoop/SqoopAuthorizable.java index b57f4a7eb..934875efb 100644 --- a/sentry-core/sentry-core-model-sqoop/src/main/java/org/apache/sentry/core/model/sqoop/SqoopAuthorizable.java +++ b/sentry-core/sentry-core-model-sqoop/src/main/java/org/apache/sentry/core/model/sqoop/SqoopAuthorizable.java @@ -23,7 +23,7 @@ * It used conjunction with the generic authorization model(SENTRY-398). 
*/ public interface SqoopAuthorizable extends Authorizable { - public static final String ALL = "*"; + String ALL = "*"; public enum AuthorizableType { SERVER, CONNECTOR, @@ -31,5 +31,5 @@ public enum AuthorizableType { JOB }; - public AuthorizableType getAuthzType(); + AuthorizableType getAuthzType(); } diff --git a/sentry-core/sentry-core-model-sqoop/src/test/java/org/apache/sentry/core/model/sqoop/TestSqoopAction.java b/sentry-core/sentry-core-model-sqoop/src/test/java/org/apache/sentry/core/model/sqoop/TestSqoopAction.java index 8a86f735d..9c86158e2 100644 --- a/sentry-core/sentry-core-model-sqoop/src/test/java/org/apache/sentry/core/model/sqoop/TestSqoopAction.java +++ b/sentry-core/sentry-core-model-sqoop/src/test/java/org/apache/sentry/core/model/sqoop/TestSqoopAction.java @@ -16,9 +16,9 @@ */ package org.apache.sentry.core.model.sqoop; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import org.apache.sentry.core.model.sqoop.SqoopActionFactory.SqoopAction; import org.junit.Test; diff --git a/sentry-core/sentry-core-model-sqoop/src/test/java/org/apache/sentry/core/model/sqoop/TestSqoopAuthorizable.java b/sentry-core/sentry-core-model-sqoop/src/test/java/org/apache/sentry/core/model/sqoop/TestSqoopAuthorizable.java index c346290d1..502f7b508 100644 --- a/sentry-core/sentry-core-model-sqoop/src/test/java/org/apache/sentry/core/model/sqoop/TestSqoopAuthorizable.java +++ b/sentry-core/sentry-core-model-sqoop/src/test/java/org/apache/sentry/core/model/sqoop/TestSqoopAuthorizable.java @@ -17,12 +17,8 @@ package org.apache.sentry.core.model.sqoop; -import junit.framework.Assert; +import org.junit.Assert; -import org.apache.sentry.core.model.sqoop.Connector; -import org.apache.sentry.core.model.sqoop.Job; -import org.apache.sentry.core.model.sqoop.Link; -import org.apache.sentry.core.model.sqoop.Server; import org.apache.sentry.core.model.sqoop.SqoopAuthorizable.AuthorizableType; import org.junit.Test; diff --git a/sentry-dist/pom.xml b/sentry-dist/pom.xml index f7a663bcb..4e078f08b 100644 --- a/sentry-dist/pom.xml +++ b/sentry-dist/pom.xml @@ -20,7 +20,7 @@ limitations under the License. org.apache.sentry sentry - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-dist Sentry Distribution @@ -54,6 +54,14 @@ limitations under the License. org.apache.sentry sentry-binding-solr + + org.apache.sentry + sentry-binding-sqoop + + + org.apache.sentry + solr-sentry-core + org.apache.sentry solr-sentry-handlers @@ -90,6 +98,10 @@ limitations under the License. org.apache.sentry sentry-policy-search + + org.apache.sentry + sentry-policy-sqoop + diff --git a/sentry-dist/src/main/assembly/bin.xml b/sentry-dist/src/main/assembly/bin.xml index beaa34846..5727fc964 100644 --- a/sentry-dist/src/main/assembly/bin.xml +++ b/sentry-dist/src/main/assembly/bin.xml @@ -71,6 +71,16 @@ org.apache.derby:derby + + lib/plugins + false + false + true + true + + org.apache.curator:curator-x-discovery + + diff --git a/sentry-hdfs/pom.xml b/sentry-hdfs/pom.xml index 145523560..06081c5e8 100644 --- a/sentry-hdfs/pom.xml +++ b/sentry-hdfs/pom.xml @@ -22,7 +22,7 @@ limitations under the License. 
org.apache.sentry sentry - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-hdfs diff --git a/sentry-hdfs/sentry-hdfs-common/.gitignore b/sentry-hdfs/sentry-hdfs-common/.gitignore deleted file mode 100644 index 91ad75bb4..000000000 --- a/sentry-hdfs/sentry-hdfs-common/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -*.class -target/ -.classpath -.project -.settings -.metadata -.idea/ -*.iml -derby.log -datanucleus.log -sentry-core/sentry-core-common/src/gen -**/TempStatsStore/ -# Package Files # -*.jar -*.war -*.ear -test-output/ -maven-repo/ diff --git a/sentry-hdfs/sentry-hdfs-common/pom.xml b/sentry-hdfs/sentry-hdfs-common/pom.xml index 34f69e95e..c748e5670 100644 --- a/sentry-hdfs/sentry-hdfs-common/pom.xml +++ b/sentry-hdfs/sentry-hdfs-common/pom.xml @@ -21,7 +21,7 @@ limitations under the License. org.apache.sentry sentry-hdfs - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-hdfs-common @@ -54,6 +54,32 @@ limitations under the License. hadoop-common provided + + org.apache.curator + curator-x-discovery + ${curator.version} + + + org.apache.hadoop + hadoop-minikdc + test + + + org.apache.sentry + sentry-provider-db + provided + + + org.apache.sentry + sentry-provider-file + test + + + org.apache.sentry + sentry-provider-db + test-jar + test + ${basedir}/src/main/java diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/SentryHDFSService.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/SentryHDFSService.java index 663fe4e3a..254f72bc6 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/SentryHDFSService.java +++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/SentryHDFSService.java @@ -3347,25 +3347,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_related_pat case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map130 = iprot.readMapBegin(); - struct.success = new HashMap>(2*_map130.size); - for (int _i131 = 0; _i131 < _map130.size; ++_i131) + org.apache.thrift.protocol.TMap _map138 = iprot.readMapBegin(); + struct.success = new HashMap>(2*_map138.size); + for (int _i139 = 0; _i139 < _map138.size; ++_i139) { - String _key132; // required - List _val133; // required - _key132 = iprot.readString(); + String _key140; // required + List _val141; // required + _key140 = iprot.readString(); { - org.apache.thrift.protocol.TList _list134 = iprot.readListBegin(); - _val133 = new ArrayList(_list134.size); - for (int _i135 = 0; _i135 < _list134.size; ++_i135) + org.apache.thrift.protocol.TList _list142 = iprot.readListBegin(); + _val141 = new ArrayList(_list142.size); + for (int _i143 = 0; _i143 < _list142.size; ++_i143) { - String _elem136; // required - _elem136 = iprot.readString(); - _val133.add(_elem136); + String _elem144; // required + _elem144 = iprot.readString(); + _val141.add(_elem144); } iprot.readListEnd(); } - struct.success.put(_key132, _val133); + struct.success.put(_key140, _val141); } iprot.readMapEnd(); } @@ -3391,14 +3391,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_related_pa oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.success.size())); - for (Map.Entry> _iter137 : 
struct.success.entrySet()) + for (Map.Entry> _iter145 : struct.success.entrySet()) { - oprot.writeString(_iter137.getKey()); + oprot.writeString(_iter145.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter137.getValue().size())); - for (String _iter138 : _iter137.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter145.getValue().size())); + for (String _iter146 : _iter145.getValue()) { - oprot.writeString(_iter138); + oprot.writeString(_iter146); } oprot.writeListEnd(); } @@ -3432,14 +3432,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_related_pat if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry> _iter139 : struct.success.entrySet()) + for (Map.Entry> _iter147 : struct.success.entrySet()) { - oprot.writeString(_iter139.getKey()); + oprot.writeString(_iter147.getKey()); { - oprot.writeI32(_iter139.getValue().size()); - for (String _iter140 : _iter139.getValue()) + oprot.writeI32(_iter147.getValue().size()); + for (String _iter148 : _iter147.getValue()) { - oprot.writeString(_iter140); + oprot.writeString(_iter148); } } } @@ -3453,24 +3453,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_related_path BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map141 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.success = new HashMap>(2*_map141.size); - for (int _i142 = 0; _i142 < _map141.size; ++_i142) + org.apache.thrift.protocol.TMap _map149 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.success = new HashMap>(2*_map149.size); + for (int _i150 = 0; _i150 < _map149.size; ++_i150) { - String _key143; // required - List _val144; // required - _key143 = iprot.readString(); + String _key151; // required + List _val152; // required + _key151 = iprot.readString(); { - org.apache.thrift.protocol.TList _list145 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - _val144 = new ArrayList(_list145.size); - for (int _i146 = 0; _i146 < _list145.size; ++_i146) + org.apache.thrift.protocol.TList _list153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + _val152 = new ArrayList(_list153.size); + for (int _i154 = 0; _i154 < _list153.size; ++_i154) { - String _elem147; // required - _elem147 = iprot.readString(); - _val144.add(_elem147); + String _elem155; // required + _elem155 = iprot.readString(); + _val152.add(_elem155); } } - struct.success.put(_key143, _val144); + struct.success.put(_key151, _val152); } } struct.setSuccessIsSet(true); diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TAuthzUpdateResponse.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TAuthzUpdateResponse.java index 480c264d9..ec1d2af36 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TAuthzUpdateResponse.java +++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TAuthzUpdateResponse.java @@ -443,14 +443,14 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, TAuthzUpdateRespons case 1: // AUTHZ_PATH_UPDATE if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list114 = iprot.readListBegin(); - struct.authzPathUpdate = new ArrayList(_list114.size); - for (int _i115 = 0; _i115 < _list114.size; ++_i115) + org.apache.thrift.protocol.TList _list122 = iprot.readListBegin(); + struct.authzPathUpdate = new ArrayList(_list122.size); + for (int _i123 = 0; _i123 < _list122.size; ++_i123) { - TPathsUpdate _elem116; // required - _elem116 = new TPathsUpdate(); - _elem116.read(iprot); - struct.authzPathUpdate.add(_elem116); + TPathsUpdate _elem124; // required + _elem124 = new TPathsUpdate(); + _elem124.read(iprot); + struct.authzPathUpdate.add(_elem124); } iprot.readListEnd(); } @@ -462,14 +462,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAuthzUpdateRespons case 2: // AUTHZ_PERM_UPDATE if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list117 = iprot.readListBegin(); - struct.authzPermUpdate = new ArrayList(_list117.size); - for (int _i118 = 0; _i118 < _list117.size; ++_i118) + org.apache.thrift.protocol.TList _list125 = iprot.readListBegin(); + struct.authzPermUpdate = new ArrayList(_list125.size); + for (int _i126 = 0; _i126 < _list125.size; ++_i126) { - TPermissionsUpdate _elem119; // required - _elem119 = new TPermissionsUpdate(); - _elem119.read(iprot); - struct.authzPermUpdate.add(_elem119); + TPermissionsUpdate _elem127; // required + _elem127 = new TPermissionsUpdate(); + _elem127.read(iprot); + struct.authzPermUpdate.add(_elem127); } iprot.readListEnd(); } @@ -496,9 +496,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAuthzUpdateRespon oprot.writeFieldBegin(AUTHZ_PATH_UPDATE_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.authzPathUpdate.size())); - for (TPathsUpdate _iter120 : struct.authzPathUpdate) + for (TPathsUpdate _iter128 : struct.authzPathUpdate) { - _iter120.write(oprot); + _iter128.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAuthzUpdateRespon oprot.writeFieldBegin(AUTHZ_PERM_UPDATE_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.authzPermUpdate.size())); - for (TPermissionsUpdate _iter121 : struct.authzPermUpdate) + for (TPermissionsUpdate _iter129 : struct.authzPermUpdate) { - _iter121.write(oprot); + _iter129.write(oprot); } oprot.writeListEnd(); } @@ -547,18 +547,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAuthzUpdateRespons if (struct.isSetAuthzPathUpdate()) { { oprot.writeI32(struct.authzPathUpdate.size()); - for (TPathsUpdate _iter122 : struct.authzPathUpdate) + for (TPathsUpdate _iter130 : struct.authzPathUpdate) { - _iter122.write(oprot); + _iter130.write(oprot); } } } if (struct.isSetAuthzPermUpdate()) { { oprot.writeI32(struct.authzPermUpdate.size()); - for (TPermissionsUpdate _iter123 : struct.authzPermUpdate) + for (TPermissionsUpdate _iter131 : struct.authzPermUpdate) { - _iter123.write(oprot); + _iter131.write(oprot); } } } @@ -570,28 +570,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAuthzUpdateResponse BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list124 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - 
struct.authzPathUpdate = new ArrayList(_list124.size); - for (int _i125 = 0; _i125 < _list124.size; ++_i125) + org.apache.thrift.protocol.TList _list132 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.authzPathUpdate = new ArrayList(_list132.size); + for (int _i133 = 0; _i133 < _list132.size; ++_i133) { - TPathsUpdate _elem126; // required - _elem126 = new TPathsUpdate(); - _elem126.read(iprot); - struct.authzPathUpdate.add(_elem126); + TPathsUpdate _elem134; // required + _elem134 = new TPathsUpdate(); + _elem134.read(iprot); + struct.authzPathUpdate.add(_elem134); } } struct.setAuthzPathUpdateIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.authzPermUpdate = new ArrayList(_list127.size); - for (int _i128 = 0; _i128 < _list127.size; ++_i128) + org.apache.thrift.protocol.TList _list135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.authzPermUpdate = new ArrayList(_list135.size); + for (int _i136 = 0; _i136 < _list135.size; ++_i136) { - TPermissionsUpdate _elem129; // required - _elem129 = new TPermissionsUpdate(); - _elem129.read(iprot); - struct.authzPermUpdate.add(_elem129); + TPermissionsUpdate _elem137; // required + _elem137 = new TPermissionsUpdate(); + _elem137.read(iprot); + struct.authzPermUpdate.add(_elem137); } } struct.setAuthzPermUpdateIsSet(true); diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathEntry.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathEntry.java index a2a7f7ba9..3e0313020 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathEntry.java +++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathEntry.java @@ -36,8 +36,8 @@ public class TPathEntry implements org.apache.thrift.TBase, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,15 +47,15 @@ public class TPathEntry implements org.apache.thrift.TBase children; // required + private Set authzObjs; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TYPE((short)1, "type"), PATH_ELEMENT((short)2, "pathElement"), - AUTHZ_OBJ((short)3, "authzObj"), - CHILDREN((short)4, "children"); + CHILDREN((short)4, "children"), + AUTHZ_OBJS((short)5, "authzObjs"); private static final Map byName = new HashMap(); @@ -74,10 +74,10 @@ public static _Fields findByThriftId(int fieldId) { return TYPE; case 2: // PATH_ELEMENT return PATH_ELEMENT; - case 3: // AUTHZ_OBJ - return AUTHZ_OBJ; case 4: // CHILDREN return CHILDREN; + case 5: // AUTHZ_OBJS + return AUTHZ_OBJS; default: return null; } @@ -120,7 +120,7 @@ public String getFieldName() { // isset id assignments private static final int __TYPE_ISSET_ID = 0; private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.AUTHZ_OBJ}; + private _Fields optionals[] = {_Fields.AUTHZ_OBJS}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -128,11 +128,12 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE))); tmpMap.put(_Fields.PATH_ELEMENT, new org.apache.thrift.meta_data.FieldMetaData("pathElement", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.AUTHZ_OBJ, new org.apache.thrift.meta_data.FieldMetaData("authzObj", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.CHILDREN, new org.apache.thrift.meta_data.FieldMetaData("children", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)))); + tmpMap.put(_Fields.AUTHZ_OBJS, new org.apache.thrift.meta_data.FieldMetaData("authzObjs", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPathEntry.class, metaDataMap); } @@ -161,9 +162,6 @@ public TPathEntry(TPathEntry other) { if (other.isSetPathElement()) { this.pathElement = other.pathElement; } - if (other.isSetAuthzObj()) { - this.authzObj = other.authzObj; - } if (other.isSetChildren()) { Set __this__children = new HashSet(); for (Integer other_element : other.children) { @@ -171,6 +169,13 @@ public TPathEntry(TPathEntry other) { } this.children = __this__children; } + if (other.isSetAuthzObjs()) { + Set __this__authzObjs = new HashSet(); + for (String other_element : other.authzObjs) { + __this__authzObjs.add(other_element); + } + this.authzObjs = __this__authzObjs; + } } public TPathEntry deepCopy() { @@ -182,8 +187,8 @@ public void clear() { setTypeIsSet(false); this.type = 0; this.pathElement = null; - this.authzObj = null; this.children = null; + this.authzObjs = null; } public byte getType() { @@ -231,29 +236,6 @@ public void setPathElementIsSet(boolean value) { } } - public String getAuthzObj() { - return this.authzObj; - } - - public void setAuthzObj(String authzObj) { - 
this.authzObj = authzObj; - } - - public void unsetAuthzObj() { - this.authzObj = null; - } - - /** Returns true if field authzObj is set (has been assigned a value) and false otherwise */ - public boolean isSetAuthzObj() { - return this.authzObj != null; - } - - public void setAuthzObjIsSet(boolean value) { - if (!value) { - this.authzObj = null; - } - } - public int getChildrenSize() { return (this.children == null) ? 0 : this.children.size(); } @@ -292,6 +274,44 @@ public void setChildrenIsSet(boolean value) { } } + public int getAuthzObjsSize() { + return (this.authzObjs == null) ? 0 : this.authzObjs.size(); + } + + public java.util.Iterator getAuthzObjsIterator() { + return (this.authzObjs == null) ? null : this.authzObjs.iterator(); + } + + public void addToAuthzObjs(String elem) { + if (this.authzObjs == null) { + this.authzObjs = new HashSet(); + } + this.authzObjs.add(elem); + } + + public Set getAuthzObjs() { + return this.authzObjs; + } + + public void setAuthzObjs(Set authzObjs) { + this.authzObjs = authzObjs; + } + + public void unsetAuthzObjs() { + this.authzObjs = null; + } + + /** Returns true if field authzObjs is set (has been assigned a value) and false otherwise */ + public boolean isSetAuthzObjs() { + return this.authzObjs != null; + } + + public void setAuthzObjsIsSet(boolean value) { + if (!value) { + this.authzObjs = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TYPE: @@ -310,19 +330,19 @@ public void setFieldValue(_Fields field, Object value) { } break; - case AUTHZ_OBJ: + case CHILDREN: if (value == null) { - unsetAuthzObj(); + unsetChildren(); } else { - setAuthzObj((String)value); + setChildren((Set)value); } break; - case CHILDREN: + case AUTHZ_OBJS: if (value == null) { - unsetChildren(); + unsetAuthzObjs(); } else { - setChildren((Set)value); + setAuthzObjs((Set)value); } break; @@ -337,12 +357,12 @@ public Object getFieldValue(_Fields field) { case PATH_ELEMENT: return getPathElement(); - case AUTHZ_OBJ: - return getAuthzObj(); - case CHILDREN: return getChildren(); + case AUTHZ_OBJS: + return getAuthzObjs(); + } throw new IllegalStateException(); } @@ -358,10 +378,10 @@ public boolean isSet(_Fields field) { return isSetType(); case PATH_ELEMENT: return isSetPathElement(); - case AUTHZ_OBJ: - return isSetAuthzObj(); case CHILDREN: return isSetChildren(); + case AUTHZ_OBJS: + return isSetAuthzObjs(); } throw new IllegalStateException(); } @@ -397,15 +417,6 @@ public boolean equals(TPathEntry that) { return false; } - boolean this_present_authzObj = true && this.isSetAuthzObj(); - boolean that_present_authzObj = true && that.isSetAuthzObj(); - if (this_present_authzObj || that_present_authzObj) { - if (!(this_present_authzObj && that_present_authzObj)) - return false; - if (!this.authzObj.equals(that.authzObj)) - return false; - } - boolean this_present_children = true && this.isSetChildren(); boolean that_present_children = true && that.isSetChildren(); if (this_present_children || that_present_children) { @@ -415,6 +426,15 @@ public boolean equals(TPathEntry that) { return false; } + boolean this_present_authzObjs = true && this.isSetAuthzObjs(); + boolean that_present_authzObjs = true && that.isSetAuthzObjs(); + if (this_present_authzObjs || that_present_authzObjs) { + if (!(this_present_authzObjs && that_present_authzObjs)) + return false; + if (!this.authzObjs.equals(that.authzObjs)) + return false; + } + return true; } @@ -432,16 +452,16 @@ public int hashCode() { if (present_pathElement) 
builder.append(pathElement); - boolean present_authzObj = true && (isSetAuthzObj()); - builder.append(present_authzObj); - if (present_authzObj) - builder.append(authzObj); - boolean present_children = true && (isSetChildren()); builder.append(present_children); if (present_children) builder.append(children); + boolean present_authzObjs = true && (isSetAuthzObjs()); + builder.append(present_authzObjs); + if (present_authzObjs) + builder.append(authzObjs); + return builder.toHashCode(); } @@ -473,22 +493,22 @@ public int compareTo(TPathEntry other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetAuthzObj()).compareTo(typedOther.isSetAuthzObj()); + lastComparison = Boolean.valueOf(isSetChildren()).compareTo(typedOther.isSetChildren()); if (lastComparison != 0) { return lastComparison; } - if (isSetAuthzObj()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authzObj, typedOther.authzObj); + if (isSetChildren()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.children, typedOther.children); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetChildren()).compareTo(typedOther.isSetChildren()); + lastComparison = Boolean.valueOf(isSetAuthzObjs()).compareTo(typedOther.isSetAuthzObjs()); if (lastComparison != 0) { return lastComparison; } - if (isSetChildren()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.children, typedOther.children); + if (isSetAuthzObjs()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authzObjs, typedOther.authzObjs); if (lastComparison != 0) { return lastComparison; } @@ -524,16 +544,6 @@ public String toString() { sb.append(this.pathElement); } first = false; - if (isSetAuthzObj()) { - if (!first) sb.append(", "); - sb.append("authzObj:"); - if (this.authzObj == null) { - sb.append("null"); - } else { - sb.append(this.authzObj); - } - first = false; - } if (!first) sb.append(", "); sb.append("children:"); if (this.children == null) { @@ -542,6 +552,16 @@ public String toString() { sb.append(this.children); } first = false; + if (isSetAuthzObjs()) { + if (!first) sb.append(", "); + sb.append("authzObjs:"); + if (this.authzObjs == null) { + sb.append("null"); + } else { + sb.append(this.authzObjs); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -615,14 +635,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPathEntry struct) org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // AUTHZ_OBJ - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.authzObj = iprot.readString(); - struct.setAuthzObjIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; case 4: // CHILDREN if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { @@ -641,6 +653,24 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPathEntry struct) org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // AUTHZ_OBJS + if (schemeField.type == org.apache.thrift.protocol.TType.SET) { + { + org.apache.thrift.protocol.TSet _set35 = iprot.readSetBegin(); + struct.authzObjs = new HashSet(2*_set35.size); + for (int _i36 = 0; _i36 < _set35.size; ++_i36) + { + String _elem37; // required + _elem37 = iprot.readString(); + struct.authzObjs.add(_elem37); + } + iprot.readSetEnd(); + } + struct.setAuthzObjsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -662,25 +692,32 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPathEntry struct) oprot.writeString(struct.pathElement); oprot.writeFieldEnd(); } - if (struct.authzObj != null) { - if (struct.isSetAuthzObj()) { - oprot.writeFieldBegin(AUTHZ_OBJ_FIELD_DESC); - oprot.writeString(struct.authzObj); - oprot.writeFieldEnd(); - } - } if (struct.children != null) { oprot.writeFieldBegin(CHILDREN_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I32, struct.children.size())); - for (int _iter35 : struct.children) + for (int _iter38 : struct.children) { - oprot.writeI32(_iter35); + oprot.writeI32(_iter38); } oprot.writeSetEnd(); } oprot.writeFieldEnd(); } + if (struct.authzObjs != null) { + if (struct.isSetAuthzObjs()) { + oprot.writeFieldBegin(AUTHZ_OBJS_FIELD_DESC); + { + oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.authzObjs.size())); + for (String _iter39 : struct.authzObjs) + { + oprot.writeString(_iter39); + } + oprot.writeSetEnd(); + } + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -702,18 +739,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TPathEntry struct) oprot.writeString(struct.pathElement); { oprot.writeI32(struct.children.size()); - for (int _iter36 : struct.children) + for (int _iter40 : struct.children) { - oprot.writeI32(_iter36); + oprot.writeI32(_iter40); } } BitSet optionals = new BitSet(); - if (struct.isSetAuthzObj()) { + if (struct.isSetAuthzObjs()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetAuthzObj()) { - oprot.writeString(struct.authzObj); + if (struct.isSetAuthzObjs()) { + { + oprot.writeI32(struct.authzObjs.size()); + for (String _iter41 : struct.authzObjs) + { + oprot.writeString(_iter41); + } + } } } @@ -725,20 +768,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TPathEntry struct) t struct.pathElement = iprot.readString(); struct.setPathElementIsSet(true); { - org.apache.thrift.protocol.TSet _set37 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.children = new HashSet(2*_set37.size); - for (int _i38 = 0; _i38 < _set37.size; ++_i38) + org.apache.thrift.protocol.TSet _set42 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.children = new HashSet(2*_set42.size); + for (int _i43 = 0; _i43 < _set42.size; ++_i43) { - int _elem39; // required - _elem39 = iprot.readI32(); - struct.children.add(_elem39); + int _elem44; // required + _elem44 = iprot.readI32(); + struct.children.add(_elem44); } } struct.setChildrenIsSet(true); BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.authzObj = iprot.readString(); - struct.setAuthzObjIsSet(true); + { + org.apache.thrift.protocol.TSet _set45 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.authzObjs = new HashSet(2*_set45.size); + for (int _i46 = 0; _i46 < _set45.size; ++_i46) + { + String _elem47; // required + _elem47 = iprot.readString(); + struct.authzObjs.add(_elem47); + } + } + struct.setAuthzObjsIsSet(true); } } } diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsDump.java 
b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsDump.java index 200ecad51..caf9ad186 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsDump.java +++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsDump.java @@ -448,16 +448,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPathsDump struct) case 2: // NODE_MAP if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map40 = iprot.readMapBegin(); - struct.nodeMap = new HashMap(2*_map40.size); - for (int _i41 = 0; _i41 < _map40.size; ++_i41) + org.apache.thrift.protocol.TMap _map48 = iprot.readMapBegin(); + struct.nodeMap = new HashMap(2*_map48.size); + for (int _i49 = 0; _i49 < _map48.size; ++_i49) { - int _key42; // required - TPathEntry _val43; // required - _key42 = iprot.readI32(); - _val43 = new TPathEntry(); - _val43.read(iprot); - struct.nodeMap.put(_key42, _val43); + int _key50; // required + TPathEntry _val51; // required + _key50 = iprot.readI32(); + _val51 = new TPathEntry(); + _val51.read(iprot); + struct.nodeMap.put(_key50, _val51); } iprot.readMapEnd(); } @@ -486,10 +486,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPathsDump struct) oprot.writeFieldBegin(NODE_MAP_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, struct.nodeMap.size())); - for (Map.Entry _iter44 : struct.nodeMap.entrySet()) + for (Map.Entry _iter52 : struct.nodeMap.entrySet()) { - oprot.writeI32(_iter44.getKey()); - _iter44.getValue().write(oprot); + oprot.writeI32(_iter52.getKey()); + _iter52.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -515,10 +515,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TPathsDump struct) oprot.writeI32(struct.rootId); { oprot.writeI32(struct.nodeMap.size()); - for (Map.Entry _iter45 : struct.nodeMap.entrySet()) + for (Map.Entry _iter53 : struct.nodeMap.entrySet()) { - oprot.writeI32(_iter45.getKey()); - _iter45.getValue().write(oprot); + oprot.writeI32(_iter53.getKey()); + _iter53.getValue().write(oprot); } } } @@ -529,16 +529,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TPathsDump struct) t struct.rootId = iprot.readI32(); struct.setRootIdIsSet(true); { - org.apache.thrift.protocol.TMap _map46 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.nodeMap = new HashMap(2*_map46.size); - for (int _i47 = 0; _i47 < _map46.size; ++_i47) + org.apache.thrift.protocol.TMap _map54 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.nodeMap = new HashMap(2*_map54.size); + for (int _i55 = 0; _i55 < _map54.size; ++_i55) { - int _key48; // required - TPathEntry _val49; // required - _key48 = iprot.readI32(); - _val49 = new TPathEntry(); - _val49.read(iprot); - struct.nodeMap.put(_key48, _val49); + int _key56; // required + TPathEntry _val57; // required + _key56 = iprot.readI32(); + _val57 = new TPathEntry(); + _val57.read(iprot); + struct.nodeMap.put(_key56, _val57); } } struct.setNodeMapIsSet(true); diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsUpdate.java 
b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsUpdate.java index d0ee6b6b7..f52b4ff1f 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsUpdate.java +++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsUpdate.java @@ -626,14 +626,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPathsUpdate struct case 4: // PATH_CHANGES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list50 = iprot.readListBegin(); - struct.pathChanges = new ArrayList(_list50.size); - for (int _i51 = 0; _i51 < _list50.size; ++_i51) + org.apache.thrift.protocol.TList _list58 = iprot.readListBegin(); + struct.pathChanges = new ArrayList(_list58.size); + for (int _i59 = 0; _i59 < _list58.size; ++_i59) { - TPathChanges _elem52; // required - _elem52 = new TPathChanges(); - _elem52.read(iprot); - struct.pathChanges.add(_elem52); + TPathChanges _elem60; // required + _elem60 = new TPathChanges(); + _elem60.read(iprot); + struct.pathChanges.add(_elem60); } iprot.readListEnd(); } @@ -672,9 +672,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPathsUpdate struc oprot.writeFieldBegin(PATH_CHANGES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pathChanges.size())); - for (TPathChanges _iter53 : struct.pathChanges) + for (TPathChanges _iter61 : struct.pathChanges) { - _iter53.write(oprot); + _iter61.write(oprot); } oprot.writeListEnd(); } @@ -701,9 +701,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TPathsUpdate struct oprot.writeI64(struct.seqNum); { oprot.writeI32(struct.pathChanges.size()); - for (TPathChanges _iter54 : struct.pathChanges) + for (TPathChanges _iter62 : struct.pathChanges) { - _iter54.write(oprot); + _iter62.write(oprot); } } BitSet optionals = new BitSet(); @@ -724,14 +724,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TPathsUpdate struct) struct.seqNum = iprot.readI64(); struct.setSeqNumIsSet(true); { - org.apache.thrift.protocol.TList _list55 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pathChanges = new ArrayList(_list55.size); - for (int _i56 = 0; _i56 < _list55.size; ++_i56) + org.apache.thrift.protocol.TList _list63 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pathChanges = new ArrayList(_list63.size); + for (int _i64 = 0; _i64 < _list63.size; ++_i64) { - TPathChanges _elem57; // required - _elem57 = new TPathChanges(); - _elem57.read(iprot); - struct.pathChanges.add(_elem57); + TPathChanges _elem65; // required + _elem65 = new TPathChanges(); + _elem65.read(iprot); + struct.pathChanges.add(_elem65); } } struct.setPathChangesIsSet(true); diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPermissionsUpdate.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPermissionsUpdate.java index 850404b7e..bfb6cbcb0 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPermissionsUpdate.java +++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPermissionsUpdate.java @@ -647,16 +647,16 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, TPermissionsUpdate case 3: // PRIVILEGE_CHANGES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map94 = iprot.readMapBegin(); - struct.privilegeChanges = new HashMap(2*_map94.size); - for (int _i95 = 0; _i95 < _map94.size; ++_i95) + org.apache.thrift.protocol.TMap _map102 = iprot.readMapBegin(); + struct.privilegeChanges = new HashMap(2*_map102.size); + for (int _i103 = 0; _i103 < _map102.size; ++_i103) { - String _key96; // required - TPrivilegeChanges _val97; // required - _key96 = iprot.readString(); - _val97 = new TPrivilegeChanges(); - _val97.read(iprot); - struct.privilegeChanges.put(_key96, _val97); + String _key104; // required + TPrivilegeChanges _val105; // required + _key104 = iprot.readString(); + _val105 = new TPrivilegeChanges(); + _val105.read(iprot); + struct.privilegeChanges.put(_key104, _val105); } iprot.readMapEnd(); } @@ -668,16 +668,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPermissionsUpdate case 4: // ROLE_CHANGES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map98 = iprot.readMapBegin(); - struct.roleChanges = new HashMap(2*_map98.size); - for (int _i99 = 0; _i99 < _map98.size; ++_i99) + org.apache.thrift.protocol.TMap _map106 = iprot.readMapBegin(); + struct.roleChanges = new HashMap(2*_map106.size); + for (int _i107 = 0; _i107 < _map106.size; ++_i107) { - String _key100; // required - TRoleChanges _val101; // required - _key100 = iprot.readString(); - _val101 = new TRoleChanges(); - _val101.read(iprot); - struct.roleChanges.put(_key100, _val101); + String _key108; // required + TRoleChanges _val109; // required + _key108 = iprot.readString(); + _val109 = new TRoleChanges(); + _val109.read(iprot); + struct.roleChanges.put(_key108, _val109); } iprot.readMapEnd(); } @@ -709,10 +709,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPermissionsUpdate oprot.writeFieldBegin(PRIVILEGE_CHANGES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.privilegeChanges.size())); - for (Map.Entry _iter102 : struct.privilegeChanges.entrySet()) + for (Map.Entry _iter110 : struct.privilegeChanges.entrySet()) { - oprot.writeString(_iter102.getKey()); - _iter102.getValue().write(oprot); + oprot.writeString(_iter110.getKey()); + _iter110.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -722,10 +722,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPermissionsUpdate oprot.writeFieldBegin(ROLE_CHANGES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.roleChanges.size())); - for (Map.Entry _iter103 : struct.roleChanges.entrySet()) + for (Map.Entry _iter111 : struct.roleChanges.entrySet()) { - oprot.writeString(_iter103.getKey()); - _iter103.getValue().write(oprot); + oprot.writeString(_iter111.getKey()); + _iter111.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -752,18 +752,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TPermissionsUpdate oprot.writeI64(struct.seqNum); { oprot.writeI32(struct.privilegeChanges.size()); - for (Map.Entry _iter104 : struct.privilegeChanges.entrySet()) + for (Map.Entry _iter112 : struct.privilegeChanges.entrySet()) { - oprot.writeString(_iter104.getKey()); - _iter104.getValue().write(oprot); + 
oprot.writeString(_iter112.getKey()); + _iter112.getValue().write(oprot); } } { oprot.writeI32(struct.roleChanges.size()); - for (Map.Entry _iter105 : struct.roleChanges.entrySet()) + for (Map.Entry _iter113 : struct.roleChanges.entrySet()) { - oprot.writeString(_iter105.getKey()); - _iter105.getValue().write(oprot); + oprot.writeString(_iter113.getKey()); + _iter113.getValue().write(oprot); } } } @@ -776,30 +776,30 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TPermissionsUpdate s struct.seqNum = iprot.readI64(); struct.setSeqNumIsSet(true); { - org.apache.thrift.protocol.TMap _map106 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.privilegeChanges = new HashMap(2*_map106.size); - for (int _i107 = 0; _i107 < _map106.size; ++_i107) + org.apache.thrift.protocol.TMap _map114 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.privilegeChanges = new HashMap(2*_map114.size); + for (int _i115 = 0; _i115 < _map114.size; ++_i115) { - String _key108; // required - TPrivilegeChanges _val109; // required - _key108 = iprot.readString(); - _val109 = new TPrivilegeChanges(); - _val109.read(iprot); - struct.privilegeChanges.put(_key108, _val109); + String _key116; // required + TPrivilegeChanges _val117; // required + _key116 = iprot.readString(); + _val117 = new TPrivilegeChanges(); + _val117.read(iprot); + struct.privilegeChanges.put(_key116, _val117); } } struct.setPrivilegeChangesIsSet(true); { - org.apache.thrift.protocol.TMap _map110 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.roleChanges = new HashMap(2*_map110.size); - for (int _i111 = 0; _i111 < _map110.size; ++_i111) + org.apache.thrift.protocol.TMap _map118 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.roleChanges = new HashMap(2*_map118.size); + for (int _i119 = 0; _i119 < _map118.size; ++_i119) { - String _key112; // required - TRoleChanges _val113; // required - _key112 = iprot.readString(); - _val113 = new TRoleChanges(); - _val113.read(iprot); - struct.roleChanges.put(_key112, _val113); + String _key120; // required + TRoleChanges _val121; // required + _key120 = iprot.readString(); + _val121 = new TRoleChanges(); + _val121.read(iprot); + struct.roleChanges.put(_key120, _val121); } } struct.setRoleChangesIsSet(true); diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPrivilegeChanges.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPrivilegeChanges.java index 76720b98e..dc4626e2b 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPrivilegeChanges.java +++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPrivilegeChanges.java @@ -558,15 +558,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPrivilegeChanges s case 2: // ADD_PRIVILEGES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map58 = iprot.readMapBegin(); - struct.addPrivileges = new HashMap(2*_map58.size); - for (int _i59 = 0; _i59 < _map58.size; ++_i59) + 
org.apache.thrift.protocol.TMap _map66 = iprot.readMapBegin(); + struct.addPrivileges = new HashMap(2*_map66.size); + for (int _i67 = 0; _i67 < _map66.size; ++_i67) { - String _key60; // required - String _val61; // required - _key60 = iprot.readString(); - _val61 = iprot.readString(); - struct.addPrivileges.put(_key60, _val61); + String _key68; // required + String _val69; // required + _key68 = iprot.readString(); + _val69 = iprot.readString(); + struct.addPrivileges.put(_key68, _val69); } iprot.readMapEnd(); } @@ -578,15 +578,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPrivilegeChanges s case 3: // DEL_PRIVILEGES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map62 = iprot.readMapBegin(); - struct.delPrivileges = new HashMap(2*_map62.size); - for (int _i63 = 0; _i63 < _map62.size; ++_i63) + org.apache.thrift.protocol.TMap _map70 = iprot.readMapBegin(); + struct.delPrivileges = new HashMap(2*_map70.size); + for (int _i71 = 0; _i71 < _map70.size; ++_i71) { - String _key64; // required - String _val65; // required - _key64 = iprot.readString(); - _val65 = iprot.readString(); - struct.delPrivileges.put(_key64, _val65); + String _key72; // required + String _val73; // required + _key72 = iprot.readString(); + _val73 = iprot.readString(); + struct.delPrivileges.put(_key72, _val73); } iprot.readMapEnd(); } @@ -617,10 +617,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPrivilegeChanges oprot.writeFieldBegin(ADD_PRIVILEGES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.addPrivileges.size())); - for (Map.Entry _iter66 : struct.addPrivileges.entrySet()) + for (Map.Entry _iter74 : struct.addPrivileges.entrySet()) { - oprot.writeString(_iter66.getKey()); - oprot.writeString(_iter66.getValue()); + oprot.writeString(_iter74.getKey()); + oprot.writeString(_iter74.getValue()); } oprot.writeMapEnd(); } @@ -630,10 +630,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPrivilegeChanges oprot.writeFieldBegin(DEL_PRIVILEGES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.delPrivileges.size())); - for (Map.Entry _iter67 : struct.delPrivileges.entrySet()) + for (Map.Entry _iter75 : struct.delPrivileges.entrySet()) { - oprot.writeString(_iter67.getKey()); - oprot.writeString(_iter67.getValue()); + oprot.writeString(_iter75.getKey()); + oprot.writeString(_iter75.getValue()); } oprot.writeMapEnd(); } @@ -659,18 +659,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TPrivilegeChanges s oprot.writeString(struct.authzObj); { oprot.writeI32(struct.addPrivileges.size()); - for (Map.Entry _iter68 : struct.addPrivileges.entrySet()) + for (Map.Entry _iter76 : struct.addPrivileges.entrySet()) { - oprot.writeString(_iter68.getKey()); - oprot.writeString(_iter68.getValue()); + oprot.writeString(_iter76.getKey()); + oprot.writeString(_iter76.getValue()); } } { oprot.writeI32(struct.delPrivileges.size()); - for (Map.Entry _iter69 : struct.delPrivileges.entrySet()) + for (Map.Entry _iter77 : struct.delPrivileges.entrySet()) { - oprot.writeString(_iter69.getKey()); - oprot.writeString(_iter69.getValue()); + oprot.writeString(_iter77.getKey()); + oprot.writeString(_iter77.getValue()); } } } @@ -681,28 +681,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
TPrivilegeChanges st struct.authzObj = iprot.readString(); struct.setAuthzObjIsSet(true); { - org.apache.thrift.protocol.TMap _map70 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.addPrivileges = new HashMap(2*_map70.size); - for (int _i71 = 0; _i71 < _map70.size; ++_i71) + org.apache.thrift.protocol.TMap _map78 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.addPrivileges = new HashMap(2*_map78.size); + for (int _i79 = 0; _i79 < _map78.size; ++_i79) { - String _key72; // required - String _val73; // required - _key72 = iprot.readString(); - _val73 = iprot.readString(); - struct.addPrivileges.put(_key72, _val73); + String _key80; // required + String _val81; // required + _key80 = iprot.readString(); + _val81 = iprot.readString(); + struct.addPrivileges.put(_key80, _val81); } } struct.setAddPrivilegesIsSet(true); { - org.apache.thrift.protocol.TMap _map74 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.delPrivileges = new HashMap(2*_map74.size); - for (int _i75 = 0; _i75 < _map74.size; ++_i75) + org.apache.thrift.protocol.TMap _map82 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.delPrivileges = new HashMap(2*_map82.size); + for (int _i83 = 0; _i83 < _map82.size; ++_i83) { - String _key76; // required - String _val77; // required - _key76 = iprot.readString(); - _val77 = iprot.readString(); - struct.delPrivileges.put(_key76, _val77); + String _key84; // required + String _val85; // required + _key84 = iprot.readString(); + _val85 = iprot.readString(); + struct.delPrivileges.put(_key84, _val85); } } struct.setDelPrivilegesIsSet(true); diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TRoleChanges.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TRoleChanges.java index 87ef02ddd..7920872d2 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TRoleChanges.java +++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TRoleChanges.java @@ -548,13 +548,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRoleChanges struct case 2: // ADD_GROUPS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list78 = iprot.readListBegin(); - struct.addGroups = new ArrayList(_list78.size); - for (int _i79 = 0; _i79 < _list78.size; ++_i79) + org.apache.thrift.protocol.TList _list86 = iprot.readListBegin(); + struct.addGroups = new ArrayList(_list86.size); + for (int _i87 = 0; _i87 < _list86.size; ++_i87) { - String _elem80; // required - _elem80 = iprot.readString(); - struct.addGroups.add(_elem80); + String _elem88; // required + _elem88 = iprot.readString(); + struct.addGroups.add(_elem88); } iprot.readListEnd(); } @@ -566,13 +566,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRoleChanges struct case 3: // DEL_GROUPS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list81 = iprot.readListBegin(); - struct.delGroups = new ArrayList(_list81.size); - for (int _i82 = 0; 
_i82 < _list81.size; ++_i82) + org.apache.thrift.protocol.TList _list89 = iprot.readListBegin(); + struct.delGroups = new ArrayList(_list89.size); + for (int _i90 = 0; _i90 < _list89.size; ++_i90) { - String _elem83; // required - _elem83 = iprot.readString(); - struct.delGroups.add(_elem83); + String _elem91; // required + _elem91 = iprot.readString(); + struct.delGroups.add(_elem91); } iprot.readListEnd(); } @@ -603,9 +603,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TRoleChanges struc oprot.writeFieldBegin(ADD_GROUPS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.addGroups.size())); - for (String _iter84 : struct.addGroups) + for (String _iter92 : struct.addGroups) { - oprot.writeString(_iter84); + oprot.writeString(_iter92); } oprot.writeListEnd(); } @@ -615,9 +615,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TRoleChanges struc oprot.writeFieldBegin(DEL_GROUPS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.delGroups.size())); - for (String _iter85 : struct.delGroups) + for (String _iter93 : struct.delGroups) { - oprot.writeString(_iter85); + oprot.writeString(_iter93); } oprot.writeListEnd(); } @@ -643,16 +643,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TRoleChanges struct oprot.writeString(struct.role); { oprot.writeI32(struct.addGroups.size()); - for (String _iter86 : struct.addGroups) + for (String _iter94 : struct.addGroups) { - oprot.writeString(_iter86); + oprot.writeString(_iter94); } } { oprot.writeI32(struct.delGroups.size()); - for (String _iter87 : struct.delGroups) + for (String _iter95 : struct.delGroups) { - oprot.writeString(_iter87); + oprot.writeString(_iter95); } } } @@ -663,24 +663,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRoleChanges struct) struct.role = iprot.readString(); struct.setRoleIsSet(true); { - org.apache.thrift.protocol.TList _list88 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.addGroups = new ArrayList(_list88.size); - for (int _i89 = 0; _i89 < _list88.size; ++_i89) + org.apache.thrift.protocol.TList _list96 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.addGroups = new ArrayList(_list96.size); + for (int _i97 = 0; _i97 < _list96.size; ++_i97) { - String _elem90; // required - _elem90 = iprot.readString(); - struct.addGroups.add(_elem90); + String _elem98; // required + _elem98 = iprot.readString(); + struct.addGroups.add(_elem98); } } struct.setAddGroupsIsSet(true); { - org.apache.thrift.protocol.TList _list91 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.delGroups = new ArrayList(_list91.size); - for (int _i92 = 0; _i92 < _list91.size; ++_i92) + org.apache.thrift.protocol.TList _list99 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.delGroups = new ArrayList(_list99.size); + for (int _i100 = 0; _i100 < _list99.size; ++_i100) { - String _elem93; // required - _elem93 = iprot.readString(); - struct.delGroups.add(_elem93); + String _elem101; // required + _elem101 = iprot.readString(); + struct.delGroups.add(_elem101); } } struct.setDelGroupsIsSet(true); diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPaths.java 
b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPaths.java index ba16f4ab0..7dda9fb30 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPaths.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPaths.java @@ -17,6 +17,8 @@ */ package org.apache.sentry.hdfs; +import java.util.Set; + /** * A public interface of the fundamental APIs exposed by the implementing * data structure. The primary client of this interface is the Namenode @@ -25,34 +27,39 @@ public interface AuthzPaths { /** - * Check if a Path belongs to the configured prefix set - * @param pathElements : A path split into segments - * @return Is Path under configured prefix + * Check if a Path belongs to the configured prefix set. + * + * @param pathElements A path split into segments + * @return Returns true if the path is under the configured prefix set, false otherwise. */ - public boolean isUnderPrefix(String[] pathElements); + boolean isUnderPrefix(String[] pathElements); /** - * Returns the authorizable Object (database/table) associated with this path. - * Unlike {@link #findAuthzObjectExactMatch(String[])}, if not match is - * found, it will return the first ancestor that has an associated - * authorizable object. - * @param pathElements : A path split into segments - * @return A authzObject associated with this path + * Returns all authorizable Objects (database/table/partition) associated + * with this path. Unlike {@link #findAuthzObjectExactMatches(String[])}, + * if no match is found, it will return the first ancestor that has the + * associated authorizable objects. + * + * @param pathElements A path split into segments + * @return Returns the set of authzObjects associated with this path */ - public String findAuthzObject(String[] pathElements); + Set findAuthzObject(String[] pathElements); /** - * Returns the authorizable Object (database/table) associated with this path. - * @param pathElements : A path split into segments - * @return A authzObject associated with this path + * Returns all authorizable Objects (database/table/partition) associated + * with this path. + * + * @param pathElements A path split into segments + * @return Returns the set of authzObjects associated with this path */ - public String findAuthzObjectExactMatch(String[] pathElements); + Set findAuthzObjectExactMatches(String[] pathElements); /** * Return a Dumper that may provide a more optimized over-the-wire * representation of the internal data structures. - * @return + * + * @return Returns the AuthzPathsDumper.
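+ *
+ * For example (hypothetical usage, not part of this patch): a consumer can
+ * serialize the whole tree via getPathsDump().createPathsDump() and later
+ * rebuild it with getPathsDump().initializeFromDump(tPathsDump).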
*/ - public AuthzPathsDumper getPathsDump(); + AuthzPathsDumper getPathsDump(); } diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPathsDumper.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPathsDumper.java index 2bd2a885c..095095710 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPathsDumper.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPathsDumper.java @@ -21,8 +21,8 @@ public interface AuthzPathsDumper { - public TPathsDump createPathsDump(); + TPathsDump createPathsDump(); - public K initializeFromDump(TPathsDump pathsDump); + K initializeFromDump(TPathsDump pathsDump); } diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPermissions.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPermissions.java index 1631ae566..b575e81a9 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPermissions.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPermissions.java @@ -23,6 +23,6 @@ public interface AuthzPermissions { - public List getAcls(String authzObj); + List getAcls(String authzObj); } diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPaths.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPaths.java index d52e3617a..ceb1da80f 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPaths.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPaths.java @@ -17,19 +17,16 @@ */ package org.apache.sentry.hdfs; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; +import com.google.common.base.Joiner; import org.apache.hadoop.fs.Path; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A non thread-safe implementation of {@link AuthzPaths}. It abstracts over the @@ -39,6 +36,8 @@ */ public class HMSPaths implements AuthzPaths { + private static Logger LOG = LoggerFactory.getLogger(HMSPaths.class); + @VisibleForTesting static List getPathElements(String path) { path = path.trim(); @@ -63,7 +62,7 @@ static List getPathElements(String path) { } @VisibleForTesting - static List> gePathsElements(List paths) { + static List> getPathsElements(List paths) { List> pathsElements = new ArrayList>(paths.size()); for (String path : paths) { pathsElements.add(getPathElements(path)); @@ -110,7 +109,13 @@ static class Entry { private Entry parent; private EntryType type; private String pathElement; - private String authzObj; + + // A set of authorizable objects associated with this entry. Authorizable + // object should be case insensitive. + private Set authzObjs; + + // Path of child element to the path entry mapping. + // e.g. 
'b' -> '/a/b' private final Map children; Entry(Entry parent, String pathElement, EntryType type, @@ -118,12 +123,47 @@ static class Entry { this.parent = parent; this.type = type; this.pathElement = pathElement; - this.authzObj = authzObj; + this.authzObjs = new TreeSet(String.CASE_INSENSITIVE_ORDER); + addAuthzObj(authzObj); children = new HashMap(); } - void setAuthzObj(String authzObj) { - this.authzObj = authzObj; + Entry(Entry parent, String pathElement, EntryType type, + Set authzObjs) { + this.parent = parent; + this.type = type; + this.pathElement = pathElement; + this.authzObjs = new TreeSet(String.CASE_INSENSITIVE_ORDER); + addAuthzObjs(authzObjs); + children = new HashMap(); + } + + // Get the mapping of child path elements to their path entries. + public Map getChildren() { + return children; + } + + void clearAuthzObjs() { + // Keep the case-insensitive ordering used by the constructors. + authzObjs = new TreeSet(String.CASE_INSENSITIVE_ORDER); + } + + void removeAuthzObj(String authzObj) { + authzObjs.remove(authzObj); + } + + void addAuthzObj(String authzObj) { + if (authzObj != null) { + authzObjs.add(authzObj); + } + } + + void addAuthzObjs(Set authzObjs) { + if (authzObjs != null) { + for (String authObj : authzObjs) { + this.authzObjs.add(authObj); + } + } } private void setType(EntryType type) { @@ -136,42 +176,64 @@ protected void removeParent() { public String toString() { return String.format("Entry[fullPath: %s, type: %s, authObject: %s]", - getFullPath(), type, authzObj); - } - + getFullPath(), type, Joiner.on(",").join(authzObjs)); + } + + /** + * Create a child entry based on the path, type and the authzObj + * associated with it. + * + * @param pathElements a path split into segments. + * @param type the type of the child entry. + * @param authzObj the authorizable object associated with the entry. + * @return Returns the child entry. + */ private Entry createChild(List pathElements, EntryType type, String authzObj) { + + // Start from this entry as the parent. Entry entryParent = this; + + // Create missing entries for the path elements until reaching the + // direct parent. for (int i = 0; i < pathElements.size() - 1; i++) { + String pathElement = pathElements.get(i); Entry child = entryParent.getChildren().get(pathElement); + if (child == null) { - child = new Entry(entryParent, pathElement, EntryType.DIR, null); + child = new Entry(entryParent, pathElement, EntryType.DIR, (String) null); entryParent.getChildren().put(pathElement, child); } + entryParent = child; } + String lastPathElement = pathElements.get(pathElements.size() - 1); Entry child = entryParent.getChildren().get(lastPathElement); + + // Create the child entry if not found. If found and the entry is + // already a prefix or authzObj type, only add the authzObj. + // If the entry already existed as a dir, change it to an authzObj + // and add the authzObj.
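+ // For example (hypothetical values, for illustration only): calling
+ // createChild([user, hive, warehouse, t1], AUTHZ_OBJECT, "db1.t1") and then
+ // createChild([user, hive, warehouse, t1], AUTHZ_OBJECT, "db2.t1") leaves a
+ // single entry for /user/hive/warehouse/t1 whose authzObjs set holds both
+ // "db1.t1" and "db2.t1".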
if (child == null) { child = new Entry(entryParent, lastPathElement, type, authzObj); entryParent.getChildren().put(lastPathElement, child); } else if (type == EntryType.AUTHZ_OBJECT && - child.getType() == EntryType.PREFIX) { - // Support for default db in hive (which is usually a prefix dir) - child.setAuthzObj(authzObj); + (child.getType() == EntryType.PREFIX || child.getType() == EntryType.AUTHZ_OBJECT)) { + child.addAuthzObj(authzObj); } else if (type == EntryType.AUTHZ_OBJECT && child.getType() == EntryType.DIR) { - // if the entry already existed as dir, we change it to be a authz obj - child.setAuthzObj(authzObj); + child.addAuthzObj(authzObj); child.setType(EntryType.AUTHZ_OBJECT); } + return child; } public static Entry createRoot(boolean asPrefix) { - return new Entry(null, "/", (asPrefix) - ? EntryType.PREFIX : EntryType.DIR, null); + return new Entry(null, "/", asPrefix + ? EntryType.PREFIX : EntryType.DIR, (String) null); } private String toPath(List arr) { @@ -202,6 +264,32 @@ public Entry createAuthzObjPath(List pathElements, String authzObj) { return entry; } + public void deleteAuthzObject(String authzObj) { + if (getParent() != null) { + if (getChildren().isEmpty()) { + + // Remove the authzObj from the path entry. If the entry no + // longer maps to any authzObj, remove the entry and any + // dangling ancestors recursively. + authzObjs.remove(authzObj); + if (authzObjs.size() == 0) { + getParent().getChildren().remove(getPathElement()); + getParent().deleteIfDangling(); + parent = null; + } + } else { + + // If the entry was for an authz object and has children, + // change it to be a dir entry and remove the authzObj from + // the path entry. + if (getType() == EntryType.AUTHZ_OBJECT) { + setType(EntryType.DIR); + authzObjs.remove(authzObj); + } + } + } + } + public void delete() { if (getParent() != null) { if (getChildren().isEmpty()) { @@ -213,7 +301,7 @@ public void delete() { // change it to be a dir entry. if (getType() == EntryType.AUTHZ_OBJECT) { setType(EntryType.DIR); - setAuthzObj(null); + clearAuthzObjs(); } } } @@ -237,14 +325,11 @@ public String getPathElement() { return pathElement; } - public String getAuthzObj() { - return authzObj; + public Set getAuthzObjs() { + return authzObjs; } - @SuppressWarnings("unchecked") - public Map getChildren() { - return children; - } + public Entry findPrefixEntry(List pathElements) { Preconditions.checkArgument(pathElements != null, @@ -281,17 +366,17 @@ private Entry find(String[] pathElements, int index, boolean isPartialMatchOk, Entry lastAuthObj) { Entry found = null; if (index == pathElements.length) { - if (isPartialMatchOk && (getAuthzObj() != null)) { + if (isPartialMatchOk && getAuthzObjs().size() != 0) { found = this; } } else { Entry child = getChildren().get(pathElements[index]); if (child != null) { if (index == pathElements.length - 1) { - found = (child.getAuthzObj() != null) ? child : lastAuthObj; + found = (child.getAuthzObjs().size() != 0) ? child : lastAuthObj; } else { found = child.find(pathElements, index + 1, isPartialMatchOk, - (child.getAuthzObj() != null) ? child : lastAuthObj); + (child.getAuthzObjs().size() != 0) ? child : lastAuthObj); } } else { if (isPartialMatchOk) { @@ -322,11 +407,15 @@ private StringBuilder getFullPath(Entry entry, StringBuilder sb) { private volatile Entry root; private String[] prefixes; + + // The Hive authorizable objects to path entries mapping. + // One authorizable object can map to a set of path entries.
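+ // For example (hypothetical): "db1.tbl1" may map to the entries for both
+ // /user/hive/warehouse/db1.db/tbl1 and /data/external/tbl1_archive, e.g.
+ // when a table has partitions stored outside its default location.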
private Map> authzObjToPath; public HMSPaths(String[] pathPrefixes) { boolean rootPrefix = false; - this.prefixes = pathPrefixes; + // Copy the array to avoid external modification + this.prefixes = Arrays.copyOf(pathPrefixes, pathPrefixes.length); for (String pathPrefix : pathPrefixes) { rootPrefix = rootPrefix || pathPrefix.equals(Path.SEPARATOR); } @@ -340,11 +429,12 @@ public HMSPaths(String[] pathPrefixes) { root.createPrefix(getPathElements(pathPrefix)); } } - authzObjToPath = new HashMap>(); + + authzObjToPath = new TreeMap>(String.CASE_INSENSITIVE_ORDER); } void _addAuthzObject(String authzObj, List authzObjPaths) { - addAuthzObject(authzObj, gePathsElements(authzObjPaths)); + addAuthzObject(authzObj, getPathsElements(authzObjPaths)); } void addAuthzObject(String authzObj, List> authzObjPathElements) { @@ -355,7 +445,7 @@ void addAuthzObject(String authzObj, List> authzObjPathElements) { if (e != null) { newEntries.add(e); } else { - // LOG WARN IGNORING PATH, no prefix + LOG.warn("Ignoring path, no prefix"); } } authzObjToPath.put(authzObj, newEntries); @@ -363,7 +453,7 @@ void addAuthzObject(String authzObj, List> authzObjPathElements) { previousEntries.removeAll(newEntries); if (!previousEntries.isEmpty()) { for (Entry entry : previousEntries) { - entry.delete(); + entry.deleteAuthzObject(authzObj); } } } @@ -379,7 +469,7 @@ void addPathsToAuthzObject(String authzObj, if (e != null) { newEntries.add(e); } else { - // LOG WARN IGNORING PATH, no prefix + LOG.warn("Ignoring path, no prefix"); } } entries.addAll(newEntries); @@ -387,12 +477,12 @@ void addPathsToAuthzObject(String authzObj, if (createNew) { addAuthzObject(authzObj, authzObjPathElements); } - // LOG WARN object does not exist + LOG.warn("Object does not exist"); } } void _addPathsToAuthzObject(String authzObj, List authzObjPaths) { - addPathsToAuthzObject(authzObj, gePathsElements(authzObjPaths), false); + addPathsToAuthzObject(authzObj, getPathsElements(authzObjPaths), false); } void addPathsToAuthzObject(String authzObj, List> authzObjPaths) { @@ -408,15 +498,15 @@ void deletePathsFromAuthzObject(String authzObj, Entry entry = root.find( pathElements.toArray(new String[pathElements.size()]), false); if (entry != null) { - entry.delete(); + entry.deleteAuthzObject(authzObj); toDelEntries.add(entry); } else { - // LOG WARN IGNORING PATH, it was not in registered + LOG.warn("Ignoring path, it was not registered"); } } entries.removeAll(toDelEntries); } else { - // LOG WARN object does not exist + LOG.warn("Object does not exist"); } } @@ -424,39 +514,49 @@ void deleteAuthzObject(String authzObj) { Set entries = authzObjToPath.remove(authzObj); if (entries != null) { for (Entry entry : entries) { - entry.delete(); + entry.deleteAuthzObject(authzObj); } } } @Override - public String findAuthzObject(String[] pathElements) { + public Set findAuthzObject(String[] pathElements) { return findAuthzObject(pathElements, true); } @Override - public String findAuthzObjectExactMatch(String[] pathElements) { + public Set findAuthzObjectExactMatches(String[] pathElements) { return findAuthzObject(pathElements, false); } - public String findAuthzObject(String[] pathElements, boolean isPartialOk) { + /** + * Based on the isPartialOk flag, returns all authorizable Objects + * (database/table/partition) associated with the path, or if no match + * is found returns the first ancestor that has the associated + * authorizable objects. + * + * @param pathElements A path split into segments. 
+ * @param isPartialOk Flag that indicates whether a partial path match is allowed.
+ * @return Returns the set of authzObjects associated with this path. + */ + public Set findAuthzObject(String[] pathElements, boolean isPartialOk) { // Handle '/' - if ((pathElements == null)||(pathElements.length == 0)) return null; - String authzObj = null; - Entry entry = root.find(pathElements, isPartialOk); - if (entry != null) { - authzObj = entry.getAuthzObj(); + if (pathElements == null || pathElements.length == 0) { + return null; } - return authzObj; + Entry entry = root.find(pathElements, isPartialOk); + return (entry != null) ? entry.getAuthzObjs() : null; } boolean renameAuthzObject(String oldName, List oldPathElems, String newName, List newPathElems) { // Handle '/' - if ((oldPathElems == null)||(oldPathElems.size() == 0)) return false; + if (oldPathElems == null || oldPathElems.size() == 0) { + return false; + } Entry entry = root.find(oldPathElems.toArray(new String[oldPathElems.size()]), false); - if ((entry != null)&&(entry.getAuthzObj().equals(oldName))) { + if (entry != null && entry.getAuthzObjs().contains(oldName)) { // Update pathElements String[] newPath = newPathElems.toArray(new String[newPathElems.size()]); // Can't use Lists.newArrayList() because of whacky generics @@ -474,8 +574,9 @@ boolean renameAuthzObject(String oldName, List oldPathElems, Set eSet = authzObjToPath.get(oldName); authzObjToPath.put(newName, eSet); for (Entry e : eSet) { - if (e.getAuthzObj().equals(oldName)) { - e.setAuthzObj(newName); + if (e.getAuthzObjs().contains(oldName)) { + e.removeAuthzObj(oldName); + e.addAuthzObj(newName); } } authzObjToPath.remove(oldName); diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPathsDumper.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPathsDumper.java index 8f7bb0f61..3203ecd00 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPathsDumper.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPathsDumper.java @@ -70,8 +70,8 @@ private Tuple createTPathEntry(Entry entry, AtomicInteger idCounter, int myId = idCounter.incrementAndGet(); TPathEntry tEntry = new TPathEntry(entry.getType().getByte(), entry.getPathElement(), new HashSet()); - if (entry.getAuthzObj() != null) { - tEntry.setAuthzObj(entry.getAuthzObj()); + if (entry.getAuthzObjs().size() != 0) { + tEntry.setAuthzObjs(entry.getAuthzObjs()); } idMap.put(myId, tEntry); return new Tuple(tEntry, myId); @@ -82,13 +82,12 @@ public HMSPaths initializeFromDump(TPathsDump pathDump) { HMSPaths hmsPaths = new HMSPaths(this.hmsPaths.getPrefixes()); TPathEntry tRootEntry = pathDump.getNodeMap().get(pathDump.getRootId()); Entry rootEntry = hmsPaths.getRootEntry(); Map> authzObjToPath = new HashMap>(); cloneToEntry(tRootEntry, rootEntry, pathDump.getNodeMap(), authzObjToPath, rootEntry.getType() == EntryType.PREFIX); hmsPaths.setRootEntry(rootEntry); hmsPaths.setAuthzObjToPathMapping(authzObjToPath); + return hmsPaths; } @@ -103,25 +102,28 @@ private void cloneToEntry(TPathEntry tParent, Entry parent, child = parent.getChildren().get(tChild.getPathElement()); // If we haven't reached a prefix entry yet, then the child should // already exist;
otherwise it is not part of the prefix - if (child == null) continue; + if (child == null) { + continue; + } isChildPrefix = child.getType() == EntryType.PREFIX; // Handle case when prefix entry has an authzObject // e.g. the default table mapped to /user/hive/warehouse if (isChildPrefix) { - child.setAuthzObj(tChild.getAuthzObj()); + child.addAuthzObjs(tChild.getAuthzObjs()); } } if (child == null) { child = new Entry(parent, tChild.getPathElement(), - EntryType.fromByte(tChild.getType()), tChild.getAuthzObj()); + EntryType.fromByte(tChild.getType()), tChild.getAuthzObjs()); } - if (child.getAuthzObj() != null) { - Set paths = authzObjToPath.get(child.getAuthzObj()); - if (paths == null) { - paths = new HashSet(); - authzObjToPath.put(child.getAuthzObj(), paths); + if (child.getAuthzObjs().size() != 0) { + for (String authzObj: child.getAuthzObjs()) { + Set paths = authzObjToPath.get(authzObj); + if (paths == null) { + paths = new HashSet(); + // Put the newly created set into the map, otherwise the + // authzObj-to-paths mapping would be lost. + authzObjToPath.put(authzObj, paths); + } + paths.add(child); } - paths.add(child); } parent.getChildren().put(child.getPathElement(), child); cloneToEntry(tChild, child, idMap, authzObjToPath, isChildPrefix); diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java index 7cb20ef7e..a091f7129 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java @@ -23,14 +23,20 @@ import java.util.LinkedList; import java.util.List; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; - import org.apache.sentry.hdfs.service.thrift.TPathChanges; import org.apache.sentry.hdfs.service.thrift.TPathsUpdate; import org.apache.commons.httpclient.util.URIUtil; import org.apache.commons.httpclient.URIException; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.conf.Configuration; import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * A wrapper class over the TPathsUpdate thrift generated class. Please see @@ -38,8 +44,10 @@ */ public class PathsUpdate implements Updateable.Update { - public static String ALL_PATHS = "__ALL_PATHS__"; + private static final Logger LOGGER = LoggerFactory.getLogger(PathsUpdate.class); + public static String ALL_PATHS = "__ALL_PATHS__"; + private static final Configuration CONF = new Configuration(); private final TPathsUpdate tPathsUpdate; public PathsUpdate() { @@ -59,12 +67,15 @@ public PathsUpdate(long seqNum, boolean hasFullImage) { public boolean hasFullImage() { return tPathsUpdate.isHasFullImage(); } + public TPathChanges newPathChange(String authzObject) { + TPathChanges pathChanges = new TPathChanges(authzObject, new LinkedList>(), new LinkedList>()); tPathsUpdate.addToPathChanges(pathChanges); return pathChanges; } + public List getPathChanges() { return tPathsUpdate.getPathChanges(); } @@ -83,21 +94,44 @@ public TPathsUpdate toThrift() { return tPathsUpdate; } + @VisibleForTesting + public static Configuration getConfiguration() { + return CONF; + } /** * * @param path : Needs to be an HDFS location with a scheme * @return Path in the form of a list containing the path tree with scheme/ authority stripped off.
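* For example (hypothetical): parsePath("hdfs://namenode:8020/user/hive/warehouse")
* yields [user, hive, warehouse], while parsePath("s3://bucket/key") yields null.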
- * Returns null if a non HDFS path + * Returns null if a non-HDFS path or if the path is null/empty */ public static List parsePath(String path) { try { - URI uri = new URI(URIUtil.encodePath(path)); - Preconditions.checkNotNull(uri.getScheme()); - if(uri.getScheme().equalsIgnoreCase("hdfs")) { + LOGGER.debug("Parsing path " + path); + URI uri = null; + if (StringUtils.isNotEmpty(path)) { + uri = new URI(URIUtil.encodePath(path)); + } else { + return null; + } + + String scheme = uri.getScheme(); + if (scheme == null) { + // Use the default URI scheme only if the path has no scheme. + URI defaultUri = FileSystem.getDefaultUri(CONF); + scheme = defaultUri.getScheme(); + } + + // Paths without a scheme default to the default filesystem scheme. + Preconditions.checkNotNull(scheme); + + // Non-HDFS paths will be skipped. + if(scheme.equalsIgnoreCase("hdfs")) { + return Lists.newArrayList(uri.getPath().split("^/")[1].split("/")); } else { + LOGGER.warn("Invalid FS: " + scheme + "://; expected hdfs://"); return null; } } catch (URISyntaxException e) { diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/MetastoreClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryAuthzUpdate.java similarity index 65% rename from sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/MetastoreClient.java rename to sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryAuthzUpdate.java index 3ecff94c7..4cf439b5a 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/MetastoreClient.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryAuthzUpdate.java @@ -19,20 +19,23 @@ import java.util.List; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; +public class SentryAuthzUpdate { -/** - * Interface to abstract all interactions between Sentry and Hive Metastore - * - */ -public interface MetastoreClient { + private final List permUpdates; + private final List pathUpdates; - public List getAllDatabases(); + public SentryAuthzUpdate(List permUpdates, + List pathUpdates) { + this.permUpdates = permUpdates; + this.pathUpdates = pathUpdates; + } - public List
getAllTablesOfDatabase(Database db); + public List getPermUpdates() { + return permUpdates; + } - public List listAllPartitions(Database db, Table tbl); + public List getPathUpdates() { + return pathUpdates; + } } diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java index 5425daa68..ab12bf402 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java @@ -17,213 +17,17 @@ */ package org.apache.sentry.hdfs; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; +public interface SentryHDFSServiceClient { + String SENTRY_HDFS_SERVICE_NAME = "SentryHDFSService"; -import javax.security.auth.callback.CallbackHandler; + void notifyHMSUpdate(PathsUpdate update) + throws SentryHdfsServiceException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.SaslRpcServer; -import org.apache.hadoop.security.SaslRpcServer.AuthMethod; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.sentry.hdfs.service.thrift.SentryHDFSService; -import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Client; -import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse; -import org.apache.sentry.hdfs.service.thrift.TPathsUpdate; -import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate; -import org.apache.sentry.hdfs.ServiceConstants.ClientConfig; -import org.apache.sentry.hdfs.ServiceConstants.ServerConfig; -import org.apache.thrift.protocol.TBinaryProtocol; -//import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TMultiplexedProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TSaslClientTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + long getLastSeenHMSPathSeqNum() throws SentryHdfsServiceException; -import com.google.common.base.Preconditions; + SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum) + throws SentryHdfsServiceException; -public class SentryHDFSServiceClient { - - private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceClient.class); - - public static final String SENTRY_HDFS_SERVICE_NAME = "SentryHDFSService"; - - public static class SentryAuthzUpdate { - - private final List permUpdates; - private final List pathUpdates; - - public SentryAuthzUpdate(List permUpdates, List pathUpdates) { - this.permUpdates = permUpdates; - this.pathUpdates = pathUpdates; - } - - public List getPermUpdates() { - return permUpdates; - } - - public List getPathUpdates() { - return pathUpdates; - } - } - - /** - * This transport wraps the Sasl transports to set up the right UGI context for open(). 
- */ - public static class UgiSaslClientTransport extends TSaslClientTransport { - protected UserGroupInformation ugi = null; - - public UgiSaslClientTransport(String mechanism, String authorizationId, - String protocol, String serverName, Map props, - CallbackHandler cbh, TTransport transport, boolean wrapUgi) - throws IOException { - super(mechanism, authorizationId, protocol, serverName, props, cbh, - transport); - if (wrapUgi) { - ugi = UserGroupInformation.getLoginUser(); - } - } - - // open the SASL transport with using the current UserGroupInformation - // This is needed to get the current login context stored - @Override - public void open() throws TTransportException { - if (ugi == null) { - baseOpen(); - } else { - try { - ugi.doAs(new PrivilegedExceptionAction() { - public Void run() throws TTransportException { - baseOpen(); - return null; - } - }); - } catch (IOException e) { - throw new TTransportException("Failed to open SASL transport", e); - } catch (InterruptedException e) { - throw new TTransportException( - "Interrupted while opening underlying transport", e); - } - } - } - - private void baseOpen() throws TTransportException { - super.open(); - } - } - - private final Configuration conf; - private final InetSocketAddress serverAddress; - private final int connectionTimeout; - private boolean kerberos; - private TTransport transport; - - private String[] serverPrincipalParts; - private Client client; - - public SentryHDFSServiceClient(Configuration conf) throws IOException { - this.conf = conf; - Preconditions.checkNotNull(this.conf, "Configuration object cannot be null"); - this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull( - conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key " - + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt( - ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT)); - this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT, - ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT); - kerberos = ClientConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase( - conf.get(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_KERBEROS).trim()); - transport = new TSocket(serverAddress.getHostName(), - serverAddress.getPort(), connectionTimeout); - if (kerberos) { - String serverPrincipal = Preconditions.checkNotNull( - conf.get(ClientConfig.PRINCIPAL), ClientConfig.PRINCIPAL + " is required"); - - // Resolve server host in the same way as we are doing on server side - serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress()); - LOGGER.info("Using server kerberos principal: " + serverPrincipal); - - serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal); - Preconditions.checkArgument(serverPrincipalParts.length == 3, - "Kerberos principal should have 3 parts: " + serverPrincipal); - boolean wrapUgi = "true".equalsIgnoreCase(conf - .get(ClientConfig.SECURITY_USE_UGI_TRANSPORT, "true")); - transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(), - null, serverPrincipalParts[0], serverPrincipalParts[1], - ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi); - } else { - serverPrincipalParts = null; - } - try { - transport.open(); - } catch (TTransportException e) { - throw new IOException("Transport exception while opening transport: " + e.getMessage(), e); - } - LOGGER.info("Successfully opened transport: " + transport + " to " + serverAddress); - TProtocol tProtocol = new TBinaryProtocol(transport); -// if 
(conf.getBoolean(ClientConfig.USE_COMPACT_TRANSPORT, -// ClientConfig.USE_COMPACT_TRANSPORT_DEFAULT)) { -// tProtocol = new TCompactProtocol(transport); -// } else { -// tProtocol = new TBinaryProtocol(transport); -// } - TMultiplexedProtocol protocol = new TMultiplexedProtocol( - tProtocol, SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME); - client = new SentryHDFSService.Client(protocol); - LOGGER.info("Successfully created client"); - } - - public synchronized void notifyHMSUpdate(PathsUpdate update) - throws IOException { - try { - client.handle_hms_notification(update.toThrift()); - } catch (Exception e) { - throw new IOException("Thrift Exception occurred !!", e); - } - } - - public synchronized long getLastSeenHMSPathSeqNum() - throws IOException { - try { - return client.check_hms_seq_num(-1); - } catch (Exception e) { - throw new IOException("Thrift Exception occurred !!", e); - } - } - - public synchronized SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum) - throws IOException { - SentryAuthzUpdate retVal = new SentryAuthzUpdate(new LinkedList(), new LinkedList()); - try { - TAuthzUpdateResponse sentryUpdates = client.get_all_authz_updates_from(permSeqNum, pathSeqNum); - if (sentryUpdates.getAuthzPathUpdate() != null) { - for (TPathsUpdate pathsUpdate : sentryUpdates.getAuthzPathUpdate()) { - retVal.getPathUpdates().add(new PathsUpdate(pathsUpdate)); - } - } - if (sentryUpdates.getAuthzPermUpdate() != null) { - for (TPermissionsUpdate permsUpdate : sentryUpdates.getAuthzPermUpdate()) { - retVal.getPermUpdates().add(new PermissionsUpdate(permsUpdate)); - } - } - } catch (Exception e) { - throw new IOException("Thrift Exception occurred !!", e); - } - return retVal; - } - - public void close() { - if (transport != null) { - transport.close(); - } - } + void close(); } + diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClientDefaultImpl.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClientDefaultImpl.java new file mode 100644 index 000000000..03bf39e1b --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClientDefaultImpl.java @@ -0,0 +1,215 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.hdfs; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; +import java.util.LinkedList; +import java.util.Map; + +import javax.security.auth.callback.CallbackHandler; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.SaslRpcServer.AuthMethod; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.sentry.hdfs.service.thrift.SentryHDFSService; +import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Client; +import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse; +import org.apache.sentry.hdfs.service.thrift.TPathsUpdate; +import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate; +import org.apache.sentry.hdfs.ServiceConstants.ClientConfig; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TCompactProtocol; +import org.apache.thrift.protocol.TMultiplexedProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TSaslClientTransport; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; + +public class SentryHDFSServiceClientDefaultImpl implements SentryHDFSServiceClient { + + private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceClientDefaultImpl.class); + + /** + * This transport wraps the Sasl transports to set up the right UGI context for open(). + */ + public static class UgiSaslClientTransport extends TSaslClientTransport { + protected UserGroupInformation ugi = null; + + public UgiSaslClientTransport(String mechanism, String authorizationId, + String protocol, String serverName, Map props, + CallbackHandler cbh, TTransport transport, boolean wrapUgi) + throws IOException { + super(mechanism, authorizationId, protocol, serverName, props, cbh, + transport); + if (wrapUgi) { + ugi = UserGroupInformation.getLoginUser(); + } + } + + // open the SASL transport with using the current UserGroupInformation + // This is needed to get the current login context stored + @Override + public void open() throws TTransportException { + if (ugi == null) { + baseOpen(); + } else { + try { + // ensure that the ticket is valid before connecting to service. Note that + // checkTGTAndReloginFromKeytab() renew the ticket only when more than 80% + // of ticket lifetime has passed. 
+ if (ugi.isFromKeytab()) { + ugi.checkTGTAndReloginFromKeytab(); + } + + ugi.doAs(new PrivilegedExceptionAction() { + public Void run() throws TTransportException { + baseOpen(); + return null; + } + }); + } catch (IOException e) { + throw new TTransportException("Failed to open SASL transport", e); + } catch (InterruptedException e) { + throw new TTransportException( + "Interrupted while opening underlying transport", e); + } + } + } + + private void baseOpen() throws TTransportException { + super.open(); + } + } + + private final Configuration conf; + private final InetSocketAddress serverAddress; + private final int connectionTimeout; + private boolean kerberos; + private TTransport transport; + + private String[] serverPrincipalParts; + private Client client; + + public SentryHDFSServiceClientDefaultImpl(Configuration conf) throws IOException { + this.conf = conf; + Preconditions.checkNotNull(this.conf, "Configuration object cannot be null"); + this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull( + conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key " + + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt( + ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT)); + this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT, + ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT); + kerberos = ClientConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase( + conf.get(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_KERBEROS).trim()); + transport = new TSocket(serverAddress.getHostName(), + serverAddress.getPort(), connectionTimeout); + if (kerberos) { + String serverPrincipal = Preconditions.checkNotNull( + conf.get(ClientConfig.PRINCIPAL), ClientConfig.PRINCIPAL + " is required"); + + // Resolve server host in the same way as we are doing on server side + serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress()); + LOGGER.info("Using server kerberos principal: " + serverPrincipal); + + serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal); + Preconditions.checkArgument(serverPrincipalParts.length == 3, + "Kerberos principal should have 3 parts: " + serverPrincipal); + boolean wrapUgi = "true".equalsIgnoreCase(conf + .get(ClientConfig.SECURITY_USE_UGI_TRANSPORT, "true")); + transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(), + null, serverPrincipalParts[0], serverPrincipalParts[1], + ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi); + } else { + serverPrincipalParts = null; + } + try { + transport.open(); + } catch (TTransportException e) { + throw new IOException("Transport exception while opening transport: " + e.getMessage(), e); + } + LOGGER.info("Successfully opened transport: " + transport + " to " + serverAddress); + TProtocol tProtocol = null; + long maxMessageSize = conf.getLong(ServiceConstants.ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE, + ServiceConstants.ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT); + if (conf.getBoolean(ClientConfig.USE_COMPACT_TRANSPORT, + ClientConfig.USE_COMPACT_TRANSPORT_DEFAULT)) { + tProtocol = new TCompactProtocol(transport, maxMessageSize, maxMessageSize); + } else { + tProtocol = new TBinaryProtocol(transport, maxMessageSize, maxMessageSize, true, true); + } + TMultiplexedProtocol protocol = new TMultiplexedProtocol( + tProtocol, SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME); + client = new SentryHDFSService.Client(protocol); + LOGGER.info("Successfully created client"); + } + + 
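+ // A minimal usage sketch of this client (illustrative; the host and port values + // and the sequence-number variables are hypothetical): + // Configuration conf = new Configuration(); + // conf.set(ClientConfig.SERVER_RPC_ADDRESS, "sentry-host.example.com"); + // conf.setInt(ClientConfig.SERVER_RPC_PORT, 8038); + // SentryHDFSServiceClient client = SentryHDFSServiceClientFactory.create(conf); + // try { + // SentryAuthzUpdate delta = client.getAllUpdatesFrom(lastPermSeqNum, lastPathSeqNum); + // } finally { + // client.close(); + // } +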
public synchronized void notifyHMSUpdate(PathsUpdate update) + throws SentryHdfsServiceException { + try { + client.handle_hms_notification(update.toThrift()); + } catch (Exception e) { + throw new SentryHdfsServiceException("Thrift Exception occurred !!", e); + } + } + + public synchronized long getLastSeenHMSPathSeqNum() + throws SentryHdfsServiceException { + try { + return client.check_hms_seq_num(-1); + } catch (Exception e) { + throw new SentryHdfsServiceException("Thrift Exception occurred !!", e); + } + } + + public synchronized SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum) + throws SentryHdfsServiceException { + SentryAuthzUpdate retVal = new SentryAuthzUpdate(new LinkedList(), new LinkedList()); + try { + TAuthzUpdateResponse sentryUpdates = client.get_all_authz_updates_from(permSeqNum, pathSeqNum); + if (sentryUpdates.getAuthzPathUpdate() != null) { + for (TPathsUpdate pathsUpdate : sentryUpdates.getAuthzPathUpdate()) { + retVal.getPathUpdates().add(new PathsUpdate(pathsUpdate)); + } + } + if (sentryUpdates.getAuthzPermUpdate() != null) { + for (TPermissionsUpdate permsUpdate : sentryUpdates.getAuthzPermUpdate()) { + retVal.getPermUpdates().add(new PermissionsUpdate(permsUpdate)); + } + } + } catch (Exception e) { + throw new SentryHdfsServiceException("Thrift Exception occurred !!", e); + } + return retVal; + } + + public void close() { + if (transport != null) { + transport.close(); + } + } +} diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClientFactory.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClientFactory.java new file mode 100644 index 000000000..58aa10d70 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClientFactory.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.hdfs; + +import java.lang.reflect.Proxy; + +import org.apache.hadoop.conf.Configuration; +import org.apache.sentry.hdfs.ha.HdfsHAClientInvocationHandler; +import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; + +/** + * Client factory to create normal client or proxy with HA invocation handler + */ +public class SentryHDFSServiceClientFactory { + public static SentryHDFSServiceClient create(Configuration conf) + throws Exception { + boolean haEnabled = conf.getBoolean(ServerConfig.SENTRY_HA_ENABLED, false); + if (haEnabled) { + return (SentryHDFSServiceClient) Proxy.newProxyInstance( + SentryHDFSServiceClientDefaultImpl.class.getClassLoader(), + SentryHDFSServiceClientDefaultImpl.class.getInterfaces(), + new HdfsHAClientInvocationHandler(conf)); + } else { + return new SentryHDFSServiceClientDefaultImpl(conf); + } + } + +} diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHdfsServiceException.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHdfsServiceException.java new file mode 100644 index 000000000..307d8c317 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHdfsServiceException.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.sentry.hdfs; + +public class SentryHdfsServiceException extends RuntimeException { + private static final long serialVersionUID = 1511645864949767378L; + + public SentryHdfsServiceException(String message, Throwable cause) { + super(message, cause); + } + + public SentryHdfsServiceException(String message) { + super(message); + } + + +} diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java index 516f77368..1fdf4181f 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java @@ -49,8 +49,23 @@ public static class ServerConfig { public static final int SENTRY_HDFS_SYNC_CHECKER_PERIOD_DEFAULT = 1000; public static final String SENTRY_HDFS_HA_ZOOKEEPER_NAMESPACE = "sentry.hdfs.ha.zookeeper.namespace"; public static final String SENTRY_HDFS_HA_ZOOKEEPER_NAMESPACE_DEFAULT = "/sentry_hdfs"; - public static final String SENTRY_METASTORE_HA_ZOOKEEPER_NAMESPACE = "sentry.hdfs.ha.zookeeper.namespace"; + public static final String SENTRY_METASTORE_HA_ZOOKEEPER_NAMESPACE = "sentry.metastore.ha.zookeeper.namespace"; public static final String SENTRY_METASTORE_HA_ZOOKEEPER_NAMESPACE_DEFAULT = "/sentry_metastore"; + public static final String SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS = "sentry.hdfs.sync.metastore.cache.init.threads"; + public static final int SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS_DEFAULT = 10; + public static final String SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_MAX_NUM = "sentry.hdfs.sync.metastore.cache.retry.max.num"; + public static final int SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_MAX_NUM_DEFAULT = 1; + public static final String SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_WAIT_DURAION_IN_MILLIS = "sentry.hdfs.sync.metastore.cache.retry.wait.duration.millis"; + public static final int SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_WAIT_DURAION_IN_MILLIS_DEFAULT = 1000; + public static final String SENTRY_HDFS_SYNC_METASTORE_CACHE_FAIL_ON_PARTIAL_UPDATE = "sentry.hdfs.sync.metastore.cache.fail.on.partial.update"; + public static final boolean SENTRY_HDFS_SYNC_METASTORE_CACHE_FAIL_ON_PARTIAL_UPDATE_DEFAULT = true; + public static final String SENTRY_HDFS_SYNC_METASTORE_CACHE_ASYNC_INIT_ENABLE = "sentry.hdfs.sync.metastore.cache.async-init.enable"; + public static final boolean SENTRY_HDFS_SYNC_METASTORE_CACHE_ASYNC_INIT_ENABLE_DEFAULT = false; + + public static String SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC = "sentry.hdfs.sync.metastore.cache.max-partitions-per-rpc"; + public static int SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC_DEFAULT = 100; + public static String SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC = "sentry.hdfs.sync.metastore.cache.max-tables-per-rpc"; + public static int SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC_DEFAULT = 100; } public static class ClientConfig { @@ -71,6 +86,10 @@ public static class ClientConfig { public static final int SERVER_RPC_CONN_TIMEOUT_DEFAULT = 200000; public static final String USE_COMPACT_TRANSPORT = "sentry.hdfs.service.client.compact.transport"; public static final boolean USE_COMPACT_TRANSPORT_DEFAULT = false; + + // max message size for thrift messages + public static String SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE = "sentry.hdfs.thrift.max.message.size"; + public static long 
SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT = 100 * 1024 * 1024; } } diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ThriftSerializer.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ThriftSerializer.java index b5857735c..782367a70 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ThriftSerializer.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ThriftSerializer.java @@ -19,18 +19,24 @@ import java.io.IOException; +import com.google.common.annotations.VisibleForTesting; import org.apache.thrift.TBase; import org.apache.thrift.TDeserializer; import org.apache.thrift.TException; import org.apache.thrift.TSerializer; -import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TCompactProtocol; public class ThriftSerializer { + // Use default max thrift message size here. + // TODO: Figure out a way to make maxMessageSize configurable, eg. create a serializer singleton at startup by + // passing a max_size parameter + @VisibleForTesting + static long maxMessageSize = ServiceConstants.ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT; + @SuppressWarnings("rawtypes") public static byte[] serialize(TBase baseObject) throws IOException { - TSerializer serializer = new TSerializer(new TCompactProtocol.Factory()); + TSerializer serializer = new TSerializer(new TCompactProtocol.Factory(maxMessageSize, maxMessageSize)); try { return serializer.serialize(baseObject); } catch (TException e) { @@ -40,10 +46,8 @@ public static byte[] serialize(TBase baseObject) throws IOException { } @SuppressWarnings("rawtypes") - public static TBase deserialize(TBase baseObject, byte[] serialized) - throws IOException { - TDeserializer deserializer = new TDeserializer( - new TCompactProtocol.Factory()); + public static TBase deserialize(TBase baseObject, byte[] serialized) throws IOException { + TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory(maxMessageSize, maxMessageSize)); try { deserializer.deserialize(baseObject, serialized); } catch (TException e) { diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java index ac8459b19..4dc3a0ceb 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java @@ -23,12 +23,12 @@ public interface Updateable { /** - * Thrift currently does not support class inheritance.We need all update + * Thrift currently does not support class inheritance. We need all update * objects to expose a unified API. A wrapper class need to be created * implementing this interface and containing the generated thrift class as * a work around */ - public interface Update { + interface Update { boolean hasFullImage(); @@ -47,27 +47,27 @@ public interface Update { * @param lock External Lock. 
* @return */ - public void updatePartial(Iterable update, ReadWriteLock lock); + void updatePartial(Iterable update, ReadWriteLock lock); /** * This returns a new object with the full update applied * @param update * @return */ - public Updateable updateFull(K update); + Updateable updateFull(K update); /** * Return sequence number of Last Update */ - public long getLastUpdatedSeqNum(); + long getLastUpdatedSeqNum(); /** * Create and Full image update of the local data structure * @param currSeqNum * @return */ - public K createFullImageUpdate(long currSeqNum); + K createFullImageUpdate(long currSeqNum); - public String getUpdateableTypeName(); + String getUpdateableTypeName(); } diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java index b74f9541f..8fc547008 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java @@ -18,6 +18,7 @@ package org.apache.sentry.hdfs; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; @@ -48,13 +49,13 @@ public boolean isUnderPrefix(String[] pathElements) { } @Override - public String findAuthzObject(String[] pathElements) { - return paths.findAuthzObject(pathElements); + public Set findAuthzObject(String[] pathElements) { + return paths.findAuthzObject(pathElements); } @Override - public String findAuthzObjectExactMatch(String[] pathElements) { - return paths.findAuthzObjectExactMatch(pathElements); + public Set findAuthzObjectExactMatches(String[] pathElements) { + return paths.findAuthzObjectExactMatches(pathElements); } @Override @@ -92,16 +93,16 @@ private void applyPartialUpdate(PathsUpdate update) { List pathChanges = update.getPathChanges(); TPathChanges newPathInfo = null; TPathChanges oldPathInfo = null; - if ((pathChanges.get(0).getAddPathsSize() == 1) - && (pathChanges.get(1).getDelPathsSize() == 1)) { + if (pathChanges.get(0).getAddPathsSize() == 1 + && pathChanges.get(1).getDelPathsSize() == 1) { newPathInfo = pathChanges.get(0); oldPathInfo = pathChanges.get(1); - } else if ((pathChanges.get(1).getAddPathsSize() == 1) - && (pathChanges.get(0).getDelPathsSize() == 1)) { + } else if (pathChanges.get(1).getAddPathsSize() == 1 + && pathChanges.get(0).getDelPathsSize() == 1) { newPathInfo = pathChanges.get(1); oldPathInfo = pathChanges.get(0); } - if ((newPathInfo != null)&&(oldPathInfo != null)) { + if (newPathInfo != null && oldPathInfo != null) { paths.renameAuthzObject( oldPathInfo.getAuthzObj(), oldPathInfo.getDelPaths().get(0), newPathInfo.getAuthzObj(), newPathInfo.getAddPaths().get(0)); @@ -112,8 +113,8 @@ private void applyPartialUpdate(PathsUpdate update) { paths.addPathsToAuthzObject(pathChanges.getAuthzObj(), pathChanges .getAddPaths(), true); List> delPaths = pathChanges.getDelPaths(); - if ((delPaths.size() == 1) && (delPaths.get(0).size() == 1) - && (delPaths.get(0).get(0).equals(PathsUpdate.ALL_PATHS))) { + if (delPaths.size() == 1 && delPaths.get(0).size() == 1 + && delPaths.get(0).get(0).equals(PathsUpdate.ALL_PATHS)) { // Remove all paths.. eg. 
drop table paths.deleteAuthzObject(pathChanges.getAuthzObj()); } else { diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ha/HdfsHAClientInvocationHandler.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ha/HdfsHAClientInvocationHandler.java new file mode 100644 index 000000000..6138b8c89 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ha/HdfsHAClientInvocationHandler.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.hdfs.ha; + +import java.io.IOException; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.InetSocketAddress; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.curator.x.discovery.ServiceInstance; +import org.apache.sentry.hdfs.SentryHDFSServiceClientDefaultImpl; +import org.apache.sentry.hdfs.SentryHdfsServiceException; +import org.apache.sentry.hdfs.ServiceConstants; +import org.apache.sentry.provider.db.service.persistent.HAContext; +import org.apache.sentry.provider.db.service.persistent.ServiceManager; +import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; + +public class HdfsHAClientInvocationHandler implements InvocationHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(HdfsHAClientInvocationHandler.class); + + private final Configuration conf; + private ServiceManager manager; + private ServiceInstance currentServiceInstance; + private SentryHDFSServiceClientDefaultImpl client = null; + + private static final String THRIFT_EXCEPTION_MESSAGE = "Thrift exception occurred "; + public static final String SENTRY_HA_ERROR_MESSAGE = "No Sentry server available. " + + "Please ensure that at least one Sentry server is online"; + + public HdfsHAClientInvocationHandler(Configuration conf) throws Exception { + this.conf = conf; + checkClientConf(); + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws + SentryHdfsServiceException { + Object result = null; + try { + if (!method.isAccessible()) { + method.setAccessible(true); + } + // The client is initialized in the first call instead of the constructor.
+ // This way we can propagate the connection exception to the caller cleanly + if (client == null) { + renewSentryClient(); + } + result = method.invoke(client, args); + } catch (IllegalAccessException e) { + throw new SentryHdfsServiceException(e.getMessage(), e.getCause()); + } catch (InvocationTargetException e) { + if (!(e.getTargetException() instanceof SentryHdfsServiceException)) { + throw new SentryHdfsServiceException("Error in Sentry HDFS client", + e.getTargetException()); + } else { + LOGGER.warn(THRIFT_EXCEPTION_MESSAGE + ": Error connecting to the current" + + " service; will retry another service.", e); + if (client != null) { + client.close(); + client = null; + } + throw (SentryHdfsServiceException) e.getTargetException(); + } + } catch (IOException e1) { + // close() doesn't throw an exception; we suppress it in case of connection + // loss. Changing SentryPolicyServiceClient#close() to throw an + // exception would be a backward incompatible change for Sentry clients. + if ("close".equals(method.getName())) { + return null; + } + throw new SentryHdfsServiceException( + "Error connecting to sentry service " + e1.getMessage(), e1); + } + return result; + } + + // Retrieve the new connection endpoint from ZK and connect to the new server + private void renewSentryClient() throws IOException { + try { + manager = new ServiceManager(HAContext.getHAContext(conf)); + } catch (Exception e1) { + throw new IOException("Failed to extract Sentry node info from zookeeper", e1); + } + + try { + while (true) { + currentServiceInstance = manager.getServiceInstance(); + if (currentServiceInstance == null) { + throw new IOException(SENTRY_HA_ERROR_MESSAGE); + } + InetSocketAddress serverAddress = + ServiceManager.convertServiceInstance(currentServiceInstance); + conf.set(ServiceConstants.ClientConfig.SERVER_RPC_ADDRESS, serverAddress.getHostName()); + conf.setInt(ServiceConstants.ClientConfig.SERVER_RPC_PORT, serverAddress.getPort()); + try { + client = new SentryHDFSServiceClientDefaultImpl(conf); + LOGGER.info("Sentry Client using server " + serverAddress.getHostName() + + ":" + serverAddress.getPort()); + break; + } catch (IOException e) { + manager.reportError(currentServiceInstance); + LOGGER.info("Transport exception while opening transport: " + e.getMessage(), e); + } + } + } finally { + manager.close(); + } + } + + private void checkClientConf() { + if (conf.getBoolean(ServerConfig.SENTRY_HA_ZOOKEEPER_SECURITY, + ServerConfig.SENTRY_HA_ZOOKEEPER_SECURITY_DEFAULT)) { + String serverPrincipal = Preconditions.checkNotNull(conf.get(ServerConfig.PRINCIPAL), + ServerConfig.PRINCIPAL + " is required"); + Preconditions.checkArgument(serverPrincipal.contains(SecurityUtil.HOSTNAME_PATTERN), + ServerConfig.PRINCIPAL + " : " + serverPrincipal + " should contain " + SecurityUtil.HOSTNAME_PATTERN); + } + } +} diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift b/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift index fb6085574..5f9cf3137 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift +++ b/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift @@ -22,23 +22,39 @@ # Thrift Service that the MetaStore is built on # -include "share/fb303/if/fb303.thrift" +#include "share/fb303/if/fb303.thrift" namespace java org.apache.sentry.hdfs.service.thrift namespace php sentry.hdfs.thrift namespace cpp Apache.Sentry.HDFS.Thrift struct TPathChanges { + +# The authorizable object that needs to be
updated. 1: required string authzObj; + +# The path (split into string segments) that needs to be +# added to the authorizable object. 2: required list> addPaths; + +# The path (split into string segments) that needs to be +# deleted from the authorizable object. 3: required list> delPaths; } struct TPathEntry { + +# The type of the Path Entry. 1: required byte type; + +# The path element as a string. 2: required string pathElement; -3: optional string authzObj; + +# The child tuple ids of the Path Entry. 4: required set children; + +# A set of authzObjs associated with the Path Entry. +5: optional set authzObjs; } struct TPathsDump { @@ -54,14 +70,28 @@ struct TPathsUpdate { } struct TPrivilegeChanges { + +# The authorizable object that needs to be updated. 1: required string authzObj; + +# The privileges that need to be added to +# the authorizable object. 2: required map addPrivileges; + +# The privileges that need to be deleted from +# the authorizable object. 3: required map delPrivileges; } struct TRoleChanges { + +# The role that needs to be updated. 1: required string role; + +# The groups that need to be added. 2: required list addGroups; + +# The groups that need to be deleted. 3: required list delGroups; } @@ -69,7 +99,7 @@ struct TPermissionsUpdate { 1: required bool hasfullImage; 2: required i64 seqNum; 3: required map privilegeChanges; -4: required map roleChanges; +4: required map roleChanges; } struct TAuthzUpdateResponse { diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/SentryHdfsServiceIntegrationBase.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/SentryHdfsServiceIntegrationBase.java new file mode 100644 index 000000000..eccf83bdf --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/SentryHdfsServiceIntegrationBase.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.sentry.hdfs; + +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.sentry.hdfs.ServiceConstants.ClientConfig; +import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; +import org.junit.After; +import org.junit.Before; + +public class SentryHdfsServiceIntegrationBase extends + SentryServiceIntegrationBase { + + protected SentryHDFSServiceClient hdfsClient; + + @Before + public void before() throws Exception { + conf.set("hadoop.security.authentication", "kerberos"); + UserGroupInformation.setConfiguration(conf); + UserGroupInformation.loginUserFromKeytab(CLIENT_PRINCIPAL, + clientKeytab.getPath()); + + connectToHdfsSyncService(); + } + + @After + public void after() { + if (hdfsClient != null) { + hdfsClient.close(); + } + } + + protected void connectToHdfsSyncService() throws Exception { + if (hdfsClient != null) { + hdfsClient.close(); + } + + // SentryHdfs client configuration setup + conf.set(ClientConfig.SERVER_RPC_ADDRESS, server.getAddress() + .getHostName()); + conf.set(ClientConfig.SERVER_RPC_PORT, + String.valueOf(server.getAddress().getPort())); + + if (kerberos) { + conf.set(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_KERBEROS); + conf.set(ClientConfig.SECURITY_USE_UGI_TRANSPORT, "true"); + conf.set(ClientConfig.PRINCIPAL, getServerKerberosName()); + hdfsClient = UserGroupInformation.getLoginUser().doAs( + new PrivilegedExceptionAction() { + @Override + public SentryHDFSServiceClient run() throws Exception { + return SentryHDFSServiceClientFactory.create(conf); + } + }); + } else { + hdfsClient = SentryHDFSServiceClientFactory.create(conf); + } + hdfsClient.close(); + } + +} diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java index 29868ae26..bb74779bf 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java +++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java @@ -63,7 +63,7 @@ public void testRootEntry() { root.toString(); Assert.assertNull(root.getParent()); Assert.assertEquals(HMSPaths.EntryType.DIR, root.getType()); - Assert.assertNull(root.getAuthzObj()); + Assert.assertTrue(root.getAuthzObjs().size() == 0); Assert.assertEquals(Path.SEPARATOR, root.getFullPath()); Assert.assertTrue(root.getChildren().isEmpty()); root.delete(); @@ -127,7 +127,7 @@ public void testImmediatePrefixEntry() { Assert.assertEquals(root, entry.getParent()); Assert.assertEquals(HMSPaths.EntryType.PREFIX, entry.getType()); Assert.assertEquals("a", entry.getPathElement()); - Assert.assertNull(entry.getAuthzObj()); + Assert.assertEquals(0, entry.getAuthzObjs().size()); Assert.assertEquals(Path.SEPARATOR + "a", entry.getFullPath()); Assert.assertTrue(entry.getChildren().isEmpty()); @@ -167,13 +167,13 @@ public void testFurtherPrefixEntry() { Assert.assertEquals(root, entry.getParent().getParent()); Assert.assertEquals(HMSPaths.EntryType.PREFIX, entry.getType()); - Assert.assertEquals(HMSPaths.EntryType.DIR, + Assert.assertEquals(HMSPaths.EntryType.DIR, entry.getParent().getType()); Assert.assertEquals("b", entry.getPathElement()); Assert.assertEquals("a", entry.getParent().getPathElement()); - Assert.assertNull(entry.getAuthzObj()); - Assert.assertNull(entry.getParent().getAuthzObj());
- Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b", + Assert.assertTrue(entry.getAuthzObjs().size() == 0); + Assert.assertTrue(entry.getParent().getAuthzObjs().size() == 0); + Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b", entry.getFullPath()); Assert.assertEquals(Path.SEPARATOR + "a", entry.getParent().getFullPath()); Assert.assertTrue(entry.getChildren().isEmpty()); @@ -212,7 +212,7 @@ public void testImmediateAuthzEntry() { Assert.assertEquals(prefix, entry.getParent()); Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType()); Assert.assertEquals("p1", entry.getPathElement()); - Assert.assertEquals("A", entry.getAuthzObj()); + Assert.assertTrue(entry.getAuthzObjs().contains("A")); Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b" + Path.SEPARATOR + "p1", entry.getFullPath()); @@ -249,7 +249,7 @@ public void testFurtherAuthzEntry() { Assert.assertEquals(prefix, entry.getParent().getParent()); Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType()); Assert.assertEquals("p1", entry.getPathElement()); - Assert.assertEquals("A", entry.getAuthzObj()); + Assert.assertTrue(entry.getAuthzObjs().contains("A")); Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b" + Path.SEPARATOR + "t" + Path.SEPARATOR + "p1", entry.getFullPath()); @@ -265,11 +265,11 @@ public void testFurtherAuthzEntry() { Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType()); Assert.assertEquals("p1", entry.getPathElement()); - Assert.assertEquals("A", entry.getAuthzObj()); + Assert.assertTrue(entry.getAuthzObjs().contains("A")); Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, ep2.getType()); Assert.assertEquals("p2", ep2.getPathElement()); - Assert.assertEquals("A", entry.getAuthzObj()); + Assert.assertTrue(entry.getAuthzObjs().contains("A")); Assert.assertEquals(entry, root.find(new String[]{"a", "b", "t", "p1"}, true)); @@ -296,7 +296,7 @@ public void testFurtherAuthzEntry() { Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"}, true)); Assert.assertEquals(HMSPaths.EntryType.DIR, entry.getType()); - Assert.assertNull(entry.getAuthzObj()); + Assert.assertEquals(entry.getAuthzObjs().size(), 0); Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"}, false)); Assert.assertNull(root.find(new String[]{"a", "b", "t"}, false)); @@ -353,5 +353,19 @@ public void testMultipleAuthzEntry() { Assert.assertEquals(prefix, root.findPrefixEntry( Lists.newArrayList("a", "b", "t", "p3"))); } + + @Test + public void testAuthzObjCaseInsensitive() { + HMSPaths.Entry root = HMSPaths.Entry.createRoot(false); + HMSPaths.Entry prefix = root.createPrefix(Lists.newArrayList("a", "b")); + + HMSPaths.Entry entry = root.createAuthzObjPath( + Lists.newArrayList("a", "b", "t", "p1"), "A"); + Assert.assertEquals(prefix, entry.getParent().getParent()); + Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType()); + + // Authz Object is case insensitive. 
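+ // (Created above as "A" and found below as "a": authz object names are + // matched case-insensitively.)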
+ Assert.assertTrue(entry.getAuthzObjs().contains("a")); + } } diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java index f74a75dd9..194ffb755 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java +++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java @@ -17,7 +17,7 @@ */ package org.apache.sentry.hdfs; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.hdfs.service.thrift.TPathsDump; import org.apache.thrift.TDeserializer; @@ -29,9 +29,11 @@ import org.junit.Test; import com.google.common.collect.Lists; +import java.util.Arrays; +import java.util.HashSet; +import java.io.IOException; public class TestHMSPathsFullDump { - private static boolean useCompact = true; @Test @@ -50,32 +52,85 @@ public void testDumpAndInitialize() { hmsPaths._addAuthzObject("db2.tbl21", Lists.newArrayList("/user/hive/w2/db2/tbl21")); hmsPaths._addPathsToAuthzObject("db2.tbl21", Lists.newArrayList("/user/hive/w2/db2/tbl21/p1=1/p2=x")); - Assert.assertEquals("default", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse"}, false)); - Assert.assertEquals("db1", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1"}, false)); - Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11"}, false)); - Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part111"}, false)); - Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part112"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("default")), hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1")), hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1.tbl11")), hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1.tbl11")), hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part111"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1.tbl11")), hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part112"}, false)); - Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "p1=1", "p2=x"}, false)); - Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "p1=1"}, true)); - Assert.assertEquals("db2.tbl21", hmsPaths.findAuthzObject(new String[]{"user", "hive", "w2", "db2", "tbl21", "p1=1"}, true)); + Assert.assertEquals(new HashSet(Arrays.asList("db1.tbl11")), hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "p1=1", "p2=x"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1.tbl11")), hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "p1=1"}, true)); + Assert.assertEquals(new HashSet(Arrays.asList("db2.tbl21")), hmsPaths.findAuthzObject(new String[]{"user", "hive", "w2", "db2", "tbl21", "p1=1"}, true)); HMSPathsDumper serDe = hmsPaths.getPathsDump(); 
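// getPathsDump() serializes the in-memory tree into a Thrift TPathsDump; // initializeFromDump below rebuilds an equivalent HMSPaths, which the // assertions that follow verify.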
TPathsDump pathsDump = serDe.createPathsDump(); HMSPaths hmsPaths2 = new HMSPaths(new String[] {"/user/hive/warehouse"}).getPathsDump().initializeFromDump(pathsDump); - Assert.assertEquals("default", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse"}, false)); - Assert.assertEquals("db1", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1"}, false)); - Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11"}, false)); - Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part111"}, false)); - Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part112"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("default")), hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1")), hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1.tbl11")), hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1.tbl11")), hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part111"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db1.tbl11")), hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part112"}, false)); - // This path is not under prefix, so should not be deserialized.. + // This path is not under prefix, so should not be deserialized.. Assert.assertNull(hmsPaths2.findAuthzObject(new String[]{"user", "hive", "w2", "db2", "tbl21", "p1=1"}, true)); } @Test public void testThrftSerialization() throws TException { + HMSPathsDumper serDe = genHMSPathsDumper(); + long t1 = System.currentTimeMillis(); + TPathsDump pathsDump = serDe.createPathsDump(); + + TProtocolFactory protoFactory = useCompact ? new TCompactProtocol.Factory( + ServiceConstants.ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT, + ServiceConstants.ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT) + : new TBinaryProtocol.Factory(true, true, + ServiceConstants.ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT, + ServiceConstants.ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT); + byte[] ser = new TSerializer(protoFactory).serialize(pathsDump); + long serTime = System.currentTimeMillis() - t1; + System.out.println("Serialization Time: " + serTime + ", " + ser.length); + + t1 = System.currentTimeMillis(); + TPathsDump tPathsDump = new TPathsDump(); + new TDeserializer(protoFactory).deserialize(tPathsDump, ser); + HMSPaths fromDump = serDe.initializeFromDump(tPathsDump); + System.out.println("Deserialization Time: " + (System.currentTimeMillis() - t1)); + Assert.assertEquals(new HashSet(Arrays.asList("db9.tbl999")), fromDump.findAuthzObject(new String[]{"user", "hive", "warehouse", "db9", "tbl999"}, false)); + Assert.assertEquals(new HashSet(Arrays.asList("db9.tbl999")), fromDump.findAuthzObject(new String[]{"user", "hive", "warehouse", "db9", "tbl999", "part99"}, false)); + } + + /** + * Test ThriftSerializer with a larger message than thrift max message size. 
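+ * (ThriftSerializer.maxMessageSize is package-visible and marked @VisibleForTesting + * so this test can lower it and force the "Length exceeded max allowed" failure path.)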
+ */ + @Test + public void testThriftSerializerWithInvalidMsgSize() throws TException, IOException { + HMSPathsDumper serDe = genHMSPathsDumper(); + TPathsDump pathsDump = serDe.createPathsDump(); + byte[] ser = ThriftSerializer.serialize(pathsDump); + + boolean exceptionThrown = false; + try { + // deserializing a message larger than the max allowed size should throw an IOException + ThriftSerializer.maxMessageSize = 1024; + ThriftSerializer.deserialize(new TPathsDump(), ser); + } catch (IOException e) { + exceptionThrown = true; + Assert.assertTrue(e.getCause().getMessage().contains("Length exceeded max allowed:")); + Assert.assertTrue(e.getMessage().contains("Error deserializing thrift object TPathsDump")); + } finally { + Assert.assertEquals(true, exceptionThrown); + } + // deserializing a normal-sized message should succeed + ThriftSerializer.maxMessageSize = ServiceConstants.ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT; + ThriftSerializer.deserialize(new TPathsDump(), ser); + } + + /** + * Generate an HMSPathsDumper for the Thrift serialization tests + */ + private HMSPathsDumper genHMSPathsDumper() { HMSPaths hmsPaths = new HMSPaths(new String[] {"/"}); String prefix = "/user/hive/warehouse/"; for (int dbNum = 0; dbNum < 10; dbNum++) { @@ -94,22 +149,7 @@ public void testThrftSerialization() throws TException { } } } - HMSPathsDumper serDe = hmsPaths.getPathsDump(); - long t1 = System.currentTimeMillis(); - TPathsDump pathsDump = serDe.createPathsDump(); - - TProtocolFactory protoFactory = useCompact ? new TCompactProtocol.Factory() : new TBinaryProtocol.Factory(); - byte[] ser = new TSerializer(protoFactory).serialize(pathsDump); - long serTime = System.currentTimeMillis() - t1; - System.out.println("Serialization Time: " + serTime + ", " + ser.length); - - t1 = System.currentTimeMillis(); - TPathsDump tPathsDump = new TPathsDump(); - new TDeserializer(protoFactory).deserialize(tPathsDump, ser); - HMSPaths fromDump = serDe.initializeFromDump(tPathsDump); - System.out.println("Deserialization Time: " + (System.currentTimeMillis() - t1)); - Assert.assertEquals("db9.tbl999", fromDump.findAuthzObject(new String[]{"user", "hive", "warehouse", "db9", "tbl999"}, false)); - Assert.assertEquals("db9.tbl999", fromDump.findAuthzObject(new String[]{"user", "hive", "warehouse", "db9", "tbl999", "part99"}, false)); + return hmsPaths.getPathsDump(); } } diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestKrbConnectionTimeout.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestKrbConnectionTimeout.java new file mode 100644 index 000000000..b62a83f99 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestKrbConnectionTimeout.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.hdfs; + +import org.apache.hadoop.minikdc.MiniKdc; +import org.junit.Assume; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestKrbConnectionTimeout extends + SentryHdfsServiceIntegrationBase { + + @BeforeClass + public static void setup() throws Exception { + Assume.assumeTrue("true".equalsIgnoreCase(System.getProperty( + "sentry.hive.test.ticket.timeout", "false"))); + kdcConfOverlay.setProperty(MiniKdc.MAX_TICKET_LIFETIME, "300001"); + setup(); + } + + /*** + * Test is run only when sentry.hive.test.ticket.timeout is set to "true" + * @throws Exception + */ + @Before + public void beforeMethod() { + } + + @Test + public void testConnectionAfterTicketTimeout() throws Exception { + Thread.sleep(400000); + connectToHdfsSyncService(); + } + +} diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestPathsUpdate.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestPathsUpdate.java index 5bd848762..71618ab1b 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestPathsUpdate.java +++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestPathsUpdate.java @@ -19,7 +19,7 @@ import java.util.List; import org.junit.Test; -import junit.framework.Assert; +import org.junit.Assert; public class TestPathsUpdate { @Test diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java index 4b8a05813..a5bc313e6 100644 --- a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java +++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java @@ -17,13 +17,13 @@ */ package org.apache.sentry.hdfs; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.sentry.hdfs.service.thrift.TPathChanges; +import static org.junit.Assert.assertTrue; import org.junit.Test; import com.google.common.collect.Lists; @@ -33,10 +33,10 @@ public class TestUpdateableAuthzPaths { @Test public void testFullUpdate() { HMSPaths hmsPaths = createBaseHMSPaths(1, 1); - assertEquals("db1", hmsPaths.findAuthzObjectExactMatch(new String[]{"db1"})); - assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"})); - assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"})); - assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"})); + assertTrue(hmsPaths.findAuthzObjectExactMatches(new String[]{"db1"}).contains("db1")); + assertTrue(hmsPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11"}).contains("db1.tbl11")); + assertTrue(hmsPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part111"}).contains("db1.tbl11")); + assertTrue(hmsPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part112"}).contains("db1.tbl11")); UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(hmsPaths); PathsUpdate update = new PathsUpdate(1, true); @@ -47,10 +47,10 @@ public void testFullUpdate() { assertFalse(pre == authzPaths2); authzPaths2 = 
pre; - assertEquals("db1", authzPaths2.findAuthzObjectExactMatch(new String[]{"db1"})); - assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"})); - assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"})); - assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"})); + assertTrue(authzPaths2.findAuthzObjectExactMatches(new String[]{"db1"}).contains("db1")); + assertTrue(authzPaths2.findAuthzObjectExactMatches(new String[]{"db1", "tbl11"}).contains("db1.tbl11")); + assertTrue(authzPaths2.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part111"}).contains("db1.tbl11")); + assertTrue(authzPaths2.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part112"}).contains("db1.tbl11")); // Ensure Full Update wipes old stuff UpdateableAuthzPaths authzPaths3 = new UpdateableAuthzPaths(createBaseHMSPaths(2, 1)); @@ -60,13 +60,13 @@ public void testFullUpdate() { assertFalse(pre == authzPaths2); authzPaths2 = pre; - assertNull(authzPaths2.findAuthzObjectExactMatch(new String[]{"db1"})); - assertNull(authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"})); + assertNull(authzPaths2.findAuthzObjectExactMatches(new String[]{"db1"})); + assertNull(authzPaths2.findAuthzObjectExactMatches(new String[]{"db1", "tbl11"})); - assertEquals("db2", authzPaths2.findAuthzObjectExactMatch(new String[]{"db2"})); - assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new String[]{"db2", "tbl21"})); - assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new String[]{"db2", "tbl21", "part211"})); - assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new String[]{"db2", "tbl21", "part212"})); + assertTrue(authzPaths2.findAuthzObjectExactMatches(new String[]{"db2"}).contains("db2")); + assertTrue(authzPaths2.findAuthzObjectExactMatches(new String[]{"db2", "tbl21"}).contains("db2.tbl21")); + assertTrue(authzPaths2.findAuthzObjectExactMatches(new String[]{"db2", "tbl21", "part211"}).contains("db2.tbl21")); + assertTrue(authzPaths2.findAuthzObjectExactMatches(new String[]{"db2", "tbl21", "part212"}).contains("db2.tbl21")); } @Test @@ -87,14 +87,14 @@ public void testPartialUpdateAddPath() { authzPaths.updatePartial(Lists.newArrayList(update), lock); // Ensure no change in existing Paths - assertEquals("db1", authzPaths.findAuthzObjectExactMatch(new String[]{"db1"})); - assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"})); - assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"})); - assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"})); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1"}).contains("db1")); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11"}).contains("db1.tbl11")); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part111"}).contains("db1.tbl11")); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part112"}).contains("db1.tbl11")); // Verify new Paths - assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12"})); - assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12", "part121"})); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl12"}).contains("db1.tbl12")); + 
assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl12", "part121"}).contains("db1.tbl12")); // Rename table update = new PathsUpdate(4, false); @@ -103,17 +103,17 @@ public void testPartialUpdateAddPath() { authzPaths.updatePartial(Lists.newArrayList(update), lock); // Verify name change - assertEquals("db1", authzPaths.findAuthzObjectExactMatch(new String[]{"db1"})); - assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "xtbl11"})); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1"}).contains("db1")); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "xtbl11"}).contains("db1.xtbl11")); // Explicit set location has to be done on the partition else it will be associated to // the old location - assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"})); - assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"})); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part111"}).contains("db1.xtbl11")); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part112"}).contains("db1.xtbl11")); // Verify other tables are not touched - assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "xtbl12"})); - assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "xtbl12", "part121"})); - assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12"})); - assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12", "part121"})); + assertNull(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "xtbl12"})); + assertNull(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "xtbl12", "part121"})); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl12"}).contains("db1.tbl12")); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl12", "part121"}).contains("db1.tbl12")); } @@ -122,9 +122,9 @@ public void testPartialUpdateDelPath() { HMSPaths hmsPaths = createBaseHMSPaths(1, 1); UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(hmsPaths); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"})); - assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"})); - + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11"}).contains("db1.tbl11")); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part111"}).contains("db1.tbl11")); + // Drop partition PathsUpdate update = new PathsUpdate(2, false); TPathChanges pathChange = update.newPathChange("db1.tbl11"); @@ -132,17 +132,17 @@ public void testPartialUpdateDelPath() { authzPaths.updatePartial(Lists.newArrayList(update), lock); // Verify Paths deleted - assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"})); + assertNull(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part111"})); // Verify rest ok - assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"})); + assertTrue(authzPaths.findAuthzObjectExactMatches(new String[]{"db1", "tbl11", "part112"}).contains("db1.tbl11")); } @Test public void testDefaultDbPath() { HMSPaths hmsPaths = new HMSPaths(new String[] 
{"/user/hive/warehouse"}); hmsPaths._addAuthzObject("default", Lists.newArrayList("/user/hive/warehouse")); - assertEquals("default", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse"})); + assertTrue(hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse"}).contains("default")); } private HMSPaths createBaseHMSPaths(int dbNum, int tblNum) { diff --git a/sentry-hdfs/sentry-hdfs-dist/pom.xml b/sentry-hdfs/sentry-hdfs-dist/pom.xml index 4bbb2128d..37350c515 100644 --- a/sentry-hdfs/sentry-hdfs-dist/pom.xml +++ b/sentry-hdfs/sentry-hdfs-dist/pom.xml @@ -22,7 +22,7 @@ limitations under the License. org.apache.sentry sentry-hdfs - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-hdfs-dist @@ -53,7 +53,7 @@ limitations under the License. org.apache.maven.plugins maven-shade-plugin - 2.1 + 2.3 package diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore b/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore deleted file mode 100644 index 91ad75bb4..000000000 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -*.class -target/ -.classpath -.project -.settings -.metadata -.idea/ -*.iml -derby.log -datanucleus.log -sentry-core/sentry-core-common/src/gen -**/TempStatsStore/ -# Package Files # -*.jar -*.war -*.ear -test-output/ -maven-repo/ diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml b/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml index 813c2e4aa..8d3bdc9fc 100644 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml @@ -21,7 +21,7 @@ limitations under the License. org.apache.sentry sentry-hdfs - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-hdfs-namenode-plugin @@ -32,7 +32,7 @@ limitations under the License. org.apache.sentry sentry-hdfs-common - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT junit @@ -53,11 +53,28 @@ limitations under the License. hadoop-hdfs provided + + org.apache.thrift + libthrift + org.apache.hadoop hadoop-minicluster test + + + + org.apache.maven.plugins + maven-surefire-plugin + + + **/TestSentryAuthorizationProvider.java + + + + + diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuthorizationProvider.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuthorizationProvider.java new file mode 100644 index 000000000..114dbb0e3 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuthorizationProvider.java @@ -0,0 +1,411 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; +import org.apache.hadoop.security.AccessControlException; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; + +/** + * Implementations of this interface are called from within an + * inode to set or return authorization related information. + *

+ * The HDFS default implementation, {@link DefaultAuthorizationProvider} uses + * the inode itself to retrieve and store information. + *

+ * A custom implementation may use a different authorization store and enforce + * the permission check using alternate logic. + *

+ * It is expected that an implementation of the provider will not call external + * systems or perform expensive computations on any of the methods defined by + * the provider interface as they are typically invoked within remote client + * filesystem operations. + *

+ * If calls to external systems are required, they should be done + * asynchronously from the provider methods. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class AuthorizationProvider { + + private static final ThreadLocal CLIENT_OP_TL = + new ThreadLocal() { + @Override + protected Boolean initialValue() { + return Boolean.FALSE; + } + }; + + static void beginClientOp() { + CLIENT_OP_TL.set(Boolean.TRUE); + } + + static void endClientOp() { + CLIENT_OP_TL.set(Boolean.FALSE); + } + + private static AuthorizationProvider provider; + + /** + * Return the authorization provider singleton for the NameNode. + * + * @return the authorization provider + */ + public static AuthorizationProvider get() { + return provider; + } + + /** + * Set the authorization provider singleton for the NameNode. The + * provider must be started (before being set) and stopped by the setter. + * + * @param authzProvider the authorization provider + */ + static void set(AuthorizationProvider authzProvider) { + provider = authzProvider; + } + + /** + * Constant that indicates current state (as opposed to a particular snapshot + * ID) when retrieving authorization information from the provider. + */ + public static final int CURRENT_STATE_ID = Snapshot.CURRENT_STATE_ID; + + /** + * This interface exposes INode read-only information relevant for + * authorization decisions. + * + * @see AuthorizationProvider + */ + @InterfaceAudience.Public + @InterfaceStability.Unstable + public interface INodeAuthorizationInfo { + + /** + * Return the inode unique ID. This value never changes. + * + * @return the inode unique ID. + */ + long getId(); + + /** + * Return the inode path element name. This value may change. + * @return the inode path element name. + */ + String getLocalName(); + + /** + * Return the parent inode. This value may change. + * + * @return the parent inode. + */ + INodeAuthorizationInfo getParent(); + + /** + * Return the inode full path. This value may change. + * + * @return the inode full path + */ + String getFullPathName(); + + /** + * Return if the inode is a directory or not. + * + * @return TRUE if the inode is a directory, + * FALSE otherwise. + */ + boolean isDirectory(); + + /** + * Return the inode user for the specified snapshot. + * + * @param snapshotId a snapshot ID or {@link #CURRENT_STATE_ID} for latest + * value. + * @return the inode user for the specified snapshot. + */ + String getUserName(int snapshotId); + + /** + * Return the inode group for the specified snapshot. + * + * @param snapshotId a snapshot ID or {@link #CURRENT_STATE_ID} for latest + * value. + * @return the inode group for the specified snapshot. + */ + String getGroupName(int snapshotId); + + /** + * Return the inode permission for the specified snapshot. + * + * @param snapshotId a snapshot ID or {@link #CURRENT_STATE_ID} for latest + * value. + * @return the inode permission for the specified snapshot. + */ + FsPermission getFsPermission(int snapshotId); + + /** + * Return the inode ACL feature for the specified snapshot. + * + * @param snapshotId a snapshot ID or {@link #CURRENT_STATE_ID} for latest + * value. + * @return the inode ACL feature for the specified snapshot. + */ + AclFeature getAclFeature(int snapshotId); + + } + + /** + * Indicates if the current provider method invocation is part of a client + * operation or it is an internal NameNode call (i.e. a FS image or an edit + * log operation). 
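 * <p>
 * A minimal sketch of the intended pattern inside a provider method; the
 * helper names below are illustrative and not part of this interface:
 * <pre>
 *   public String getUser(INodeAuthorizationInfo node, int snapshotId) {
 *     if (!isClientOp()) {
 *       // FS image load or edit log replay: take the cheap local path
 *       return defaultProvider.getUser(node, snapshotId);
 *     }
 *     return authzStoreUser(node, snapshotId);
 *   }
 * </pre>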
+ * + * @return TRUE if the provider method invocation is being + * done as part of a client operation, FALSE otherwise. + */ + protected final boolean isClientOp() { + return CLIENT_OP_TL.get() == Boolean.TRUE; + } + + /** + * Initialize the provider. This method is called at NameNode startup + * time. + */ + public void start() { + } + + /** + * Shutdown the provider. This method is called at NameNode shutdown time. + */ + public void stop() { + } + + /** + * Set all currently snapshot-able directories and their corresponding last + * snapshot ID. This method is called at NameNode startup. + *

+ * A provider implementation that keeps authorization information on per + * snapshot basis can use this call to initialize/re-sync its information with + * the NameNode snapshot-able directories information. + * + * @param snapshotableDirs a map with all the currently snapshot-able + * directories and their corresponding last snapshot ID + */ + public void setSnaphottableDirs(Map + snapshotableDirs) { + } + + /** + * Add a directory as snapshot-able. + *

+ * A provider implementation that keeps authorization information on per + * snapshot basis can use this call to prepare itself for snapshots on the + * specified directory. + * + * @param dir snapshot-able directory to add + */ + public void addSnapshottable(INodeAuthorizationInfo dir) { + } + + /** + * Remove a directory as snapshot-able. + *

+ * A provider implementation that keeps authorization information on per + * snapshot basis can use this call to clean up any snapshot on the + * specified directory. + * + * @param dir snapshot-able directory to remove + */ + public void removeSnapshottable(INodeAuthorizationInfo dir) { + } + + /** + * Create a snapshot for snapshot-able directory. + *

+ * A provider implementation that keeps authorization information on per + * snapshot basis can use this call to perform any snapshot related + * bookkeeping on the specified directory because of the snapshot creation. + * + * @param dir directory to make a snapshot of + * @param snapshotId the snapshot ID to create + */ + public void createSnapshot(INodeAuthorizationInfo dir, int snapshotId) + throws IOException { + } + + /** + * Remove a snapshot for snapshot-able directory. + *

+ * A provider implementation that keeps authorization information on per + * snapshot basis can use this call to perform any snapshot related + * bookkeeping on the specified directory because of the snapshot removal. + * + * @param dir directory to remove a snapshot from + * @param snapshotId the snapshot ID to remove + */ + public void removeSnapshot(INodeAuthorizationInfo dir, int snapshotId) + throws IOException { + } + + /** + * Set the user for an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + * @param user user name + */ + public abstract void setUser(INodeAuthorizationInfo node, String user); + + /** + * Get the user of an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + * @param snapshotId snapshot ID of the inode to get the user from + * @return the user of the inode + */ + public abstract String getUser(INodeAuthorizationInfo node, int snapshotId); + + /** + * Set the group of an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + * @param group group name + */ + public abstract void setGroup(INodeAuthorizationInfo node, String group); + + /** + * Get the group of an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + * @param snapshotId snapshot ID of the inode to get the group from + * @return the group of the inode + */ + public abstract String getGroup(INodeAuthorizationInfo node, int snapshotId); + + /** + * Set the permission of an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + * @param permission the permission to set + */ + public abstract void setPermission(INodeAuthorizationInfo node, + FsPermission permission); + + /** + * Get the permission of an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + * @param snapshotId snapshot ID of the inode to get the permission from + * @return the permission of the inode + */ + public abstract FsPermission getFsPermission(INodeAuthorizationInfo node, + int snapshotId); + + /** + * Get the ACLs of an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + * @param snapshotId snapshot ID of the inode to get the ACLs from + * @return the ACLs of the inode + */ + public abstract AclFeature getAclFeature(INodeAuthorizationInfo node, + int snapshotId); + + /** + * Remove the ACLs of an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + */ + public abstract void removeAclFeature(INodeAuthorizationInfo node); + + /** + * Add ACLs to an inode. + *

+ * This method is always called within a Filesystem LOCK. + * + * @param node inode + * @param f the ACLs of the inode + */ + public abstract void addAclFeature(INodeAuthorizationInfo node, AclFeature f); + + /** + * Check whether the current user has permissions to access the path. + * Traverse is always checked. + *
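 * <p>
 * For instance, a plain read check on an existing file reduces, roughly, to
 * the call below, where a null FsAction argument is assumed to mean that no
 * check is required at that level:
 * <pre>
 *   provider.checkPermission(user, groups, inodes, CURRENT_STATE_ID,
 *       false, null, null, FsAction.READ, null, false);
 * </pre>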

+ * This method is always called within a Filesystem LOCK. + *

+ * Parent path means the parent directory for the path. + * Ancestor path means the last (the closest) existing ancestor directory + * of the path. + *

+ * Note that if the parent path exists, + * then the parent path and the ancestor path are the same. + *

+ * For example, suppose the path is "/foo/bar/baz". + * No matter baz is a file or a directory, + * the parent path is "/foo/bar". + * If bar exists, then the ancestor path is also "/foo/bar". + * If bar does not exist and foo exists, + * then the ancestor path is "/foo". + * Further, if both foo and bar do not exist, + * then the ancestor path is "/". + * + * @param user user ot check permissions against + * @param groups groups of the user to check permissions against + * @param inodes inodes of the path to check permissions + * @param snapshotId snapshot ID to check permissions + * @param doCheckOwner Require user to be the owner of the path? + * @param ancestorAccess The access required by the ancestor of the path. + * @param parentAccess The access required by the parent of the path. + * @param access The access required by the path. + * @param subAccess If path is a directory, + * it is the access required of the path and all the sub-directories. + * If path is not a directory, there is no effect. + * @param ignoreEmptyDir Ignore permission checking for empty directory? + * @throws AccessControlException + * @throws UnresolvedLinkException + */ + public abstract void checkPermission(String user, Set groups, + INodeAuthorizationInfo[] inodes, int snapshotId, + boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, + FsAction access, FsAction subAccess, boolean ignoreEmptyDir) + throws AccessControlException, UnresolvedLinkException; + +} diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java index cf33b8b34..ea1514cdb 100644 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java @@ -31,7 +31,7 @@ public class SentryAuthorizationConstants { public static final String HDFS_PERMISSION_KEY = CONFIG_PREFIX + "hdfs-permission"; - public static final long HDFS_PERMISSION_DEFAULT = 0770; + public static final long HDFS_PERMISSION_DEFAULT = 771; public static final String HDFS_PATH_PREFIXES_KEY = CONFIG_PREFIX + "hdfs-path-prefixes"; diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java index f9a1f65bd..c2416c1cf 100644 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java @@ -17,9 +17,7 @@ */ package org.apache.sentry.hdfs; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; +import java.util.*; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; @@ -31,14 +29,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.util.StringUtils; -import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate; import org.apache.sentry.hdfs.Updateable.Update; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import 
com.google.common.collect.Lists; public class SentryAuthorizationInfo implements Runnable { private static Logger LOG = @@ -127,6 +123,7 @@ UpdateableAuthzPermissions getAuthzPermissions() { } private boolean update() { + //Looks like getting same updates multiple times SentryAuthzUpdate updates = updater.getUpdates(); // Updates can be null if Sentry Service is un-reachable if (updates != null) { @@ -136,14 +133,14 @@ private boolean update() { updates.getPermUpdates(), authzPermissions); // If there were any FULL updates the returned instance would be // different - if ((newAuthzPaths != authzPaths)||(newAuthzPerms != authzPermissions)) { + if (newAuthzPaths != authzPaths || newAuthzPerms != authzPermissions) { lock.writeLock().lock(); try { - LOG.warn("FULL Updated paths seq Num [old=" + LOG.debug("FULL Updated paths seq Num [old=" + authzPaths.getLastUpdatedSeqNum() + "], [new=" + newAuthzPaths.getLastUpdatedSeqNum() + "]"); authzPaths = newAuthzPaths; - LOG.warn("FULL Updated perms seq Num [old=" + LOG.debug("FULL Updated perms seq Num [old=" + authzPermissions.getLastUpdatedSeqNum() + "], [new=" + newAuthzPerms.getLastUpdatedSeqNum() + "]"); authzPermissions = newAuthzPerms; @@ -162,20 +159,20 @@ private > V processUpdates(List upd // one in the List.. all the remaining will be partial updates if (updates.size() > 0) { if (updates.get(0).hasFullImage()) { - LOG.warn("Process Update : FULL IMAGE " + LOG.debug("Process Update : FULL IMAGE " + "[" + updateable.getClass() + "]" + "[" + updates.get(0).getSeqNum() + "]"); updateable = (V)updateable.updateFull(updates.remove(0)); } // Any more elements ? if (!updates.isEmpty()) { - LOG.warn("Process Update : More updates.. " + LOG.debug("Process Update : More updates.. " + "[" + updateable.getClass() + "]" + "[" + updateable.getLastUpdatedSeqNum() + "]" + "[" + updates.size() + "]"); updateable.updatePartial(updates, lock); } - LOG.warn("Process Update : Finished updates.. " + LOG.debug("Process Update : Finished updates.. 
" + "[" + updateable.getClass() + "]" + "[" + updateable.getLastUpdatedSeqNum() + "]"); } @@ -208,7 +205,7 @@ public void run() { } public void start() { - if ((authzPaths != null)||(authzPermissions != null)) { + if (authzPaths != null || authzPermissions != null) { boolean success = false; try { success = update(); @@ -253,7 +250,7 @@ public boolean isStale() { return stale; } - public boolean isManaged(String[] pathElements) { + public boolean isUnderPrefix(String[] pathElements) { lock.readLock().lock(); try { return authzPaths.isUnderPrefix(pathElements); @@ -262,6 +259,11 @@ public boolean isManaged(String[] pathElements) { } } + @Deprecated + public boolean isManaged(String[] pathElements) { + return isUnderPrefix(pathElements); + } + public boolean doesBelongToAuthzObject(String[] pathElements) { lock.readLock().lock(); try { @@ -271,17 +273,38 @@ public boolean doesBelongToAuthzObject(String[] pathElements) { } } + public boolean isSentryManaged(final String[] pathElements) { + lock.readLock().lock(); + try { + return authzPaths.isUnderPrefix(pathElements) && + authzPaths.findAuthzObject(pathElements) != null; + } finally { + lock.readLock().unlock(); + } + } + @SuppressWarnings("unchecked") public List getAclEntries(String[] pathElements) { lock.readLock().lock(); try { - String authzObj = authzPaths.findAuthzObject(pathElements); + Set authzObjs = authzPaths.findAuthzObject(pathElements); // Apparently setFAcl throws error if 'group::---' is not present AclEntry noGroup = AclEntry.parseAclEntry("group::---", true); - ArrayList retList = Lists.newArrayList(noGroup); - retList.addAll((authzObj != null) ? authzPermissions.getAcls(authzObj) - : Collections.EMPTY_LIST); - return retList; + + Set retSet = new HashSet(); + retSet.add(noGroup); + + if (authzObjs == null) { + retSet.addAll(Collections.EMPTY_LIST); + return new ArrayList(retSet); + } + + // No duplicate acls should be added. 
+ for (String authzObj: authzObjs) { + retSet.addAll(authzPermissions.getAcls(authzObj)); + } + + return new ArrayList(retSet); } finally { lock.readLock().unlock(); } diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java index 001da6554..c701723aa 100644 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -38,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.namenode.AclFeature; import org.apache.hadoop.hdfs.server.namenode.AuthorizationProvider; -import org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.security.AccessControlException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,16 +45,16 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; -public class SentryAuthorizationProvider +public class SentryAuthorizationProvider extends AuthorizationProvider implements Configurable { - + static class SentryAclFeature extends AclFeature { public SentryAclFeature(ImmutableList entries) { super(entries); } } - private static Logger LOG = + private static Logger LOG = LoggerFactory.getLogger(SentryAuthorizationProvider.class); private boolean started; @@ -67,6 +66,9 @@ public SentryAclFeature(ImmutableList entries) { private boolean originalAuthzAsAcl; private SentryAuthorizationInfo authzInfo; + private static String WARN_VISIBILITY = + " The result won't be visible when the path is managed by Sentry"; + public SentryAuthorizationProvider() { this(null); } @@ -75,7 +77,7 @@ public SentryAuthorizationProvider() { SentryAuthorizationProvider(SentryAuthorizationInfo authzInfo) { this.authzInfo = authzInfo; } - + @Override public void setConf(Configuration conf) { this.conf = conf; @@ -97,7 +99,7 @@ public synchronized void start() { throw new RuntimeException("HDFS ACLs must be enabled"); } - defaultAuthzProvider = new DefaultAuthorizationProvider(); + defaultAuthzProvider = AuthorizationProvider.get(); defaultAuthzProvider.start(); // Configuration is read from hdfs-sentry.xml and NN configuration, in // that order of precedence. @@ -177,7 +179,7 @@ public void checkPermission(String user, Set groups, } private static final String[] EMPTY_STRING_ARRAY = new String[0]; - + private String[] getPathElements(INodeAuthorizationInfo node) { return getPathElements(node, 0); } @@ -186,7 +188,7 @@ private String[] getPathElements(INodeAuthorizationInfo node, int idx) { String[] paths; INodeAuthorizationInfo parent = node.getParent(); if (parent == null) { - paths = (idx > 0) ? new String[idx] : EMPTY_STRING_ARRAY; + paths = idx > 0 ? 
new String[idx] : EMPTY_STRING_ARRAY; } else { paths = getPathElements(parent, idx + 1); paths[paths.length - 1 - idx] = node.getLocalName(); @@ -194,59 +196,60 @@ private String[] getPathElements(INodeAuthorizationInfo node, int idx) { return paths; } + private boolean isSentryManaged(final String[] pathElements) { + return authzInfo.isSentryManaged(pathElements); + } + + private boolean isSentryManaged(INodeAuthorizationInfo node) { + String[] pathElements = getPathElements(node); + return isSentryManaged(pathElements); + } + @Override public void setUser(INodeAuthorizationInfo node, String user) { + // always fall through to defaultAuthZProvider, + // issue warning when the path is sentry managed + if (isSentryManaged(node)) { + LOG.warn("### setUser {} (sentry managed path) to {}, update HDFS." + + WARN_VISIBILITY, + node.getFullPathName(), user); + } defaultAuthzProvider.setUser(node, user); } @Override public String getUser(INodeAuthorizationInfo node, int snapshotId) { - String user; - String[] pathElements = getPathElements(node); - if (!authzInfo.isManaged(pathElements)) { - user = defaultAuthzProvider.getUser(node, snapshotId); - } else { - if (!authzInfo.isStale()) { - if (authzInfo.doesBelongToAuthzObject(pathElements)) { - user = this.user; - } else { - user = defaultAuthzProvider.getUser(node, snapshotId); - } - } else { - user = this.user; - } - } - return user; + return isSentryManaged(node)? + this.user : defaultAuthzProvider.getUser(node, snapshotId); } @Override public void setGroup(INodeAuthorizationInfo node, String group) { + // always fall through to defaultAuthZProvider, + // issue warning when the path is sentry managed + if (isSentryManaged(node)) { + LOG.warn("### setGroup {} (sentry managed path) to {}, update HDFS." + + WARN_VISIBILITY, + node.getFullPathName(), group); + } defaultAuthzProvider.setGroup(node, group); } @Override public String getGroup(INodeAuthorizationInfo node, int snapshotId) { - String group; - String[] pathElements = getPathElements(node); - if (!authzInfo.isManaged(pathElements)) { - group = getDefaultProviderGroup(node, snapshotId); - } else { - if (!authzInfo.isStale()) { - if (authzInfo.doesBelongToAuthzObject(pathElements)) { - group = this.group; - } else { - group = getDefaultProviderGroup(node, snapshotId); - } - } else { - group = this.group; - } - } - return group; + return isSentryManaged(node)? + this.group : defaultAuthzProvider.getGroup(node, snapshotId); } @Override - public void setPermission(INodeAuthorizationInfo node, - FsPermission permission) { + public void setPermission(INodeAuthorizationInfo node, FsPermission permission) { + // always fall through to defaultAuthZProvider, + // issue warning when the path is sentry managed + if (isSentryManaged(node)) { + LOG.warn("### setPermission {} (sentry managed path) to {}, update HDFS." 
+ + WARN_VISIBILITY, + node.getFullPathName(), permission.toString()); + } defaultAuthzProvider.setPermission(node, permission); } @@ -255,7 +258,7 @@ public FsPermission getFsPermission( INodeAuthorizationInfo node, int snapshotId) { FsPermission permission; String[] pathElements = getPathElements(node); - if (!authzInfo.isManaged(pathElements)) { + if (!isSentryManaged(pathElements)) { permission = defaultAuthzProvider.getFsPermission(node, snapshotId); } else { FsPermission returnPerm = this.permission; @@ -270,15 +273,7 @@ public FsPermission getFsPermission( break; } } - if (!authzInfo.isStale()) { - if (authzInfo.doesBelongToAuthzObject(pathElements)) { - permission = returnPerm; - } else { - permission = defaultAuthzProvider.getFsPermission(node, snapshotId); - } - } else { - permission = returnPerm; - } + permission = returnPerm; } return permission; } @@ -301,21 +296,33 @@ private List createAclEntries(String user, String group, builder.setName(null); return list; } - + /* + Returns hadoop acls if + - Not managed + - Not stale and not an auth obj + Returns hive:hive + - If stale + Returns sentry acls + - Otherwise, if not stale and auth obj + */ @Override public AclFeature getAclFeature(INodeAuthorizationInfo node, int snapshotId) { AclFeature f = null; String[] pathElements = getPathElements(node); String p = Arrays.toString(pathElements); - boolean isManaged = false; + boolean isPrefixed = false; boolean isStale = false; boolean hasAuthzObj = false; Map aclMap = null; - if (!authzInfo.isManaged(pathElements)) { - isManaged = false; + if (!authzInfo.isUnderPrefix(pathElements)) { + isPrefixed = false; + f = defaultAuthzProvider.getAclFeature(node, snapshotId); + } else if (!authzInfo.doesBelongToAuthzObject(pathElements)) { + isPrefixed = true; f = defaultAuthzProvider.getAclFeature(node, snapshotId); } else { - isManaged = true; + isPrefixed = true; + hasAuthzObj = true; aclMap = new HashMap(); if (originalAuthzAsAcl) { String user = defaultAuthzProvider.getUser(node, snapshotId); @@ -326,24 +333,18 @@ public AclFeature getAclFeature(INodeAuthorizationInfo node, int snapshotId) { addToACLMap(aclMap, createAclEntries(this.user, this.group, this.permission)); } - if (!authzInfo.isStale()) { + if (!authzInfo.isStale()) { isStale = false; - if (authzInfo.doesBelongToAuthzObject(pathElements)) { - hasAuthzObj = true; - addToACLMap(aclMap, authzInfo.getAclEntries(pathElements)); - f = new SentryAclFeature(ImmutableList.copyOf(aclMap.values())); - } else { - hasAuthzObj = false; - f = defaultAuthzProvider.getAclFeature(node, snapshotId); - } + addToACLMap(aclMap, authzInfo.getAclEntries(pathElements)); + f = new SentryAclFeature(ImmutableList.copyOf(aclMap.values())); } else { isStale = true; f = new SentryAclFeature(ImmutableList.copyOf(aclMap.values())); } } if (LOG.isDebugEnabled()) { - LOG.debug("### getAclEntry \n[" + (p == null ? "null" : p) + "] : [" - + "isManaged=" + isManaged + LOG.debug("### getAclEntry \n[" + p + "] : [" + + "isPreifxed=" + isPrefixed + ", isStale=" + isStale + ", hasAuthzObj=" + hasAuthzObj + ", origAuthzAsAcl=" + originalAuthzAsAcl + "]\n" @@ -384,20 +385,55 @@ private String getDefaultProviderGroup(INodeAuthorizationInfo node, return group; } + /* + * Check if the given node has ACL, remove the ACL if so. Issue a warning + * message when the node doesn't have ACL and warn is true. + * TODO: We need this to maintain backward compatibility (not throw error in + * some cases). We may remove this when we release sentry major version. 
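   * For example, a client calling removeAcl on a Sentry-managed path whose
   * inode never carried a native HDFS ACL now results in a logged warning
   * rather than a failed operation.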
+ */ + private void checkAndRemoveHdfsAcl(INodeAuthorizationInfo node, + boolean warn) { + AclFeature f = defaultAuthzProvider.getAclFeature(node, + Snapshot.CURRENT_STATE_ID); + if (f != null) { + defaultAuthzProvider.removeAclFeature(node); + } else { + if (warn) { + LOG.warn("### removeAclFeature is requested on {}, but it does not " + + "have any acl.", node); + } + } + } + @Override public void removeAclFeature(INodeAuthorizationInfo node) { - AclFeature aclFeature = node.getAclFeature(CURRENT_STATE_ID); - if (aclFeature.getClass() != SentryAclFeature.class) { + // always fall through to defaultAuthZProvider, + // issue warning when the path is sentry managed + if (isSentryManaged(node)) { + LOG.warn("### removeAclFeature {} (sentry managed path), update HDFS." + + WARN_VISIBILITY, + node.getFullPathName()); + // For Sentry-managed paths, client code may try to remove a + // non-existing ACL, ignore the request with a warning if the ACL + // doesn't exist + checkAndRemoveHdfsAcl(node, true); + } else { defaultAuthzProvider.removeAclFeature(node); } } @Override public void addAclFeature(INodeAuthorizationInfo node, AclFeature f) { - String[] pathElements = getPathElements(node); - if (!authzInfo.isManaged(pathElements)) { - defaultAuthzProvider.addAclFeature(node, f); + // always fall through to defaultAuthZProvider, + // issue warning when the path is sentry managed + if (isSentryManaged(node)) { + LOG.warn("### addAclFeature {} (sentry managed path) {}, update HDFS." + + WARN_VISIBILITY, + node.getFullPathName(), f.toString()); + // For Sentry-managed path, remove ACL silently before adding new ACL + checkAndRemoveHdfsAcl(node, false); } + defaultAuthzProvider.addAclFeature(node, f); } } diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryPermissions.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryPermissions.java index b9d1d70e5..c01ff686a 100644 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryPermissions.java +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryPermissions.java @@ -17,13 +17,7 @@ */ package org.apache.sentry.hdfs; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; @@ -79,17 +73,21 @@ public Set getAllGroups() { } } - private final Map privileges = new HashMap(); - private final Map roles = new HashMap(); - private Map> authzObjChildren = new HashMap>(); + // Comparison of authorizable object should be case insensitive. + private final Map privileges = new TreeMap(String.CASE_INSENSITIVE_ORDER); + private Map> authzObjChildren = new TreeMap>(String.CASE_INSENSITIVE_ORDER); + + // RoleInfo should be case insensitive. 
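      // For example, a role stored as "Admin" must be returned by getRoleInfo("admin");
      // the CASE_INSENSITIVE_ORDER comparator on the TreeMap below provides exactly that.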
+ private final Map roles = new TreeMap(String.CASE_INSENSITIVE_ORDER); String getParentAuthzObject(String authzObject) { - int dot = authzObject.indexOf('.'); - if (dot > 0) { - return authzObject.substring(0, dot); - } else { - return authzObject; + if (authzObject != null) { + int dot = authzObject.indexOf('.'); + if (dot > 0) { + return authzObject.substring(0, dot); + } } + return authzObject; } void addParentChildMappings(String authzObject) { @@ -118,10 +116,14 @@ void removeParentChildMappings(String authzObject) { } private Map getGroupPerms(String authzObj) { - Map groupPerms = new HashMap(); - if (authzObj == null) { - return groupPerms; + Map groupPerms; + String parent = getParentAuthzObject(authzObj); + if (parent == null || parent.equals(authzObj)) { + groupPerms = new HashMap(); + } else { + groupPerms = getGroupPerms(parent); } + PrivilegeInfo privilegeInfo = privileges.get(authzObj); if (privilegeInfo != null) { for (Map.Entry privs : privilegeInfo @@ -135,16 +137,6 @@ private Map getGroupPerms(String authzObj) { @Override public List getAcls(String authzObj) { Map groupPerms = getGroupPerms(authzObj); - String parent = getParentAuthzObject(authzObj); - Map pGroupPerms = null; - if (parent == null) { - pGroupPerms = new HashMap(); - } else { - pGroupPerms = getGroupPerms(getParentAuthzObject(authzObj)); - if ((groupPerms == null)||(groupPerms.size() == 0)) { - groupPerms = pGroupPerms; - } - } List retList = new LinkedList(); for (Map.Entry groupPerm : groupPerms.entrySet()) { AclEntry.Builder builder = new AclEntry.Builder(); @@ -152,12 +144,8 @@ public List getAcls(String authzObj) { builder.setType(AclEntryType.GROUP); builder.setScope(AclEntryScope.ACCESS); FsAction action = groupPerm.getValue(); - FsAction pAction = pGroupPerms.get(groupPerm.getKey()); - if (pAction != null) { - action = action.or(pAction); - } - if ((action == FsAction.READ) || (action == FsAction.WRITE) - || (action == FsAction.READ_WRITE)) { + if (action == FsAction.READ || action == FsAction.WRITE + || action == FsAction.READ_WRITE) { action = action.or(FsAction.EXECUTE); } builder.setPermission(action); diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryUpdater.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryUpdater.java index 954039702..88be3f56a 100644 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryUpdater.java +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryUpdater.java @@ -18,7 +18,6 @@ package org.apache.sentry.hdfs; import org.apache.hadoop.conf.Configuration; -import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +37,7 @@ public SentryUpdater(Configuration conf, SentryAuthorizationInfo authzInfo) thro public SentryAuthzUpdate getUpdates() { if (sentryClient == null) { try { - sentryClient = new SentryHDFSServiceClient(conf); + sentryClient = SentryHDFSServiceClientFactory.create(conf); } catch (Exception e) { LOG.error("Error connecting to Sentry ['{}'] !!", e.getMessage()); diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPermissions.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPermissions.java index aa7836011..33581b723 100644 --- 
a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPermissions.java +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPermissions.java @@ -17,7 +17,6 @@ */ package org.apache.sentry.hdfs; -import java.util.Collection; import java.util.HashMap; import java.util.LinkedList; import java.util.List; diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/SentryAuthorizationInfoX.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/SentryAuthorizationInfoX.java index 4cebed2ef..b31a1ccd0 100644 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/SentryAuthorizationInfoX.java +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/SentryAuthorizationInfoX.java @@ -29,6 +29,7 @@ public class SentryAuthorizationInfoX extends SentryAuthorizationInfo { public SentryAuthorizationInfoX() { super(new String[]{"/user/authz"}); + System.setProperty("test.stale", "false"); } @Override @@ -48,7 +49,8 @@ public void stop() { @Override public boolean isStale() { - return false; + String stale = System.getProperty("test.stale"); + return stale.equalsIgnoreCase("true"); } private static final String[] MANAGED = {"user", "authz"}; @@ -65,7 +67,7 @@ private boolean hasPrefix(String[] prefix, String[] pathElement) { } @Override - public boolean isManaged(String[] pathElements) { + public boolean isUnderPrefix(String[] pathElements) { return hasPrefix(MANAGED, pathElements); } @@ -74,6 +76,11 @@ public boolean doesBelongToAuthzObject(String[] pathElements) { return hasPrefix(AUTHZ_OBJ, pathElements); } + @Override + public boolean isSentryManaged(final String[] pathElements) { + return isUnderPrefix(pathElements) && doesBelongToAuthzObject(pathElements); + } + @Override public List getAclEntries(String[] pathElements) { AclEntry acl = new AclEntry.Builder().setType(AclEntryType.USER). 
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryAuthorizationProvider.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryAuthorizationProvider.java index 767c8f60e..5da0dc2fb 100644 --- a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryAuthorizationProvider.java +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryAuthorizationProvider.java @@ -44,6 +44,9 @@ public class TestSentryAuthorizationProvider { + private static final String DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY = + "dfs.namenode.authorization.provider.class"; + private MiniDFSCluster miniDFS; private UserGroupInformation admin; @@ -57,7 +60,7 @@ public Void run() throws Exception { System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data"); Configuration conf = new HdfsConfiguration(); conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true); - conf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY, + conf.set(DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY, MockSentryAuthorizationProvider.class.getName()); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); EditLogFileOutputStream.setShouldSkipFsyncForTesting(true); @@ -130,7 +133,7 @@ public Void run() throws Exception { path = new Path("/user/authz/obj"); Assert.assertEquals("hive", fs.getFileStatus(path).getOwner()); Assert.assertEquals("hive", fs.getFileStatus(path).getGroup()); - Assert.assertEquals(new FsPermission((short) 0770), fs.getFileStatus(path).getPermission()); + Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(path).getPermission()); Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty()); List acls = new ArrayList(); @@ -143,7 +146,7 @@ public Void run() throws Exception { path = new Path("/user/authz/obj/xxx"); Assert.assertEquals("hive", fs.getFileStatus(path).getOwner()); Assert.assertEquals("hive", fs.getFileStatus(path).getGroup()); - Assert.assertEquals(new FsPermission((short) 0770), fs.getFileStatus(path).getPermission()); + Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(path).getPermission()); Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty()); Path path2 = new Path("/user/authz/obj/path2"); @@ -156,6 +159,60 @@ public Void run() throws Exception { Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup()); Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission()); Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty()); + + //stale and dir inside of prefix, obj + System.setProperty("test.stale", "true"); + path = new Path("/user/authz/xxx"); + status = fs.getFileStatus(path); + Assert.assertEquals(sysUser, status.getOwner()); + Assert.assertEquals("supergroup", status.getGroup()); + Assert.assertEquals(new FsPermission((short) 0755), status.getPermission()); + Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty()); + + // setPermission sets the permission for dir outside of prefix. + // setUser/setGroup sets the user/group for dir outside of prefix. 
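      // (Here /user/xxx lies outside the managed prefix /user/authz, so each call
      // below falls through to the default HDFS provider and takes effect.)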
+ Path pathOutside = new Path("/user/xxx"); + + fs.setPermission(pathOutside, new FsPermission((short) 0000)); + Assert.assertEquals(new FsPermission((short) 0000), fs.getFileStatus(pathOutside).getPermission()); + fs.setOwner(pathOutside, sysUser, "supergroup"); + Assert.assertEquals(sysUser, fs.getFileStatus(pathOutside).getOwner()); + Assert.assertEquals("supergroup", fs.getFileStatus(pathOutside).getGroup()); + + // removeAcl removes the ACL entries for dir outside of prefix. + List aclsOutside = new ArrayList(baseAclList); + List acl = new ArrayList(); + acl.add(new AclEntry.Builder().setName("supergroup").setType(AclEntryType.GROUP).setScope(AclEntryScope.ACCESS). + setPermission(FsAction.READ_EXECUTE).build()); + aclsOutside.addAll(acl); + fs.setAcl(pathOutside, aclsOutside); + fs.removeAclEntries(pathOutside, acl); + Assert.assertFalse(fs.getAclStatus(pathOutside).getEntries().containsAll(acl)); + + // setPermission sets the permission for dir inside of prefix but not a hive obj. + // setUser/setGroup sets the user/group for dir inside of prefix but not a hive obj. + Path pathInside = new Path("/user/authz/xxx"); + + fs.setPermission(pathInside, new FsPermission((short) 0000)); + Assert.assertEquals(new FsPermission((short) 0000), fs.getFileStatus(pathInside).getPermission()); + fs.setOwner(pathInside, sysUser, "supergroup"); + Assert.assertEquals(sysUser, fs.getFileStatus(pathInside).getOwner()); + Assert.assertEquals("supergroup", fs.getFileStatus(pathInside).getGroup()); + + // removeAcl is a no op for dir inside of prefix. + Assert.assertTrue(fs.getAclStatus(pathInside).getEntries().isEmpty()); + fs.removeAclEntries(pathInside, acl); + Assert.assertTrue(fs.getAclStatus(pathInside).getEntries().isEmpty()); + + // setPermission/setUser/setGroup is a no op for dir inside of prefix, and is a hive obj. + Path pathInsideAndHive = new Path("/user/authz/obj"); + + fs.setPermission(pathInsideAndHive, new FsPermission((short) 0000)); + Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(pathInsideAndHive).getPermission()); + fs.setOwner(pathInsideAndHive, sysUser, "supergroup"); + Assert.assertEquals("hive", fs.getFileStatus(pathInsideAndHive).getOwner()); + Assert.assertEquals("hive", fs.getFileStatus(pathInsideAndHive).getGroup()); + return null; } }); diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryPermissions.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryPermissions.java new file mode 100644 index 000000000..dbce40538 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryPermissions.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +package org.apache.sentry.hdfs; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Test suits for components inside SentryPermissions. + */ +public class TestSentryPermissions { + + @Test + public void testRoleInfoCaseInsensitive() { + SentryPermissions perm = new SentryPermissions(); + SentryPermissions.RoleInfo roleInfo = new SentryPermissions.RoleInfo("Admin"); + perm.addRoleInfo(roleInfo); + + // RoleInfo is case insensitive. + Assert.assertNotNull(perm.getRoleInfo("admin")); + Assert.assertNull(perm.getRoleInfo("doesNotExist")); + } +} diff --git a/sentry-hdfs/sentry-hdfs-service/.gitignore b/sentry-hdfs/sentry-hdfs-service/.gitignore deleted file mode 100644 index 91ad75bb4..000000000 --- a/sentry-hdfs/sentry-hdfs-service/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -*.class -target/ -.classpath -.project -.settings -.metadata -.idea/ -*.iml -derby.log -datanucleus.log -sentry-core/sentry-core-common/src/gen -**/TempStatsStore/ -# Package Files # -*.jar -*.war -*.ear -test-output/ -maven-repo/ diff --git a/sentry-hdfs/sentry-hdfs-service/pom.xml b/sentry-hdfs/sentry-hdfs-service/pom.xml index 6b84733f0..855368560 100644 --- a/sentry-hdfs/sentry-hdfs-service/pom.xml +++ b/sentry-hdfs/sentry-hdfs-service/pom.xml @@ -21,17 +21,22 @@ limitations under the License. org.apache.sentry sentry-hdfs - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-hdfs-service - Sentry HDFS service + Sentry HDFS Service org.apache.sentry sentry-binding-hive + + org.mockito + mockito-all + test + org.apache.hadoop hadoop-common diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java deleted file mode 100644 index e7677f252..000000000 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.sentry.hdfs; - -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Implementation of {@link MetastoreClient} - * - */ -public class ExtendedMetastoreClient implements MetastoreClient { - - private static Logger LOG = LoggerFactory.getLogger(ExtendedMetastoreClient.class); - - private volatile HiveMetaStoreClient client; - private final HiveConf hiveConf; - public ExtendedMetastoreClient(HiveConf hiveConf) { - this.hiveConf = hiveConf; - } - - @Override - public List getAllDatabases() { - List retList = new ArrayList(); - HiveMetaStoreClient client = getClient(); - if (client != null) { - try { - for (String dbName : client.getAllDatabases()) { - retList.add(client.getDatabase(dbName)); - } - } catch (Exception e) { - LOG.error("Could not get All Databases !!", e); - } - } - return retList; - } - - @Override - public List
<Table>
getAllTablesOfDatabase(Database db) { - List
<Table> retList = new ArrayList<Table>
(); - HiveMetaStoreClient client = getClient(); - if (client != null) { - try { - for (String tblName : client.getAllTables(db.getName())) { - retList.add(client.getTable(db.getName(), tblName)); - } - } catch (Exception e) { - LOG.error(String.format( - "Could not get Tables for '%s' !!", db.getName()), e); - } - } - return retList; - } - - @Override - public List listAllPartitions(Database db, Table tbl) { - HiveMetaStoreClient client = getClient(); - if (client != null) { - try { - return client.listPartitions(db.getName(), tbl.getTableName(), Short.MAX_VALUE); - } catch (Exception e) { - LOG.error(String.format( - "Could not get partitions for '%s'.'%s' !!", db.getName(), - tbl.getTableName()), e); - } - } - return new LinkedList(); - } - - private HiveMetaStoreClient getClient() { - if (client == null) { - try { - client = new HiveMetaStoreClient(hiveConf); - return client; - } catch (MetaException e) { - client = null; - LOG.error("Could not create metastore client !!", e); - return null; - } - } else { - return client; - } - } -} diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastoreCacheInitializer.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastoreCacheInitializer.java new file mode 100644 index 000000000..7a1959405 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastoreCacheInitializer.java @@ -0,0 +1,347 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.hdfs; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.sentry.hdfs.service.thrift.TPathChanges; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +class MetastoreCacheInitializer implements Closeable { + + private static final Logger LOGGER = LoggerFactory.getLogger + (MetastoreCacheInitializer.class); + + final static class CallResult { + private final Exception failure; + private final boolean successStatus; + + CallResult(Exception ex, boolean successStatus) { + failure = ex; + this.successStatus = successStatus; + } + + public boolean getSuccessStatus() { + return successStatus; + } + + public Exception getFailure() { + return failure; + } + } + + abstract class BaseTask implements Callable<CallResult> { + + /** + * Retry strategy for a BaseTask. + */ + private class RetryStrategy { + private int retryStrategyMaxRetries = 0; + private int retryStrategyWaitDurationMillis; + private int retries; + private Exception exception; + + private RetryStrategy(int retryStrategyMaxRetries, int retryStrategyWaitDurationMillis) { + this.retryStrategyMaxRetries = retryStrategyMaxRetries; + retries = 0; + + // Assign the default wait duration if a non-positive value is provided. + if (retryStrategyWaitDurationMillis > 0) { + this.retryStrategyWaitDurationMillis = retryStrategyWaitDurationMillis; + } else { + this.retryStrategyWaitDurationMillis = 1000; + } + } + + public CallResult exec() { + + // Retry logic runs inside the callable/task itself to avoid + // blocking synchronously on the result. + // The failed task is retried until the max retry count is reached, + // waiting a configurable duration between attempts. + for (int i = 0; i < retryStrategyMaxRetries; i++) { + try { + doTask(); + + // The task succeeded: clear the exception and return + // a successful result. + exception = null; + return new CallResult(exception, true); + } catch (Exception ex) { + LOGGER.debug("Failed to execute task on attempt " + (i + 1) + "." + + " Sleeping for " + retryStrategyWaitDurationMillis + " ms. Exception: " + ex.toString(), ex); + exception = ex; + + try { + Thread.sleep(retryStrategyWaitDurationMillis); + } catch (InterruptedException exception) { + // Skip the remaining retries if an InterruptedException is caught, + // and record the number of attempts made. + retries = i; + i = retryStrategyMaxRetries; + } + } + + retries = i; + } + + // All retries failed: return a failure result. + LOGGER.error("Task did not complete successfully after " + retries + + " tries. 
Exception got: " + exception.toString()); + return new CallResult(exception, false); + } + } + + private RetryStrategy retryStrategy; + + BaseTask() { + taskCounter.incrementAndGet(); + retryStrategy = new RetryStrategy(maxRetries, waitDurationMillis); + } + + @Override + public CallResult call() throws Exception { + CallResult callResult = retryStrategy.exec(); + taskCounter.decrementAndGet(); + return callResult; + } + + abstract void doTask() throws Exception; + } + + class PartitionTask extends BaseTask { + private final String dbName; + private final String tblName; + private final List partNames; + private final TPathChanges tblPathChange; + + PartitionTask(String dbName, String tblName, List partNames, + TPathChanges tblPathChange) { + super(); + this.dbName = dbName; + this.tblName = tblName; + this.partNames = partNames; + this.tblPathChange = tblPathChange; + } + + @Override + public void doTask() throws Exception { + List tblParts = + hmsHandler.get_partitions_by_names(dbName, tblName, partNames); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("#### Fetching partitions " + + "[" + dbName + "." + tblName + "]" + "[" + partNames + "]"); + } + for (Partition part : tblParts) { + List partPath = PathsUpdate.parsePath(part.getSd() + .getLocation()); + if (partPath != null) { + synchronized (tblPathChange) { + tblPathChange.addToAddPaths(partPath); + } + } + } + } + } + + class TableTask extends BaseTask { + private final Database db; + private final List tableNames; + private final PathsUpdate update; + + TableTask(Database db, List tableNames, PathsUpdate update) { + super(); + this.db = db; + this.tableNames = tableNames; + this.update = update; + } + + @Override + public void doTask() throws Exception { + List
<Table>
tables = + hmsHandler.get_table_objects_by_name(db.getName(), tableNames); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("#### Fetching tables [" + db.getName() + "][" + + tableNames + "]"); + } + for (Table tbl : tables) { + TPathChanges tblPathChange; + // Table names are case insensitive + String tableName = tbl.getTableName().toLowerCase(); + synchronized (update) { + Preconditions.checkArgument(tbl.getDbName().equalsIgnoreCase(db.getName())); + tblPathChange = update.newPathChange(db.getName() + "." + tableName); + } + if (tbl.getSd().getLocation() != null) { + List tblPath = + PathsUpdate.parsePath(tbl.getSd().getLocation()); + if (tblPath != null) { + tblPathChange.addToAddPaths(tblPath); + } + List tblPartNames = + hmsHandler.get_partition_names(db.getName(), tableName, (short) -1); + for (int i = 0; i < tblPartNames.size(); i += maxPartitionsPerCall) { + List partsToFetch = + tblPartNames.subList(i, Math.min( + i + maxPartitionsPerCall, tblPartNames.size())); + Callable partTask = + new PartitionTask(db.getName(), tableName, + partsToFetch, tblPathChange); + synchronized (results) { + results.add(threadPool.submit(partTask)); + } + } + } + } + } + } + + class DbTask extends BaseTask { + + private final PathsUpdate update; + private final String dbName; + + DbTask(PathsUpdate update, String dbName) { + super(); + this.update = update; + //Database names are case insensitive + this.dbName = dbName.toLowerCase(); + } + + @Override + public void doTask() throws Exception { + Database db = hmsHandler.get_database(dbName); + List dbPath = PathsUpdate.parsePath(db.getLocationUri()); + if (dbPath != null) { + synchronized (update) { + Preconditions.checkArgument(dbName.equalsIgnoreCase(db.getName())); + update.newPathChange(dbName).addToAddPaths(dbPath); + } + } + List allTblStr = hmsHandler.get_all_tables(dbName); + for (int i = 0; i < allTblStr.size(); i += maxTablesPerCall) { + List tablesToFetch = + allTblStr.subList(i, Math.min( + i + maxTablesPerCall, allTblStr.size())); + Callable tableTask = + new TableTask(db, tablesToFetch, update); + synchronized (results) { + results.add(threadPool.submit(tableTask)); + } + } + } + } + + private final ExecutorService threadPool; + private final IHMSHandler hmsHandler; + private final int maxPartitionsPerCall; + private final int maxTablesPerCall; + private final List> results = + new ArrayList>(); + private final AtomicInteger taskCounter = new AtomicInteger(0); + private final int maxRetries; + private final int waitDurationMillis; + private final boolean failOnRetry; + + MetastoreCacheInitializer(IHMSHandler hmsHandler, Configuration conf) { + this.hmsHandler = hmsHandler; + this.maxPartitionsPerCall = conf.getInt( + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC, + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC_DEFAULT); + this.maxTablesPerCall = conf.getInt( + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC, + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC_DEFAULT); + threadPool = Executors.newFixedThreadPool(conf.getInt( + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS, + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS_DEFAULT)); + maxRetries = conf.getInt( + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_MAX_NUM, + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_MAX_NUM_DEFAULT); + 
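// Illustrative sketch (not part of this patch): the bounded-retry loop that
// RetryStrategy above implements, reduced to its essentials. The names
// withRetries/maxRetries/waitMillis are assumptions for illustration only.
//
//   static <T> T withRetries(Callable<T> task, int maxRetries, long waitMillis)
//       throws Exception {
//     Exception last = null;
//     for (int i = 0; i < maxRetries; i++) {
//       try {
//         return task.call();                  // success: stop retrying
//       } catch (InterruptedException ie) {
//         throw ie;                            // an interrupt skips the remaining retries
//       } catch (Exception e) {
//         last = e;                            // remember the most recent failure
//         Thread.sleep(waitMillis);            // back off before the next attempt
//       }
//     }
//     throw last;                              // all attempts exhausted (assumes maxRetries > 0)
//   }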
waitDurationMillis = conf.getInt( + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_WAIT_DURAION_IN_MILLIS, + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_WAIT_DURAION_IN_MILLIS_DEFAULT); + failOnRetry = conf.getBoolean( + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_FAIL_ON_PARTIAL_UPDATE, + ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_FAIL_ON_PARTIAL_UPDATE_DEFAULT); + } + + UpdateableAuthzPaths createInitialUpdate() throws + Exception { + UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(new + String[]{"/"}); + PathsUpdate tempUpdate = new PathsUpdate(-1, false); + List allDbStr = hmsHandler.get_all_databases(); + for (String dbName : allDbStr) { + Callable dbTask = new DbTask(tempUpdate, dbName); + results.add(threadPool.submit(dbTask)); + } + + while (taskCounter.get() > 0) { + Thread.sleep(1000); + // Wait until no more tasks remain + } + + for (Future result : results) { + CallResult callResult = result.get(); + + // Fail the HMS startup if tasks are not all successful and + // fail on partial updates flag is set in the config. + if (callResult.getSuccessStatus() == false && failOnRetry) { + throw new RuntimeException(callResult.getFailure()); + } + } + + authzPaths.updatePartial(Lists.newArrayList(tempUpdate), + new ReentrantReadWriteLock()); + return authzPaths; + } + + + @Override + public void close() throws IOException { + if (threadPool != null) { + threadPool.shutdownNow(); + } + } +} diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java index 5277eef05..10ea37bd0 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java @@ -17,8 +17,11 @@ */ package org.apache.sentry.hdfs; -import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.LinkedList; import java.util.List; +import java.util.Queue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -28,17 +31,13 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.codahale.metrics.Timer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; -import org.apache.hadoop.hive.metastore.IHMSHandler; import org.apache.hadoop.hive.metastore.MetaStorePreEventListener; -import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; import org.apache.sentry.hdfs.ServiceConstants.ServerConfig; -import org.apache.sentry.hdfs.service.thrift.TPathChanges; import org.apache.sentry.provider.db.SentryMetastoreListenerPlugin; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,6 +60,11 @@ public void run() { // No need to sync.. as metastore is in the process of pushing an update.. 
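// (Context, not part of this patch: SyncTask performs the sequence-number
// handshake visible just below. It asks Sentry for the last HMS path
// sequence number it has seen and compares it with lastSentSeqNum; a
// mismatch means Sentry missed an update and a resync is needed.
// A minimal sketch of that check, with assumed helper names:
//
//   long lastSeenBySentry = client.getLastSeenHMSPathSeqNum();
//   if (lastSeenBySentry != lastSentSeqNum) {
//     resync();   // hypothetical: re-send state until Sentry catches up
//   }
// )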
return; } + if (MetastorePlugin.this.authzPaths == null) { + LOGGER.info("#### Metastore Plugin cache has not finished " + + "initialization."); + return; + } try { long lastSeenBySentry = MetastorePlugin.this.getClient().getLastSeenHMSPathSeqNum(); @@ -85,16 +89,22 @@ public void run() { private final Configuration conf; private SentryHDFSServiceClient sentryClient; - private UpdateableAuthzPaths authzPaths; + private volatile UpdateableAuthzPaths authzPaths; private Lock notificiationLock; // Initialized to some value > 1. - private static final AtomicLong seqNum = new AtomicLong(5); + protected static final AtomicLong seqNum = new AtomicLong(5); // Has to match the value of seqNum - private static volatile long lastSentSeqNum = seqNum.get(); + protected static volatile long lastSentSeqNum = seqNum.get(); private volatile boolean syncSent = false; - private final ExecutorService threadPool; + private volatile boolean initComplete = false; + private volatile boolean queueFlushComplete = false; + private volatile Throwable initError = null; + private final Queue<PathsUpdate> updateQueue = new LinkedList<PathsUpdate>(); + + private final ExecutorService threadPool; //NOPMD + private final Configuration sentryConf; static class ProxyHMSHandler extends HMSHandler { public ProxyHMSHandler(String name, HiveConf conf) throws MetaException { @@ -102,70 +112,86 @@ public ProxyHMSHandler(String name, HiveConf conf) throws MetaException { } } - public MetastorePlugin(Configuration conf) { + public MetastorePlugin(Configuration conf, Configuration sentryConf) { this.notificiationLock = new ReentrantLock(); + + if (!(conf instanceof HiveConf)) { + String error = "Configuration is not an instance of HiveConf"; + LOGGER.error(error); + throw new RuntimeException(error); + } this.conf = new HiveConf((HiveConf)conf); + + this.sentryConf = new Configuration(sentryConf); this.conf.unset(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname); this.conf.unset(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname); this.conf.unset(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS.varname); this.conf.unset(HiveConf.ConfVars.METASTOREURIS.varname); - try { - this.authzPaths = createInitialUpdate(new ProxyHMSHandler("sentry.hdfs", (HiveConf)this.conf)); - } catch (Exception e1) { - LOGGER.error("Could not create Initial AuthzPaths or HMSHandler !!", e1); - throw new RuntimeException(e1); + Thread initUpdater = new Thread() { + @Override + public void run() { + MetastoreCacheInitializer cacheInitializer = null; + try { + cacheInitializer = + new MetastoreCacheInitializer(new ProxyHMSHandler("sentry.hdfs", + (HiveConf) MetastorePlugin.this.conf), + MetastorePlugin.this.conf); + MetastorePlugin.this.authzPaths = + cacheInitializer.createInitialUpdate(); + LOGGER.info("#### Metastore Plugin initialization complete !!"); + synchronized (updateQueue) { + while (!updateQueue.isEmpty()) { + PathsUpdate update = updateQueue.poll(); + if (update != null) { + processUpdate(update); + } + } + queueFlushComplete = true; + } + LOGGER.info("#### Finished flushing queued updates to Sentry !!"); + } catch (Exception e) { + LOGGER.error("#### Could not create Initial AuthzPaths or HMSHandler !!", e); + initError = e; + } finally { + if (cacheInitializer != null) { + try { + cacheInitializer.close(); + } catch (Exception e) { + LOGGER.info("#### Exception while closing cacheInitializer !!", e); + } + } + initComplete = true; + } + } + }; + if (this.conf.getBoolean( + ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_ASYNC_INIT_ENABLE, + ServerConfig + 
.SENTRY_HDFS_SYNC_METASTORE_CACHE_ASYNC_INIT_ENABLE_DEFAULT)) { + LOGGER.warn("#### Metastore Cache initialization is set to async... " + + "HDFS ACL synchronization will not happen until metastore " + + "cache initialization is completed !!"); + initUpdater.start(); + } else { + initUpdater.run(); //NOPMD } try { - sentryClient = new SentryHDFSServiceClient(conf); + sentryClient = SentryHDFSServiceClientFactory.create(sentryConf); } catch (Exception e) { sentryClient = null; LOGGER.error("Could not connect to Sentry HDFS Service !!", e); } ScheduledExecutorService threadPool = Executors.newScheduledThreadPool(1); threadPool.scheduleWithFixedDelay(new SyncTask(), - this.conf.getLong(ServerConfig.SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_MS, - ServerConfig.SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_DEFAULT), - this.conf.getLong(ServerConfig.SENTRY_HDFS_SYNC_CHECKER_PERIOD_MS, - ServerConfig.SENTRY_HDFS_SYNC_CHECKER_PERIOD_DEFAULT), - TimeUnit.MILLISECONDS); + this.conf.getLong(ServerConfig + .SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_MS, + ServerConfig.SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_DEFAULT), + this.conf.getLong(ServerConfig.SENTRY_HDFS_SYNC_CHECKER_PERIOD_MS, + ServerConfig.SENTRY_HDFS_SYNC_CHECKER_PERIOD_DEFAULT), + TimeUnit.MILLISECONDS); this.threadPool = threadPool; } - private UpdateableAuthzPaths createInitialUpdate(IHMSHandler hmsHandler) throws Exception { - UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(new String[] {"/"}); - PathsUpdate tempUpdate = new PathsUpdate(-1, false); - List<String> allDbStr = hmsHandler.get_all_databases(); - for (String dbName : allDbStr) { - Database db = hmsHandler.get_database(dbName); - List<String> dbPath = PathsUpdate.parsePath(db.getLocationUri()); - if(dbPath != null) { - tempUpdate.newPathChange(db.getName()).addToAddPaths(dbPath); - } - List<String> allTblStr = hmsHandler.get_all_tables(db.getName()); - for (String tblName : allTblStr) { - Table tbl = hmsHandler.get_table(db.getName(), tblName); - TPathChanges tblPathChange = tempUpdate.newPathChange(tbl - .getDbName() + "." + tbl.getTableName()); - List<Partition> tblParts = - hmsHandler.get_partitions(db.getName(), tbl.getTableName(), (short) -1); - List<String> tb1Path = PathsUpdate.parsePath(tbl.getSd().getLocation() == null ? - db.getLocationUri() : tbl.getSd().getLocation()); - if(tb1Path != null) { - tblPathChange.addToAddPaths(tb1Path); - } - for (Partition part : tblParts) { - List<String> partPath = PathsUpdate.parsePath(part.getSd().getLocation()); - if(partPath != null) { - tblPathChange.addToAddPaths(partPath); - } - } - } - } - authzPaths.updatePartial(Lists.newArrayList(tempUpdate), - new ReentrantReadWriteLock()); - return authzPaths; - } - @Override public void addPath(String authzObj, String path) { List<String> pathTree = PathsUpdate.parsePath(path); @@ -174,10 +200,10 @@ public void addPath(String authzObj, String path) { } LOGGER.debug("#### HMS Path Update [" + "OP : addPath, " - + "authzObj : " + authzObj + ", " + + "authzObj : " + authzObj.toLowerCase() + ", " + "path : " + path + "]"); PathsUpdate update = createHMSUpdate(); - update.newPathChange(authzObj).addToAddPaths(pathTree); + update.newPathChange(authzObj.toLowerCase()).addToAddPaths(pathTree); notifySentryAndApplyLocal(update); } @@ -185,24 +211,24 @@ public void addPath(String authzObj, String path) { public void removeAllPaths(String authzObj, List<String> childObjects) { LOGGER.debug("#### HMS Path Update [" + "OP : removeAllPaths, " - + "authzObj : " + authzObj + ", " + + "authzObj : " + authzObj.toLowerCase() + ", " + "childObjs : " + (childObjects == null ? 
"[]" : childObjects) + "]"); PathsUpdate update = createHMSUpdate(); if (childObjects != null) { for (String childObj : childObjects) { - update.newPathChange(authzObj + "." + childObj).addToDelPaths( + update.newPathChange(authzObj.toLowerCase() + "." + childObj).addToDelPaths( Lists.newArrayList(PathsUpdate.ALL_PATHS)); } } - update.newPathChange(authzObj).addToDelPaths( - Lists.newArrayList(PathsUpdate.ALL_PATHS)); + update.newPathChange(authzObj.toLowerCase()).addToDelPaths( + Lists.newArrayList(PathsUpdate.ALL_PATHS)); notifySentryAndApplyLocal(update); } @Override public void removePath(String authzObj, String path) { if ("*".equals(path)) { - removeAllPaths(authzObj, null); + removeAllPaths(authzObj.toLowerCase(), null); } else { List pathTree = PathsUpdate.parsePath(path); if(pathTree == null) { @@ -210,10 +236,10 @@ public void removePath(String authzObj, String path) { } LOGGER.debug("#### HMS Path Update [" + "OP : removePath, " - + "authzObj : " + authzObj + ", " + + "authzObj : " + authzObj.toLowerCase() + ", " + "path : " + path + "]"); PathsUpdate update = createHMSUpdate(); - update.newPathChange(authzObj).addToDelPaths(pathTree); + update.newPathChange(authzObj.toLowerCase()).addToDelPaths(pathTree); notifySentryAndApplyLocal(update); } } @@ -221,6 +247,12 @@ public void removePath(String authzObj, String path) { @Override public void renameAuthzObject(String oldName, String oldPath, String newName, String newPath) { + if (oldName != null) { + oldName = oldName.toLowerCase(); + } + if (newName != null) { + newName = newName.toLowerCase(); + } PathsUpdate update = createHMSUpdate(); LOGGER.debug("#### HMS Path Update [" + "OP : renameAuthzObject, " @@ -242,10 +274,10 @@ public void renameAuthzObject(String oldName, String oldPath, String newName, private SentryHDFSServiceClient getClient() { if (sentryClient == null) { try { - sentryClient = new SentryHDFSServiceClient(conf); - } catch (IOException e) { + sentryClient = SentryHDFSServiceClientFactory.create(sentryConf); + } catch (Exception e) { sentryClient = null; - LOGGER.error("Could not connect to Sentry HDFS Service !!", e); + LOGGER.error("#### Could not connect to Sentry HDFS Service !!", e); } } return sentryClient; @@ -258,20 +290,25 @@ private PathsUpdate createHMSUpdate() { } protected void notifySentryNoLock(PathsUpdate update) { + final Timer.Context timerContext = + SentryHdfsMetricsUtil.getNotifyHMSUpdateTimer.time(); try { getClient().notifyHMSUpdate(update); } catch (Exception e) { LOGGER.error("Could not send update to Sentry HDFS Service !!", e); + SentryHdfsMetricsUtil.getFailedNotifyHMSUpdateCounter.inc(); + } finally { + timerContext.stop(); } } - protected void notifySentryAndApplyLocal(PathsUpdate update) { + protected void notifySentry(PathsUpdate update) { notificiationLock.lock(); - if (!syncSent) { - new SyncTask().run(); - } try { - authzPaths.updatePartial(Lists.newArrayList(update), new ReentrantReadWriteLock()); + if (!syncSent) { + new SyncTask().run(); + } + notifySentryNoLock(update); } finally { lastSentSeqNum = update.getSeqNum(); @@ -279,4 +316,43 @@ protected void notifySentryAndApplyLocal(PathsUpdate update) { LOGGER.debug("#### HMS Path Last update sent : ["+ lastSentSeqNum + "]"); } } + + protected void applyLocal(PathsUpdate update) { + final Timer.Context timerContext = + SentryHdfsMetricsUtil.getApplyLocalUpdateTimer.time(); + authzPaths.updatePartial(Lists.newArrayList(update), new ReentrantReadWriteLock()); + timerContext.stop(); + 
SentryHdfsMetricsUtil.getApplyLocalUpdateHistogram.update( + update.getPathChanges().size()); + } + + private void notifySentryAndApplyLocal(PathsUpdate update) { + if (initComplete) { + processUpdate(update); + } else { + if (initError == null) { + synchronized (updateQueue) { + if (!queueFlushComplete) { + updateQueue.add(update); + LOGGER.warn("#### Path update [" + update.getSeqNum() + "] not sent to Sentry yet.. " + + "Metastore hasn't been initialized; the update has been queued !!"); + } else { + processUpdate(update); + } + } + } else { + StringWriter sw = new StringWriter(); + initError.printStackTrace(new PrintWriter(sw)); + LOGGER.error("#### Error initializing Metastore Plugin " + + "[" + sw.toString() + "] !!"); + throw new RuntimeException(initError); + } + } + } + + protected void processUpdate(PathsUpdate update) { + applyLocal(update); + notifySentry(update); + } + } diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePluginWithHA.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePluginWithHA.java index 271e12151..4f6d7ca06 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePluginWithHA.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePluginWithHA.java @@ -18,14 +18,11 @@ package org.apache.sentry.hdfs; import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.recipes.cache.PathChildrenCache; import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; -import org.apache.curator.utils.ZKPaths; import org.apache.hadoop.conf.Configuration; import org.apache.sentry.hdfs.ServiceConstants.ServerConfig; import org.apache.sentry.provider.db.SentryPolicyStorePlugin.SentryPluginException; -import org.apache.sentry.provider.db.service.persistent.HAContext; import org.apache.sentry.binding.metastore.MetastoreAuthzBinding; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,25 +66,34 @@ public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) private String zkPath; private PluginCacheSyncUtil pluginCacheSync; - public MetastorePluginWithHA(Configuration conf) throws Exception { - super(conf); - zkPath = conf.get(ServerConfig.SENTRY_METASTORE_HA_ZOOKEEPER_NAMESPACE, + public MetastorePluginWithHA(Configuration conf, Configuration sentryConfig) throws Exception { + super(conf, sentryConfig); + zkPath = sentryConfig.get(ServerConfig.SENTRY_METASTORE_HA_ZOOKEEPER_NAMESPACE, ServerConfig.SENTRY_METASTORE_HA_ZOOKEEPER_NAMESPACE_DEFAULT); - pluginCacheSync = new PluginCacheSyncUtil(zkPath, conf, + pluginCacheSync = new PluginCacheSyncUtil(zkPath, sentryConfig, + new SentryMetastoreHACacheListener(this)); + // start seq# from the last global seq + seqNum.set(pluginCacheSync.getUpdateCounter()); + MetastorePlugin.lastSentSeqNum = seqNum.get(); } @Override - protected void notifySentryAndApplyLocal(PathsUpdate update) { + protected void processUpdate(PathsUpdate update) { try { + // push to ZK in order to keep the metastore local cache in sync + pluginCacheSync.handleCacheUpdate(update); + + // notify Sentry. 
Note that Sentry service already has a cache + // sync mechanism to replicate this update to all other Sentry servers + notifySentry(update); } catch (SentryPluginException e) { LOGGER.error("Error pushing update to cache", e); } } + // apply the update to local cache private void processCacheNotification(PathsUpdate update) { - super.notifySentryAndApplyLocal(update); + super.applyLocal(update); } } diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/PluginCacheSyncUtil.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/PluginCacheSyncUtil.java index 94c989587..4ce16c703 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/PluginCacheSyncUtil.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/PluginCacheSyncUtil.java @@ -22,6 +22,7 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import com.codahale.metrics.Timer; import org.apache.curator.framework.recipes.atomic.DistributedAtomicLong; import org.apache.curator.framework.recipes.cache.PathChildrenCache; import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; @@ -33,6 +34,7 @@ import org.apache.sentry.hdfs.Updateable.Update; import org.apache.sentry.provider.db.SentryPolicyStorePlugin.SentryPluginException; import org.apache.sentry.provider.db.service.persistent.HAContext; +import org.apache.zookeeper.KeeperException.NoNodeException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -117,6 +119,7 @@ public void run() { } public void handleCacheUpdate(Update update) throws SentryPluginException { + final Timer.Context timerContext = SentryHdfsMetricsUtil.getCacheSyncToZKTimer.time(); // post message to ZK cache try { // Acquire ZK lock for update cache sync. 
This ensures that the counter @@ -126,15 +129,25 @@ public void handleCacheUpdate(Update update) throws SentryPluginException { "Failed to get ZK lock for update cache syncup"); } } catch (Exception e1) { + // Stop timer in advance + timerContext.stop(); + SentryHdfsMetricsUtil.getFailedCacheSyncToZK.inc(); throw new SentryPluginException( "Error getting ZK lock for update cache syncup" + e1, e1); } - + boolean failed = false; try { - // increment the global sequence counter try { - update.setSeqNum(updateCounter.increment().postValue()); + // increment the global sequence counter if this is not a full update + if (!update.hasFullImage()) { + update.setSeqNum(updateCounter.increment().postValue()); + } else { + if (updateCounter.get().preValue() < update.getSeqNum()) { + updateCounter.add(update.getSeqNum() - updateCounter.get().preValue()); + } + } } catch (Exception e1) { + failed = true; throw new SentryPluginException( "Error setting ZK counter for update cache syncup" + e1, e1); } @@ -147,6 +160,7 @@ public void handleCacheUpdate(Update update) throws SentryPluginException { haContext.getCuratorFramework().create().creatingParentsIfNeeded() .forPath(newPath, update.serialize()); } catch (Exception e) { + failed = true; throw new SentryPluginException("error posting update to ZK ", e); } } finally { @@ -154,9 +168,16 @@ public void handleCacheUpdate(Update update) throws SentryPluginException { try { updatorLock.release(); } catch (Exception e) { + // Stop timer in advance + timerContext.stop(); + SentryHdfsMetricsUtil.getFailedCacheSyncToZK.inc(); throw new SentryPluginException( "Error releasing ZK lock for update cache syncup" + e, e); } + timerContext.stop(); + if (failed) { + SentryHdfsMetricsUtil.getFailedCacheSyncToZK.inc(); + } } } @@ -204,6 +225,10 @@ public void gcPluginCache(Configuration conf) { haContext.getCuratorFramework().delete().forPath(pathToDelete); gcCounter.increment(); LOGGER.debug("Deleted znode " + pathToDelete); + } catch (NoNodeException eN) { + // We might have endup with holes in the node counter due to network/ZK errors + // Ignore the delete error if the node doesn't exist and move on + gcCounter.increment(); } catch (Exception e) { LOGGER.info("Error cleaning up node " + pathToDelete, e); break; diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java index 80f364804..e4f3f580e 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; +import com.codahale.metrics.Timer; import org.apache.sentry.hdfs.service.thrift.SentryHDFSService; import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse; import org.apache.sentry.hdfs.service.thrift.TPathsUpdate; @@ -45,9 +46,15 @@ public TAuthzUpdateResponse get_all_authz_updates_from(long permSeqNum, long pat throw new TException( "This Sentry server is not communicating with other nodes and out of sync "); } - List permUpdates = SentryPlugin.instance.getAllPermsUpdatesFrom(permSeqNum); - List pathUpdates = SentryPlugin.instance.getAllPathsUpdatesFrom(pathSeqNum); + final Timer.Context timerContext = + SentryHdfsMetricsUtil.getAllAuthzUpdatesTimer.time(); try { + List permUpdates = + 
SentryPlugin.instance.getAllPermsUpdatesFrom(permSeqNum); + SentryHdfsMetricsUtil.getPermUpdateHistogram.update(permUpdates.size()); + List pathUpdates = + SentryPlugin.instance.getAllPathsUpdatesFrom(pathSeqNum); + SentryHdfsMetricsUtil.getPathUpdateHistogram.update(pathUpdates.size()); for (PathsUpdate update : pathUpdates) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("### Sending PATH preUpdate seq [" + update.getSeqNum() + "] ###"); @@ -80,6 +87,8 @@ public TAuthzUpdateResponse get_all_authz_updates_from(long permSeqNum, long pat } catch (Exception e) { LOGGER.error("Error Sending updates to downstream Cache", e); throw new TException(e); + } finally { + timerContext.stop(); } } else { LOGGER.error("SentryPlugin not initialized yet !!"); @@ -90,6 +99,8 @@ public TAuthzUpdateResponse get_all_authz_updates_from(long permSeqNum, long pat @Override public void handle_hms_notification(TPathsUpdate update) throws TException { + final Timer.Context timerContext = + SentryHdfsMetricsUtil.getHandleHmsNotificationTimer.time(); try { PathsUpdate hmsUpdate = new PathsUpdate(update); if (SentryPlugin.instance != null) { @@ -100,7 +111,15 @@ public void handle_hms_notification(TPathsUpdate update) throws TException { } } catch (Exception e) { LOGGER.error("Error handling notification from HMS", e); + SentryHdfsMetricsUtil.getFailedHandleHmsNotificationCounter.inc(); throw new TException(e); + } finally { + timerContext.stop(); + SentryHdfsMetricsUtil.getHandleHmsPathChangeHistogram.update( + update.getPathChangesSize()); + if (update.isHasFullImage()) { + SentryHdfsMetricsUtil.getHandleHmsHasFullImageCounter.inc(); + } } } diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java index d35de75b9..db55b5aa3 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java @@ -18,21 +18,15 @@ package org.apache.sentry.hdfs; -import java.net.Socket; - import org.apache.hadoop.conf.Configuration; import org.apache.sentry.hdfs.service.thrift.SentryHDFSService; import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Iface; -import org.apache.sentry.provider.db.log.util.CommandUtil; +import org.apache.sentry.provider.db.service.thrift.ThriftUtil; import org.apache.sentry.service.thrift.ProcessorFactory; import org.apache.thrift.TException; import org.apache.thrift.TMultiplexedProcessor; import org.apache.thrift.TProcessor; import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TSaslClientTransport; -import org.apache.thrift.transport.TSaslServerTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,59 +41,21 @@ public ProcessorWrapper(Iface iface) { } @Override public boolean process(TProtocol in, TProtocol out) throws TException { - setIpAddress(in); - setImpersonator(in); + ThriftUtil.setIpAddress(in); + ThriftUtil.setImpersonator(in); return super.process(in, out); } - - private void setImpersonator(final TProtocol in) { - TTransport transport = in.getTransport(); - if (transport instanceof TSaslServerTransport) { - String impersonator = ((TSaslServerTransport) transport).getSaslServer().getAuthorizationID(); - 
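// (Context, not part of this patch: the helpers deleted below were
// consolidated into the shared ThriftUtil, which the new ProcessorWrapper
// calls. The underlying pattern: a SASL transport wraps the real TSocket,
// so the peer address must be read from the unwrapped transport. A minimal
// sketch, with assumed names:
//
//   TTransport t = in.getTransport();
//   if (t instanceof TSaslServerTransport) {
//     t = ((TSaslServerTransport) t).getUnderlyingTransport();
//   }
//   if (t instanceof TSocket) {
//     String ip = ((TSocket) t).getSocket().getInetAddress().toString();
//     // record 'ip' for auditing, e.g. via CommandUtil/ThriftUtil
//   }
// )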
CommandUtil.setImpersonator(impersonator); - } - } - - private void setIpAddress(final TProtocol in) { - TTransport transport = in.getTransport(); - TSocket tSocket = getUnderlyingSocketFromTransport(transport); - if (tSocket != null) { - setIpAddress(tSocket.getSocket()); - } else { - LOGGER.warn("Unknown Transport, cannot determine ipAddress"); - } - } - - private void setIpAddress(Socket socket) { - CommandUtil.setIpAddress(socket.getInetAddress().toString()); - } - - private TSocket getUnderlyingSocketFromTransport(TTransport transport) { - if (transport != null) { - if (transport instanceof TSaslServerTransport) { - transport = ((TSaslServerTransport) transport).getUnderlyingTransport(); - } else if (transport instanceof TSaslClientTransport) { - transport = ((TSaslClientTransport) transport).getUnderlyingTransport(); - } else { - if (!(transport instanceof TSocket)) { - LOGGER.warn("Transport class [" + transport.getClass().getName() + "] is not of type TSocket"); - return null; - } - } - return (TSocket) transport; - } - return null; - } } public SentryHDFSServiceProcessorFactory(Configuration conf) { super(conf); } - + @Override public boolean register(TMultiplexedProcessor multiplexedProcessor) throws Exception { SentryHDFSServiceProcessor sentryServiceHandler = new SentryHDFSServiceProcessor(); + LOGGER.info("Calling registerProcessor from SentryHDFSServiceProcessorFactory"); TProcessor processor = new ProcessorWrapper(sentryServiceHandler); multiplexedProcessor.registerProcessor( SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME, processor); diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHdfsMetricsUtil.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHdfsMetricsUtil.java new file mode 100644 index 000000000..b67c94a51 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHdfsMetricsUtil.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.hdfs; + +import com.codahale.metrics.Counter; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.Timer; +import org.apache.sentry.provider.db.service.thrift.SentryMetrics; + +/** + * Util class to support metrics. 
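+ * The fields below are created once, at class load time, against the
+ * shared SentryMetrics registry, so call sites only reference pre-built
+ * static timers, counters, and histograms.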
+ */ +public class SentryHdfsMetricsUtil { + // SentryMetrics + private static final SentryMetrics sentryMetrics = SentryMetrics.getInstance(); + + // Metrics for get_all_authz_updates_from in SentryHDFSServiceProcessor + // The time used for each get_all_authz_updates_from + public static final Timer getAllAuthzUpdatesTimer = sentryMetrics.getTimer( + MetricRegistry.name(SentryHDFSServiceProcessor.class, + "get-all-authz-updates-from")); + // The size of perm updates for each get_all_authz_updates_from + public static final Histogram getPermUpdateHistogram = sentryMetrics.getHistogram( + MetricRegistry.name(SentryHDFSServiceProcessor.class, "perm-updates-size")); + // The size of path updates for each get_all_authz_updates_from + public static final Histogram getPathUpdateHistogram = sentryMetrics.getHistogram( + MetricRegistry.name(SentryHDFSServiceProcessor.class, "paths-updates-size")); + + // Metrics for handle_hms_notification in SentryHDFSServiceProcessor + // The time used for each handle_hms_notification + public static final Timer getHandleHmsNotificationTimer = sentryMetrics.getTimer( + MetricRegistry.name(SentryHDFSServiceProcessor.class, "handle-hms-notification")); + // The number of failed handle_hms_notification + public static final Counter getFailedHandleHmsNotificationCounter = + sentryMetrics.getCounter(MetricRegistry.name(SentryHDFSServiceProcessor.class, + "handle-hms-notification", "failed-num")); + // The number of handle_hms_notification with full image update + public static final Counter getHandleHmsHasFullImageCounter = sentryMetrics.getCounter( + MetricRegistry.name(SentryHDFSServiceProcessor.class, "handle-hms-notification", + "has-full-image-num")); + // The size of path changes for each handle_hms_notification + public static final Histogram getHandleHmsPathChangeHistogram = sentryMetrics.getHistogram( + MetricRegistry.name(SentryHDFSServiceProcessor.class, "handle-hms-notification", + "path-changes-size")); + + // Metrics for retrieveFullImage in SentryPlugin.PermImageRetriever + // The time used for each retrieveFullImage + public static final Timer getRetrieveFullImageTimer = sentryMetrics.getTimer( + MetricRegistry.name(SentryPlugin.PermImageRetriever.class, "retrieve-full-image")); + // The size of privilege changes for each retrieveFullImage + public static final Histogram getPrivilegeChangesHistogram = sentryMetrics.getHistogram( + MetricRegistry.name(SentryPlugin.PermImageRetriever.class, "retrieve-full-image", + "privilege-changes-size")); + // The size of role changes for each retrieveFullImage call + public static final Histogram getRoleChangesHistogram = sentryMetrics.getHistogram( + MetricRegistry.name(SentryPlugin.PermImageRetriever.class, "retrieve-full-image", + "role-changes-size")); + + // Metrics for notifySentry HMS update in MetaStorePlugin + // The timer used for each notifySentry + public static final Timer getNotifyHMSUpdateTimer = sentryMetrics.getTimer( + MetricRegistry.name(MetastorePlugin.class, "notify-sentry-HMS-update")); + // The number of failed notifySentry + public static final Counter getFailedNotifyHMSUpdateCounter = sentryMetrics.getCounter( + MetricRegistry.name(MetastorePlugin.class, "notify-sentry-HMS-update", + "failed-num")); + + // Metrics for applyLocal update in MetastorePlugin + // The time used for each applyLocal + public static final Timer getApplyLocalUpdateTimer = sentryMetrics.getTimer( + MetricRegistry.name(MetastorePlugin.class, "apply-local-update")); + // The size of path changes for each applyLocal + 
public static final Histogram getApplyLocalUpdateHistogram = sentryMetrics.getHistogram( + MetricRegistry.name(MetastorePlugin.class, "apply-local-update", + "path-change-size")); + + // Metrics for handleCacheUpdate to ZK in PluginCacheSyncUtil + // The time used for each handleCacheUpdate + public static final Timer getCacheSyncToZKTimer = sentryMetrics.getTimer( + MetricRegistry.name(PluginCacheSyncUtil.class, "cache-sync-to-zk")); + // The number of failed handleCacheUpdate + public static final Counter getFailedCacheSyncToZK = sentryMetrics.getCounter( + MetricRegistry.name(PluginCacheSyncUtil.class, "cache-sync-to-zk", "failed-num")); +} diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java index 221c39740..f3926a259 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java @@ -24,15 +24,14 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicLong; +import com.codahale.metrics.Timer; import org.apache.hadoop.conf.Configuration; import org.apache.sentry.hdfs.ServiceConstants.ServerConfig; import org.apache.sentry.hdfs.UpdateForwarder.ExternalImageRetriever; -import org.apache.sentry.hdfs.service.thrift.TPathChanges; import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate; import org.apache.sentry.hdfs.service.thrift.TPrivilegeChanges; import org.apache.sentry.hdfs.service.thrift.TRoleChanges; import org.apache.sentry.provider.db.SentryPolicyStorePlugin; -import org.apache.sentry.provider.db.SentryPolicyStorePlugin.SentryPluginException; import org.apache.sentry.provider.db.service.persistent.SentryStore; import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsRequest; import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleDeleteGroupsRequest; @@ -47,9 +46,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Strings; -import com.google.common.collect.Lists; - public class SentryPlugin implements SentryPolicyStorePlugin { private static final Logger LOGGER = LoggerFactory.getLogger(SentryPlugin.class); @@ -66,6 +62,8 @@ public PermImageRetriever(SentryStore sentryStore) { @Override public PermissionsUpdate retrieveFullImage(long currSeqNum) { + final Timer.Context timerContext = + SentryHdfsMetricsUtil.getRetrieveFullImageTimer.time(); Map> privilegeImage = sentryStore.retrieveFullPrivilegeImage(); Map> roleImage = sentryStore.retrieveFullRoleImage(); @@ -85,6 +83,11 @@ public PermissionsUpdate retrieveFullImage(long currSeqNum) { } PermissionsUpdate permissionsUpdate = new PermissionsUpdate(tPermUpdate); permissionsUpdate.setSeqNum(currSeqNum); + timerContext.stop(); + SentryHdfsMetricsUtil.getPrivilegeChangesHistogram.update( + tPermUpdate.getPrivilegeChangesSize()); + SentryHdfsMetricsUtil.getRoleChangesHistogram.update( + tPermUpdate.getRoleChangesSize()); return permissionsUpdate; } @@ -165,7 +168,9 @@ public void onAlterSentryRoleGrantPrivilege( if (request.isSetPrivileges()) { String roleName = request.getRoleName(); for (TSentryPrivilege privilege : request.getPrivileges()) { - onAlterSentryRoleGrantPrivilegeCore(roleName, privilege); + if(!("COLUMN".equalsIgnoreCase(privilege.getPrivilegeScope()))) { + onAlterSentryRoleGrantPrivilegeCore(roleName, privilege); + } } } } @@ -202,7 +207,9 @@ public void 
onAlterSentryRoleRevokePrivilege( if (request.isSetPrivileges()) { String roleName = request.getRoleName(); for (TSentryPrivilege privilege : request.getPrivileges()) { - onAlterSentryRoleRevokePrivilegeCore(roleName, privilege); + if(!("COLUMN".equalsIgnoreCase(privilege.getPrivilegeScope()))) { + onAlterSentryRoleRevokePrivilegeCore(roleName, privilege); + } } } } @@ -260,7 +267,7 @@ private String getAuthzObj(TSentryPrivilege privilege) { authzObj = dbName + "." + tblName; } } - return authzObj; + return authzObj == null ? null : authzObj.toLowerCase(); } private String getAuthzObj(TSentryAuthorizable authzble) { @@ -274,6 +281,6 @@ private String getAuthzObj(TSentryAuthorizable authzble) { authzObj = dbName + "." + tblName; } } - return authzObj; + return authzObj == null ? null : authzObj.toLowerCase(); } } diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarder.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarder.java index 22a436a67..73872813f 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarder.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarder.java @@ -19,6 +19,7 @@ import java.io.Closeable; import java.io.IOException; +import java.util.Collections; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -34,14 +35,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; - public class UpdateForwarder implements Updateable, Closeable { - public static interface ExternalImageRetriever { + interface ExternalImageRetriever { - public K retrieveFullImage(long currSeqNum); + K retrieveFullImage(long currSeqNum); } @@ -77,9 +76,9 @@ public UpdateForwarder(Configuration conf, Updateable updateable, ExternalImageRetriever imageRetreiver, int maxUpdateLogSize) { this(conf, updateable, imageRetreiver, maxUpdateLogSize, INIT_UPDATE_RETRY_DELAY); } - public UpdateForwarder(Configuration conf, Updateable updateable, + public UpdateForwarder(Configuration conf, Updateable updateable, //NOPMD ExternalImageRetriever imageRetreiver, int maxUpdateLogSize, - int initUpdateRetryDelay) { + int initUpdateRetryDelay) { this.maxUpdateLogSize = maxUpdateLogSize; this.imageRetreiver = imageRetreiver; if (imageRetreiver != null) { @@ -177,7 +176,7 @@ public void run() { } else { if (editNotMissed) { // apply partial preUpdate - updateable.updatePartial(Lists.newArrayList(update), lock); + updateable.updatePartial(Collections.singletonList(update), lock); } else { // Retrieve full update from External Source and if (imageRetreiver != null) { @@ -197,7 +196,7 @@ protected void appendToUpdateLog(K update) { synchronized (getUpdateLog()) { boolean logCompacted = false; if (getMaxUpdateLogSize() > 0) { - if (update.hasFullImage() || (getUpdateLog().size() == getMaxUpdateLogSize())) { + if (update.hasFullImage() || getUpdateLog().size() == getMaxUpdateLogSize()) { // Essentially a log compaction getUpdateLog().clear(); getUpdateLog().add(update.hasFullImage() ? 
update @@ -227,7 +226,7 @@ public List getAllUpdatesFrom(long seqNum) { List retVal = new LinkedList(); synchronized (getUpdateLog()) { long currSeqNum = lastCommittedSeqNum.get(); - if (LOGGER.isDebugEnabled() && (updateable != null)) { + if (LOGGER.isDebugEnabled() && updateable != null) { LOGGER.debug("#### GetAllUpdatesFrom [" + "type=" + updateable.getClass() + ", " + "reqSeqNum=" + seqNum + ", " diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarderWithHA.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarderWithHA.java index 9a4e7bbe9..574627cf9 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarderWithHA.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarderWithHA.java @@ -19,26 +19,16 @@ import java.io.IOException; import java.util.LinkedList; -import java.util.concurrent.TimeUnit; import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.recipes.atomic.DistributedAtomicLong; -import org.apache.curator.framework.recipes.cache.PathChildrenCache; import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; -import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex; -import org.apache.curator.utils.ZKPaths; import org.apache.hadoop.conf.Configuration; -import org.apache.sentry.SentryUserException; import org.apache.sentry.hdfs.ServiceConstants.ServerConfig; -import org.apache.sentry.hdfs.UpdateForwarder; import org.apache.sentry.provider.db.SentryPolicyStorePlugin.SentryPluginException; -import org.apache.sentry.provider.db.service.persistent.HAContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.cache.Cache; - public class UpdateForwarderWithHA extends UpdateForwarder implements Updateable { private static final Logger LOGGER = LoggerFactory.getLogger(UpdateForwarderWithHA.class); diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateablePermissions.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateablePermissions.java index 2fe81fde7..3d756c914 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateablePermissions.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateablePermissions.java @@ -20,8 +20,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; -import org.apache.sentry.hdfs.PermissionsUpdate; -import org.apache.sentry.hdfs.Updateable; import org.apache.sentry.hdfs.UpdateForwarder.ExternalImageRetriever; public class UpdateablePermissions implements Updateable{ diff --git a/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestHAUpdateForwarder.java b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestHAUpdateForwarder.java index 40af05a2d..5246e0546 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestHAUpdateForwarder.java +++ b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestHAUpdateForwarder.java @@ -19,7 +19,6 @@ import static org.junit.Assert.assertEquals; -import java.io.IOException; import java.util.List; import org.apache.curator.test.TestingServer; @@ -28,7 +27,6 @@ import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; import 
org.junit.After; import org.junit.Before; -import org.junit.BeforeClass; import org.junit.Test; import com.google.common.collect.Lists; diff --git a/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestMetastoreCacheInitializer.java b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestMetastoreCacheInitializer.java new file mode 100644 index 000000000..9e6072df2 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestMetastoreCacheInitializer.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.hdfs; + +import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; + +public class TestMetastoreCacheInitializer { + + @Test + public void testInitializer() throws Exception { + + Database db1 = Mockito.mock(Database.class); + Mockito.when(db1.getName()).thenReturn("db1"); + Mockito.when(db1.getLocationUri()).thenReturn("hdfs:///db1"); + Database db2 = Mockito.mock(Database.class); + Mockito.when(db2.getName()).thenReturn("db2"); + Mockito.when(db2.getLocationUri()).thenReturn("hdfs:///db2"); + Database db3 = Mockito.mock(Database.class); + Mockito.when(db3.getName()).thenReturn("db3"); + Mockito.when(db3.getLocationUri()).thenReturn("hdfs:///db3"); + + Table tab21 = Mockito.mock(Table.class); + Mockito.when(tab21.getDbName()).thenReturn("db2"); + Mockito.when(tab21.getTableName()).thenReturn("tab21"); + StorageDescriptor sd21 = Mockito.mock(StorageDescriptor.class); + Mockito.when(sd21.getLocation()).thenReturn("hdfs:///db2/tab21"); + Mockito.when(tab21.getSd()).thenReturn(sd21); + + Table tab31 = Mockito.mock(Table.class); + Mockito.when(tab31.getDbName()).thenReturn("db3"); + Mockito.when(tab31.getTableName()).thenReturn("tab31"); + StorageDescriptor sd31 = Mockito.mock(StorageDescriptor.class); + Mockito.when(sd31.getLocation()).thenReturn("hdfs:///db3/tab31"); + Mockito.when(tab31.getSd()).thenReturn(sd31); + + Partition part311 = Mockito.mock(Partition.class); + StorageDescriptor sd311 = Mockito.mock(StorageDescriptor.class); + Mockito.when(sd311.getLocation()).thenReturn("hdfs:///db3/tab31/part311"); + Mockito.when(part311.getSd()).thenReturn(sd311); + + Partition part312 = Mockito.mock(Partition.class); + StorageDescriptor sd312 = 
Mockito.mock(StorageDescriptor.class); + Mockito.when(sd312.getLocation()).thenReturn("hdfs:///db3/tab31/part312"); + Mockito.when(part312.getSd()).thenReturn(sd312); + + IHMSHandler hmsHandler = Mockito.mock(IHMSHandler.class); + Mockito.when(hmsHandler.get_all_databases()).thenReturn(Lists + .newArrayList("db1", "db2", "db3")); + Mockito.when(hmsHandler.get_database("db1")).thenReturn(db1); + Mockito.when(hmsHandler.get_all_tables("db1")).thenReturn(new + ArrayList()); + + Mockito.when(hmsHandler.get_database("db2")).thenReturn(db2); + Mockito.when(hmsHandler.get_all_tables("db2")).thenReturn(Lists + .newArrayList("tab21")); + Mockito.when(hmsHandler.get_table_objects_by_name("db2", + Lists.newArrayList("tab21"))) + .thenReturn(Lists.newArrayList(tab21)); + Mockito.when(hmsHandler.get_partition_names("db2", "tab21", (short) -1)) + .thenReturn(new ArrayList()); + + Mockito.when(hmsHandler.get_database("db3")).thenReturn(db3); + Mockito.when(hmsHandler.get_all_tables("db3")).thenReturn(Lists + .newArrayList("tab31")); + Mockito.when(hmsHandler.get_table_objects_by_name("db3", + Lists.newArrayList("tab31"))) + .thenReturn(Lists.newArrayList(tab31)); + Mockito.when(hmsHandler.get_partition_names("db3", "tab31", (short) -1)) + .thenReturn(Lists.newArrayList("part311", "part312")); + + Mockito.when(hmsHandler.get_partitions_by_names("db3", "tab31", + Lists.newArrayList("part311"))) + .thenReturn(Lists.newArrayList(part311)); + Mockito.when(hmsHandler.get_partitions_by_names("db3", "tab31", + Lists.newArrayList("part312"))) + .thenReturn(Lists.newArrayList(part312)); + + Configuration conf = new Configuration(); + conf.setInt(ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC, 1); + conf.setInt(ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC, 1); + conf.setInt(ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS, 1); + + MetastoreCacheInitializer cacheInitializer = new + MetastoreCacheInitializer(hmsHandler, conf); + UpdateableAuthzPaths update = cacheInitializer.createInitialUpdate(); + + Assert.assertEquals(new HashSet(Arrays.asList("db1")), update.findAuthzObjectExactMatches(new + String[]{"db1"})); + Assert.assertEquals(new HashSet(Arrays.asList("db2")), update.findAuthzObjectExactMatches(new + String[]{"db2"})); + Assert.assertEquals(new HashSet(Arrays.asList("db2.tab21")), update.findAuthzObjectExactMatches(new + String[]{"db2", "tab21"})); + Assert.assertEquals(new HashSet(Arrays.asList("db3")), update.findAuthzObjectExactMatches(new + String[]{"db3"})); + Assert.assertEquals(new HashSet(Arrays.asList("db3.tab31")), update.findAuthzObjectExactMatches(new + String[]{"db3", "tab31"})); + Assert.assertEquals(new HashSet(Arrays.asList("db3.tab31")), update.findAuthzObjectExactMatches(new + String[]{"db3", "tab31", "part311"})); + Assert.assertEquals(new HashSet(Arrays.asList("db3.tab31")), update.findAuthzObjectExactMatches(new + String[]{"db3", "tab31", "part312"})); + cacheInitializer.close(); + + } + + // Make sure exceptions in initializer parallel tasks are propagated well + @Test + public void testExceptionInTask() throws Exception { + //Set up mocks: db1.tb1, with tb1 returning a wrong dbname (db2) + Database db1 = Mockito.mock(Database.class); + Mockito.when(db1.getName()).thenReturn("db1"); + Mockito.when(db1.getLocationUri()).thenReturn("hdfs:///db1"); + + Table tab1 = Mockito.mock(Table.class); + //Return a wrong db name, so that this triggers an exception + 
Mockito.when(tab1.getDbName()).thenReturn("db2"); + Mockito.when(tab1.getTableName()).thenReturn("tab1"); + + IHMSHandler hmsHandler = Mockito.mock(IHMSHandler.class); + Mockito.when(hmsHandler.get_all_databases()).thenReturn(Lists + .newArrayList("db1")); + Mockito.when(hmsHandler.get_database("db1")).thenReturn(db1); + Mockito.when(hmsHandler.get_table_objects_by_name("db1", + Lists.newArrayList("tab1"))) + .thenReturn(Lists.newArrayList(tab1)); + Mockito.when(hmsHandler.get_all_tables("db1")).thenReturn(Lists + .newArrayList("tab1")); + + Configuration conf = new Configuration(); + conf.setInt(ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC, 1); + conf.setInt(ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC, 1); + conf.setInt(ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS, 1); + conf.setInt(ServiceConstants.ServerConfig + .SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_MAX_NUM, 2); + + try { + MetastoreCacheInitializer cacheInitializer = new + MetastoreCacheInitializer(hmsHandler, conf); + cacheInitializer.createInitialUpdate(); + Assert.fail("Expected cacheInitializer to fail"); + } catch (Exception e) { + Assert.assertTrue(e instanceof RuntimeException); + } + + } +} diff --git a/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestUpdateForwarder.java b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestUpdateForwarder.java index ee9a7a388..315d4b3a8 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestUpdateForwarder.java +++ b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestUpdateForwarder.java @@ -23,11 +23,9 @@ import java.util.List; import java.util.concurrent.locks.ReadWriteLock; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.hadoop.conf.Configuration; -import org.apache.sentry.hdfs.UpdateForwarder; -import org.apache.sentry.hdfs.Updateable; import org.apache.sentry.hdfs.UpdateForwarder.ExternalImageRetriever; import org.apache.sentry.hdfs.Updateable.Update; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; @@ -262,6 +260,14 @@ public void testGetUpdates() throws Exception { @Test public void testGetUpdatesAfterExternalEntityReset() throws Exception { + /* + * Disabled for Sentry HA. Since the sequence numbers are tracked in ZK, the + * lower sequence updates are ignored which causes this test to fail in HA + * mode + */ + Assume.assumeTrue(!testConf.getBoolean(ServerConfig.SENTRY_HA_ENABLED, + false)); + DummyImageRetreiver imageRetreiver = new DummyImageRetreiver(); imageRetreiver.setState("a,b,c"); updateForwarder = UpdateForwarder.create( diff --git a/sentry-policy/pom.xml b/sentry-policy/pom.xml index 4fb4f3cc6..45dc675a0 100644 --- a/sentry-policy/pom.xml +++ b/sentry-policy/pom.xml @@ -22,7 +22,7 @@ limitations under the License. org.apache.sentry sentry - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-policy @@ -34,6 +34,8 @@ limitations under the License. sentry-policy-db sentry-policy-indexer sentry-policy-search + sentry-policy-sqoop + sentry-policy-kafka diff --git a/sentry-policy/sentry-policy-common/pom.xml b/sentry-policy/sentry-policy-common/pom.xml index 179cf2946..fbec06f07 100644 --- a/sentry-policy/sentry-policy-common/pom.xml +++ b/sentry-policy/sentry-policy-common/pom.xml @@ -21,7 +21,7 @@ limitations under the License.
org.apache.sentry sentry-policy - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-policy-common @@ -40,6 +40,11 @@ limitations under the License. com.google.guava guava + + junit + junit + test + diff --git a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/KeyValue.java b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/KeyValue.java similarity index 75% rename from sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/KeyValue.java rename to sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/KeyValue.java index 8015561e1..77e5fdfb7 100644 --- a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/KeyValue.java +++ b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/KeyValue.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.sentry.provider.file; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_SPLITTER; +package org.apache.sentry.policy.common; import java.util.List; @@ -29,34 +27,38 @@ public class KeyValue { private final String value; public KeyValue(String keyValue) { - List kvList = Lists.newArrayList(KV_SPLITTER.trimResults().limit(2).split(keyValue)); - if(kvList.size() != 2) { + List kvList = Lists.newArrayList(PolicyConstants.KV_SPLITTER.trimResults().limit(2).split(keyValue)); + if (kvList.size() != 2) { throw new IllegalArgumentException("Invalid key value: " + keyValue + " " + kvList); } key = kvList.get(0); value = kvList.get(1); - if(key.isEmpty()) { + if (key.isEmpty()) { throw new IllegalArgumentException("Key cannot be empty"); - } else if(value.isEmpty()) { + } else if (value.isEmpty()) { throw new IllegalArgumentException("Value cannot be empty"); } } + public KeyValue(String key, String value) { super(); this.key = key; this.value = value; } + public String getKey() { return key; } + public String getValue() { return value; } @Override public String toString() { - return KV_JOINER.join(key, value); + return PolicyConstants.KV_JOINER.join(key, value); } + @Override public int hashCode() { final int prime = 31; @@ -65,25 +67,33 @@ public int hashCode() { result = prime * result + ((value == null) ? 
0 : value.hashCode()); return result; } + @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } KeyValue other = (KeyValue) obj; if (key == null) { - if (other.key != null) + if (other.key != null) { return false; - } else if (!key.equalsIgnoreCase(other.key)) + } + } else if (!key.equalsIgnoreCase(other.key)) { return false; + } if (value == null) { - if (other.value != null) + if (other.value != null) { return false; - } else if (!value.equalsIgnoreCase(other.value)) + } + } else if (!value.equalsIgnoreCase(other.value)) { return false; + } return true; } } diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ProviderConstants.java b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PolicyConstants.java similarity index 95% rename from sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ProviderConstants.java rename to sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PolicyConstants.java index c6f7e2cbc..0bad8c172 100644 --- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ProviderConstants.java +++ b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PolicyConstants.java @@ -14,12 +14,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.sentry.provider.common; +package org.apache.sentry.policy.common; import com.google.common.base.Joiner; import com.google.common.base.Splitter; -public class ProviderConstants { +public class PolicyConstants { public static final String ROLE_SEPARATOR = ","; public static final String AUTHORIZABLE_SEPARATOR = "->"; diff --git a/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PolicyEngine.java b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PolicyEngine.java index 38a5b6544..bbb009cdb 100644 --- a/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PolicyEngine.java +++ b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PolicyEngine.java @@ -17,7 +17,6 @@ package org.apache.sentry.policy.common; -import java.util.List; import java.util.Set; import javax.annotation.concurrent.ThreadSafe; @@ -39,7 +38,7 @@ public interface PolicyEngine { * This is typically a factory that returns a privilege used to evaluate wildcards. * @return the privilege factory */ - public PrivilegeFactory getPrivilegeFactory(); + PrivilegeFactory getPrivilegeFactory(); /** * Get privileges associated with a group. Returns Strings which can be resolved @@ -50,7 +49,7 @@ public interface PolicyEngine { * @param active role-set * @return non-null immutable set of privileges */ - public ImmutableSet getAllPrivileges(Set groups, ActiveRoleSet roleSet) + ImmutableSet getAllPrivileges(Set groups, ActiveRoleSet roleSet) throws SentryConfigurationException; /** @@ -63,10 +62,10 @@ public ImmutableSet getAllPrivileges(Set groups, ActiveRoleSet r * @param authorizable Hierarchy (Can be null) * @return non-null immutable set of privileges */ - public ImmutableSet getPrivileges(Set groups, ActiveRoleSet roleSet, Authorizable... 
authorizableHierarchy) + ImmutableSet getPrivileges(Set groups, ActiveRoleSet roleSet, Authorizable... authorizableHierarchy) throws SentryConfigurationException; - public void close(); + void close(); - public void validatePolicy(boolean strictValidation) throws SentryConfigurationException; + void validatePolicy(boolean strictValidation) throws SentryConfigurationException; } diff --git a/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/Privilege.java b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/Privilege.java index c7e17342c..27d5afae0 100644 --- a/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/Privilege.java +++ b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/Privilege.java @@ -17,5 +17,5 @@ package org.apache.sentry.policy.common; public interface Privilege { - public boolean implies(Privilege p); + boolean implies(Privilege p); } diff --git a/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PrivilegeValidator.java b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PrivilegeValidator.java index 5548f0403..36abdd4a4 100644 --- a/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PrivilegeValidator.java +++ b/sentry-policy/sentry-policy-common/src/main/java/org/apache/sentry/policy/common/PrivilegeValidator.java @@ -20,5 +20,5 @@ public interface PrivilegeValidator { - public void validate(PrivilegeValidatorContext context) throws ConfigurationException; + void validate(PrivilegeValidatorContext context) throws ConfigurationException; } diff --git a/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestKeyValue.java b/sentry-policy/sentry-policy-common/src/test/java/org/apache/sentry/policy/common/TestKeyValue.java similarity index 58% rename from sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestKeyValue.java rename to sentry-policy/sentry-policy-common/src/test/java/org/apache/sentry/policy/common/TestKeyValue.java index 4353a03d5..0ab656961 100644 --- a/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestKeyValue.java +++ b/sentry-policy/sentry-policy-common/src/test/java/org/apache/sentry/policy/common/TestKeyValue.java @@ -1,25 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ -package org.apache.sentry.provider.file; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertFalse; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_JOINER; +package org.apache.sentry.policy.common; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; import org.junit.Test; @@ -32,12 +31,12 @@ public void testWithSeparators() throws Exception { assertEquals("/u/h/w/t/partition=value/", kv.getValue()); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testEmptyKey() throws Exception { new KeyValue(KV_JOINER.join("", "b")); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testEmptyValue() throws Exception { new KeyValue(KV_JOINER.join("a", "")); } @@ -49,6 +48,7 @@ public void testOneParameterConstructor() throws Exception { KeyValue kv3 = new KeyValue(KV_JOINER.join("k2", "v2")); doTest(kv1, kv2, kv3); } + @Test public void testTwoParameterConstructor() throws Exception { KeyValue kv1 = new KeyValue("k1", "v1"); diff --git a/sentry-policy/sentry-policy-db/pom.xml b/sentry-policy/sentry-policy-db/pom.xml index 4e5825f33..1b1ae43cc 100644 --- a/sentry-policy/sentry-policy-db/pom.xml +++ b/sentry-policy/sentry-policy-db/pom.xml @@ -21,7 +21,7 @@ limitations under the License. 
org.apache.sentry sentry-policy - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-policy-db diff --git a/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/AbstractDBPrivilegeValidator.java b/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/AbstractDBPrivilegeValidator.java index 1b774eea3..8bd311ae3 100644 --- a/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/AbstractDBPrivilegeValidator.java +++ b/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/AbstractDBPrivilegeValidator.java @@ -16,8 +16,8 @@ */ package org.apache.sentry.policy.db; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_SPLITTER; -import static org.apache.sentry.provider.file.PolicyFileConstants.PRIVILEGE_PREFIX; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; +import static org.apache.sentry.policy.common.PolicyConstants.PRIVILEGE_PREFIX; import java.util.List; diff --git a/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/DBModelAuthorizables.java b/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/DBModelAuthorizables.java index e47c7338e..96b172dfb 100644 --- a/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/DBModelAuthorizables.java +++ b/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/DBModelAuthorizables.java @@ -24,7 +24,7 @@ import org.apache.sentry.core.model.db.Server; import org.apache.sentry.core.model.db.Table; import org.apache.sentry.core.model.db.View; -import org.apache.sentry.provider.file.KeyValue; +import org.apache.sentry.policy.common.KeyValue; public class DBModelAuthorizables { diff --git a/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/DBWildcardPrivilege.java b/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/DBWildcardPrivilege.java index e2de7a7fc..116e0aa96 100644 --- a/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/DBWildcardPrivilege.java +++ b/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/DBWildcardPrivilege.java @@ -21,20 +21,15 @@ package org.apache.sentry.policy.db; -import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_SPLITTER; - import java.util.List; import org.apache.sentry.core.common.utils.PathUtils; import org.apache.sentry.core.model.db.AccessConstants; import org.apache.sentry.core.model.db.DBModelAuthorizable.AuthorizableType; +import org.apache.sentry.policy.common.PolicyConstants; import org.apache.sentry.policy.common.Privilege; import org.apache.sentry.policy.common.PrivilegeFactory; -import org.apache.sentry.provider.file.KeyValue; -import org.apache.sentry.provider.file.PolicyFileConstants; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.sentry.policy.common.KeyValue; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -44,8 +39,6 @@ // XXX this class is made ugly by the fact that Action is not a Authorizable. 
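// A minimal sketch (assumed usage, not part of this patch) of how the reworked
// impliesKeyValue() below behaves once the exact-match test becomes the final fallback:
//   new DBWildcardPrivilege("server=server1->db=*")
//       .implies(new DBWildcardPrivilege("server=server1->db=db1"));  // true: "*" matches any db
//   new DBWildcardPrivilege("server=server1->db=db1")
//       .implies(new DBWildcardPrivilege("server=server1->db=db2"));  // false: values differ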
public class DBWildcardPrivilege implements Privilege { - private static final Logger LOGGER = LoggerFactory - .getLogger(DBWildcardPrivilege.class); private final ImmutableList<KeyValue> parts; @@ -55,7 +48,8 @@ public DBWildcardPrivilege(String wildcardString) { throw new IllegalArgumentException("Wildcard string cannot be null or empty."); } List<KeyValue> parts = Lists.newArrayList(); - for (String authorizable : AUTHORIZABLE_SPLITTER.trimResults().split(wildcardString)) { + for (String authorizable : PolicyConstants.AUTHORIZABLE_SPLITTER.trimResults().split( + wildcardString)) { if (authorizable.isEmpty()) { throw new IllegalArgumentException("Privilege '" + wildcardString + "' has an empty section"); } @@ -121,20 +115,20 @@ private boolean impliesKeyValue(KeyValue policyPart, KeyValue requestPart) { Preconditions.checkState(policyPart.getKey().equalsIgnoreCase(requestPart.getKey()), "Please report, this method should not be called with two different keys"); if(policyPart.getValue().equals(AccessConstants.ALL) || - policyPart.getValue().equalsIgnoreCase("ALL") || policyPart.equals(requestPart)) { + policyPart.getValue().equalsIgnoreCase("ALL")) { return true; - } else if (!PolicyFileConstants.PRIVILEGE_NAME.equalsIgnoreCase(policyPart.getKey()) + } else if (!PolicyConstants.PRIVILEGE_NAME.equalsIgnoreCase(policyPart.getKey()) && AccessConstants.ALL.equalsIgnoreCase(requestPart.getValue())) { /* privilege request is to match with any object of given type */ return true; - } else if (!PolicyFileConstants.PRIVILEGE_NAME.equalsIgnoreCase(policyPart.getKey()) + } else if (!PolicyConstants.PRIVILEGE_NAME.equalsIgnoreCase(policyPart.getKey()) && AccessConstants.SOME.equalsIgnoreCase(requestPart.getValue())) { /* privilege request is to match with any object of given type */ return true; } else if(policyPart.getKey().equalsIgnoreCase(AuthorizableType.URI.name())) { return impliesURI(policyPart.getValue(), requestPart.getValue()); } - return false; + return policyPart.equals(requestPart); } @VisibleForTesting @@ -144,7 +138,7 @@ protected static boolean impliesURI(String privilege, String request) { @Override public String toString() { - return AUTHORIZABLE_JOINER.join(parts); + return PolicyConstants.AUTHORIZABLE_JOINER.join(parts); } @Override diff --git a/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/SimpleDBPolicyEngine.java b/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/SimpleDBPolicyEngine.java index a03794ea0..b5b584f62 100644 --- a/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/SimpleDBPolicyEngine.java +++ b/sentry-policy/sentry-policy-db/src/main/java/org/apache/sentry/policy/db/SimpleDBPolicyEngine.java @@ -16,7 +16,6 @@ */ package org.apache.sentry.policy.db; -import java.util.List; import java.util.Set; import org.apache.sentry.core.common.ActiveRoleSet; @@ -63,7 +62,7 @@ public PrivilegeFactory getPrivilegeFactory() { @Override public ImmutableSet getAllPrivileges(Set groups, ActiveRoleSet roleSet) throws SentryConfigurationException { - return getPrivileges(groups, roleSet, null); + return getPrivileges(groups, roleSet); } /** diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/AbstractTestSimplePolicyEngine.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/AbstractTestSimplePolicyEngine.java index d1151e3d7..0a65b2c5a 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/AbstractTestSimplePolicyEngine.java +++
b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/AbstractTestSimplePolicyEngine.java @@ -21,7 +21,7 @@ import java.util.Set; import java.util.TreeSet; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.ActiveRoleSet; diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDBModelAuthorizables.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDBModelAuthorizables.java index 16045c74c..ad14278de 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDBModelAuthorizables.java +++ b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDBModelAuthorizables.java @@ -17,8 +17,8 @@ * under the License. */ package org.apache.sentry.policy.db; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import org.apache.sentry.core.model.db.AccessURI; import org.apache.sentry.core.model.db.Database; diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDBWildcardPrivilege.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDBWildcardPrivilege.java index bc1194e85..aa6fccdfd 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDBWildcardPrivilege.java +++ b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDBWildcardPrivilege.java @@ -17,19 +17,16 @@ * under the License. */ package org.apache.sentry.policy.db; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_SEPARATOR; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_SEPARATOR; import org.apache.sentry.core.model.db.AccessConstants; import org.apache.sentry.policy.common.Privilege; -import org.apache.sentry.provider.file.KeyValue; +import org.apache.sentry.policy.common.KeyValue; import org.junit.Test; -public class TestDBWildcardPrivilege { +public class TestDBWildcardPrivilege extends org.junit.Assert { private static final String ALL = AccessConstants.ALL; diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDatabaseRequiredInRole.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDatabaseRequiredInRole.java index f9b00b4ba..c08a4f406 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDatabaseRequiredInRole.java +++ b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestDatabaseRequiredInRole.java @@ -18,7 +18,7 @@ */ package org.apache.sentry.policy.db; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.policy.common.PrivilegeValidatorContext; import org.apache.shiro.config.ConfigurationException; @@ -43,7 +43,7 @@ public void testURIWithDBInPerDbPolicyFile() throws Exception { 
"server=server1->db=db1->URI=file:///user/db/warehouse/tab1")); Assert.fail("Expected ConfigurationException"); } catch (ConfigurationException e) { - ; + // expected } } } diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestPolicyParsingNegative.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestPolicyParsingNegative.java index 5f7c67103..fc21cebfe 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestPolicyParsingNegative.java +++ b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestPolicyParsingNegative.java @@ -19,7 +19,7 @@ import java.io.File; import java.io.IOException; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.ActiveRoleSet; diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestResourceAuthorizationProviderGeneralCases.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestResourceAuthorizationProviderGeneralCases.java index 53b83a501..89559a6e5 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestResourceAuthorizationProviderGeneralCases.java +++ b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestResourceAuthorizationProviderGeneralCases.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.Set; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.Action; diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestResourceAuthorizationProviderSpecialCases.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestResourceAuthorizationProviderSpecialCases.java index 3ae901ec4..3d3e45a39 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestResourceAuthorizationProviderSpecialCases.java +++ b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestResourceAuthorizationProviderSpecialCases.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.Set; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.Action; diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestSimpleDBPolicyEngineDFS.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestSimpleDBPolicyEngineDFS.java index f8c36e2da..77232a6cd 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestSimpleDBPolicyEngineDFS.java +++ b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestSimpleDBPolicyEngineDFS.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.Set; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestSimpleDBPolicyEngineLocalFS.java b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestSimpleDBPolicyEngineLocalFS.java index cb4e1a2e2..f779949c0 100644 --- a/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestSimpleDBPolicyEngineLocalFS.java +++ 
b/sentry-policy/sentry-policy-db/src/test/java/org/apache/sentry/policy/db/TestSimpleDBPolicyEngineLocalFS.java @@ -19,7 +19,7 @@ import java.io.File; import java.io.IOException; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.provider.file.PolicyFiles; diff --git a/sentry-policy/sentry-policy-indexer/pom.xml b/sentry-policy/sentry-policy-indexer/pom.xml index 49647c0f0..1a5058163 100644 --- a/sentry-policy/sentry-policy-indexer/pom.xml +++ b/sentry-policy/sentry-policy-indexer/pom.xml @@ -21,7 +21,7 @@ limitations under the License. org.apache.sentry sentry-policy - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-policy-indexer diff --git a/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/AbstractIndexerPrivilegeValidator.java b/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/AbstractIndexerPrivilegeValidator.java index 6f0012ba0..a01824cc1 100644 --- a/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/AbstractIndexerPrivilegeValidator.java +++ b/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/AbstractIndexerPrivilegeValidator.java @@ -16,8 +16,8 @@ */ package org.apache.sentry.policy.indexer; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_SPLITTER; -import static org.apache.sentry.provider.file.PolicyFileConstants.PRIVILEGE_PREFIX; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; +import static org.apache.sentry.policy.common.PolicyConstants.PRIVILEGE_PREFIX; import java.util.List; diff --git a/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/IndexerModelAuthorizables.java b/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/IndexerModelAuthorizables.java index 7657327a6..13893b347 100644 --- a/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/IndexerModelAuthorizables.java +++ b/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/IndexerModelAuthorizables.java @@ -19,7 +19,7 @@ import org.apache.sentry.core.model.indexer.Indexer; import org.apache.sentry.core.model.indexer.IndexerModelAuthorizable; import org.apache.sentry.core.model.indexer.IndexerModelAuthorizable.AuthorizableType; -import org.apache.sentry.provider.file.KeyValue; +import org.apache.sentry.policy.common.KeyValue; public class IndexerModelAuthorizables { diff --git a/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/IndexerWildcardPrivilege.java b/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/IndexerWildcardPrivilege.java index 5ab138253..0ec0ce14f 100644 --- a/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/IndexerWildcardPrivilege.java +++ b/sentry-policy/sentry-policy-indexer/src/main/java/org/apache/sentry/policy/indexer/IndexerWildcardPrivilege.java @@ -21,16 +21,13 @@ package org.apache.sentry.policy.indexer; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_SPLITTER; - import java.util.List; import org.apache.sentry.core.model.indexer.IndexerConstants; +import org.apache.sentry.policy.common.PolicyConstants; import org.apache.sentry.policy.common.Privilege; import 
org.apache.sentry.policy.common.PrivilegeFactory; -import org.apache.sentry.provider.file.KeyValue; -import org.apache.sentry.provider.file.PolicyFileConstants; +import org.apache.sentry.policy.common.KeyValue; import com.google.common.base.Preconditions; import com.google.common.base.Strings; @@ -47,7 +44,8 @@ public IndexerWildcardPrivilege(String wildcardString) { throw new IllegalArgumentException("Wildcard string cannot be null or empty."); } List<KeyValue> parts = Lists.newArrayList(); - for (String authorizable : AUTHORIZABLE_SPLITTER.trimResults().split(wildcardString)) { + for (String authorizable : PolicyConstants.AUTHORIZABLE_SPLITTER.trimResults().split( + wildcardString)) { if (authorizable.isEmpty()) { throw new IllegalArgumentException("Privilege '" + wildcardString + "' has an empty section"); } @@ -110,7 +108,7 @@ private boolean impliesKeyValue(KeyValue policyPart, KeyValue requestPart) { "Please report, this method should not be called with two different keys"); if(policyPart.getValue().equals(IndexerConstants.ALL) || policyPart.equals(requestPart)) { return true; - } else if (!PolicyFileConstants.PRIVILEGE_NAME.equalsIgnoreCase(policyPart.getKey()) + } else if (!PolicyConstants.PRIVILEGE_NAME.equalsIgnoreCase(policyPart.getKey()) && IndexerConstants.ALL.equalsIgnoreCase(requestPart.getValue())) { /* privilege request is to match with any object of given type */ return true; @@ -120,7 +118,7 @@ private boolean impliesKeyValue(KeyValue policyPart, KeyValue requestPart) { @Override public String toString() { - return AUTHORIZABLE_JOINER.join(parts); + return PolicyConstants.AUTHORIZABLE_JOINER.join(parts); } @Override diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/AbstractTestIndexerPolicyEngine.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/AbstractTestIndexerPolicyEngine.java index d7d1ae28f..66455e866 100644 --- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/AbstractTestIndexerPolicyEngine.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/AbstractTestIndexerPolicyEngine.java @@ -21,7 +21,7 @@ import java.util.Set; import java.util.TreeSet; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.ActiveRoleSet; diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerAuthorizationProviderGeneralCases.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerAuthorizationProviderGeneralCases.java index 00c1b6d98..d6d8b7949 100644 --- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerAuthorizationProviderGeneralCases.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerAuthorizationProviderGeneralCases.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.Set; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.Action; @@ -66,7 +66,6 @@ public class TestIndexerAuthorizationProviderGeneralCases { private static final Indexer IND_TMP = new Indexer("tmpindexer"); private static final Indexer IND_PURCHASES_PARTIAL = new Indexer("purchases_partial"); - private static final IndexerModelAction ALL = IndexerModelAction.ALL; private static final IndexerModelAction
READ = IndexerModelAction.READ; private static final IndexerModelAction WRITE = IndexerModelAction.WRITE; diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerAuthorizationProviderSpecialCases.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerAuthorizationProviderSpecialCases.java index 0765b9228..9c211b7e0 100644 --- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerAuthorizationProviderSpecialCases.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerAuthorizationProviderSpecialCases.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.Set; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.Action; diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerModelAuthorizables.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerModelAuthorizables.java index 7a6230b10..8d21dc38a 100644 --- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerModelAuthorizables.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerModelAuthorizables.java @@ -17,8 +17,8 @@ * under the License. */ package org.apache.sentry.policy.indexer; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import org.apache.sentry.core.model.indexer.Indexer; import org.junit.Test; diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyEngineDFS.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyEngineDFS.java index f439430b5..c6d671833 100644 --- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyEngineDFS.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyEngineDFS.java @@ -19,7 +19,7 @@ import java.io.File; import java.io.IOException; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyEngineLocalFS.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyEngineLocalFS.java index 95cf2a6b2..f083f492a 100644 --- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyEngineLocalFS.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyEngineLocalFS.java @@ -19,7 +19,7 @@ import java.io.File; import java.io.IOException; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.provider.file.PolicyFiles; diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyNegative.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyNegative.java index 07065607e..a453c4863 100644 
--- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyNegative.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerPolicyNegative.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.Collections; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.ActiveRoleSet; diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerRequiredInRole.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerRequiredInRole.java index 8494a8f55..57876e565 100644 --- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerRequiredInRole.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerRequiredInRole.java @@ -18,7 +18,7 @@ */ package org.apache.sentry.policy.indexer; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.policy.common.PrivilegeValidatorContext; import org.apache.shiro.config.ConfigurationException; @@ -35,7 +35,7 @@ public void testEmptyRole() throws Exception { indexerRequiredInRole.validate(new PrivilegeValidatorContext("index=index1")); Assert.fail("Expected ConfigurationException"); } catch (ConfigurationException e) { - ; + // expected } // check with db @@ -43,7 +43,7 @@ public void testEmptyRole() throws Exception { indexerRequiredInRole.validate(new PrivilegeValidatorContext("db1","index=index2")); Assert.fail("Expected ConfigurationException"); } catch (ConfigurationException e) { - ; + // expected } } diff --git a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerWildcardPrivilege.java b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerWildcardPrivilege.java index 48c5b07d7..17cebc316 100644 --- a/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerWildcardPrivilege.java +++ b/sentry-policy/sentry-policy-indexer/src/test/java/org/apache/sentry/policy/indexer/TestIndexerWildcardPrivilege.java @@ -17,18 +17,16 @@ * under the License. 
*/ package org.apache.sentry.policy.indexer; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_SEPARATOR; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_SEPARATOR; import org.apache.sentry.core.model.indexer.IndexerConstants; import org.apache.sentry.policy.common.Privilege; -import org.apache.sentry.provider.file.KeyValue; +import org.apache.sentry.policy.common.KeyValue; import org.junit.Test; -public class TestIndexerWildcardPrivilege { +public class TestIndexerWildcardPrivilege extends org.junit.Assert { private static final String ALL = IndexerConstants.ALL; diff --git a/sentry-policy/sentry-policy-kafka/pom.xml b/sentry-policy/sentry-policy-kafka/pom.xml new file mode 100644 index 000000000..21d34eb40 --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/pom.xml @@ -0,0 +1,80 @@ + + + + 4.0.0 + + org.apache.sentry + sentry-policy + 1.7.0-incubating-SNAPSHOT + + + sentry-policy-kafka + Sentry Policy for Kafka + + + + junit + junit + test + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-minicluster + test + + + log4j + log4j + + + org.apache.shiro + shiro-core + + + com.google.guava + guava + + + org.slf4j + slf4j-api + + + org.slf4j + slf4j-log4j12 + + + org.apache.sentry + sentry-core-model-kafka + + + org.apache.sentry + sentry-provider-common + + + org.apache.sentry + sentry-provider-file + + + + diff --git a/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaModelAuthorizables.java b/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaModelAuthorizables.java new file mode 100644 index 000000000..7be4241eb --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaModelAuthorizables.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.policy.kafka; + +import org.apache.sentry.core.model.kafka.Cluster; +import org.apache.sentry.core.model.kafka.ConsumerGroup; +import org.apache.sentry.core.model.kafka.KafkaAuthorizable; +import org.apache.sentry.core.model.kafka.KafkaAuthorizable.AuthorizableType; +import org.apache.sentry.core.model.kafka.Host; +import org.apache.sentry.core.model.kafka.Topic; +import org.apache.sentry.policy.common.KeyValue; +import org.apache.shiro.config.ConfigurationException; + +public class KafkaModelAuthorizables { + public static KafkaAuthorizable from(KeyValue keyValue) throws ConfigurationException { + String prefix = keyValue.getKey().toLowerCase(); + String name = keyValue.getValue(); + for (AuthorizableType type : AuthorizableType.values()) { + if (prefix.equalsIgnoreCase(type.name())) { + return from(type, name); + } + } + return null; + } + + public static KafkaAuthorizable from(String keyValue) throws ConfigurationException { + return from(new KeyValue(keyValue)); + } + + public static KafkaAuthorizable from(AuthorizableType type, String name) throws ConfigurationException { + switch (type) { + case HOST: + return new Host(name); + case CLUSTER: { + if (!name.equals(Cluster.NAME)) { + throw new ConfigurationException("Kafka's cluster resource can only have name " + Cluster.NAME); + } + return new Cluster(); + } + case TOPIC: + return new Topic(name); + case CONSUMERGROUP: + return new ConsumerGroup(name); + default: + return null; + } + } +} diff --git a/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaPrivilegeValidator.java b/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaPrivilegeValidator.java new file mode 100644 index 000000000..7383e50d8 --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaPrivilegeValidator.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.policy.kafka; + +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; +import static org.apache.sentry.policy.common.PolicyConstants.PRIVILEGE_PREFIX; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.sentry.core.model.kafka.KafkaActionFactory; +import org.apache.sentry.core.model.kafka.KafkaAuthorizable; +import org.apache.sentry.core.model.kafka.Host; +import org.apache.sentry.policy.common.PrivilegeValidator; +import org.apache.sentry.policy.common.PrivilegeValidatorContext; +import org.apache.shiro.config.ConfigurationException; + +import com.google.common.collect.Lists; + +/** + * Validator for Kafka privileges. + * Below are the requirements for a kafka privilege to be valid. + * 1. 
Privilege must start with Host resource. + * 2. Privilege must have at most one non Host resource, Cluster or Topic or ConsumerGroup, following + * Host resource. + * 3. Privilege must end with exactly one action. + */ +public class KafkaPrivilegeValidator implements PrivilegeValidator { + + public static final String KafkaPrivilegeHelpMsg = + "Invalid Kafka privilege." + + " Kafka privilege must be of the form host=<host>-><resource>=<resource_name>->action=<action>," + + " where <host> can be '*' or any valid host name," + + " <resource> can be one of " + Arrays.toString(getKafkaAuthorizablesExceptHost()) + + " <resource_name> is name of the resource," + + " <action> can be one of " + Arrays.toString(KafkaActionFactory.KafkaActionType.values()) + + "."; + + private static KafkaAuthorizable.AuthorizableType[] getKafkaAuthorizablesExceptHost() { + final KafkaAuthorizable.AuthorizableType[] authorizableTypes = KafkaAuthorizable.AuthorizableType.values(); + List<KafkaAuthorizable.AuthorizableType> authorizableTypesWithoutHost = new ArrayList<>(authorizableTypes.length - 1); + for (KafkaAuthorizable.AuthorizableType authorizableType: authorizableTypes) { + if (!authorizableType.equals(KafkaAuthorizable.AuthorizableType.HOST)) { + authorizableTypesWithoutHost.add(authorizableType); + } + } + return authorizableTypesWithoutHost.toArray(new KafkaAuthorizable.AuthorizableType[authorizableTypesWithoutHost.size()]); + } + + public KafkaPrivilegeValidator() { + } + + @Override + public void validate(PrivilegeValidatorContext context) throws ConfigurationException { + List<String> splits = Lists.newArrayList(); + for (String section : AUTHORIZABLE_SPLITTER.split(context.getPrivilege())) { + splits.add(section); + } + + // Check privilege splits length is 2 or 3 + if (splits.size() < 2 || splits.size() > 3) { + throw new ConfigurationException(KafkaPrivilegeHelpMsg); + } + + // Check privilege starts with Host resource + if (isAction(splits.get(0))) { + throw new ConfigurationException("Kafka privilege can not start with an action.\n" + KafkaPrivilegeHelpMsg); + } + KafkaAuthorizable hostAuthorizable = KafkaModelAuthorizables.from(splits.get(0)); + if (hostAuthorizable == null) { + throw new ConfigurationException("No Kafka authorizable found for " + splits.get(0) + "\n." + KafkaPrivilegeHelpMsg); + } + if (!(hostAuthorizable instanceof Host)) { + throw new ConfigurationException("Kafka privilege must begin with host authorizable.\n" + KafkaPrivilegeHelpMsg); + } + + // Check privilege has at most one non Host resource following Host resource + if (splits.size() == 3) { + if (isAction(splits.get(1))) { + throw new ConfigurationException("Kafka privilege can have action only at the end of privilege.\n" + KafkaPrivilegeHelpMsg); + } + KafkaAuthorizable authorizable = KafkaModelAuthorizables.from(splits.get(1)); + if (authorizable == null) { + throw new ConfigurationException("No Kafka authorizable found for " + splits.get(1) + "\n."
+ KafkaPrivilegeHelpMsg); + } + if (authorizable instanceof Host) { + throw new ConfigurationException("Host authorizable can be specified just once in a Kafka privilege.\n" + KafkaPrivilegeHelpMsg); + } + } + + // Check privilege ends with exactly one valid action + if (!isAction(splits.get(splits.size() - 1))) { + throw new ConfigurationException("Kafka privilege must end with a valid action.\n" + KafkaPrivilegeHelpMsg); + } + } + + private boolean isAction(String privilegePart) { + final String privilege = privilegePart.toLowerCase(); + final String action = privilege.replace(PRIVILEGE_PREFIX, "").toLowerCase(); + return privilege.startsWith(PRIVILEGE_PREFIX) && + KafkaActionFactory.getInstance().getActionByName(action) != null; + } +} diff --git a/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaWildcardPrivilege.java b/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaWildcardPrivilege.java new file mode 100644 index 000000000..bc299b02e --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/KafkaWildcardPrivilege.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.policy.kafka; + +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; + +import java.util.List; + +import org.apache.sentry.core.model.kafka.KafkaActionConstant; +import org.apache.sentry.core.model.kafka.KafkaAuthorizable; +import org.apache.sentry.policy.common.Privilege; +import org.apache.sentry.policy.common.PrivilegeFactory; +import org.apache.sentry.policy.common.KeyValue; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +public class KafkaWildcardPrivilege implements Privilege { + + private static String ALL_HOSTS = "*"; + + public static class Factory implements PrivilegeFactory { + @Override + public Privilege createPrivilege(String permission) { + return new KafkaWildcardPrivilege(permission); + } + } + + private final ImmutableList<KeyValue> parts; + + public KafkaWildcardPrivilege(String permission) { + if (Strings.isNullOrEmpty(permission)) { + throw new IllegalArgumentException("Permission string cannot be null or empty."); + } + List<KeyValue> parts = Lists.newArrayList(); + for (String authorizable : AUTHORIZABLE_SPLITTER.trimResults().split(permission.trim())) { + if (authorizable.isEmpty()) { + throw new IllegalArgumentException("Privilege '" + permission + "' has an empty section"); + } + parts.add(new KeyValue(authorizable)); + } + if (parts.isEmpty()) { + throw new AssertionError("Privilege, " + permission + ", did not consist of any valid authorizable."); + } + this.parts = ImmutableList.copyOf(parts); + } + + @Override + public boolean implies(Privilege p) { + if (!(p instanceof KafkaWildcardPrivilege)) { + return false; + } + KafkaWildcardPrivilege wp = (KafkaWildcardPrivilege)p; + List<KeyValue> otherParts = wp.parts; + if(equals(wp)) { + return true; + } + int index = 0; + for (KeyValue otherPart : otherParts) { + // If this privilege has less parts than the other privilege, everything + // after the number of parts contained + // in this privilege is automatically implied, so return true + if (parts.size() - 1 < index) { + return true; + } else { + KeyValue part = parts.get(index); + // Support for action inheritance from parent to child + if (part.getKey().equalsIgnoreCase(KafkaActionConstant.actionName) + && !(otherPart.getKey().equalsIgnoreCase(KafkaActionConstant.actionName))) { + continue; + } + // are the keys even equal + if(!part.getKey().equalsIgnoreCase(otherPart.getKey())) { + return false; + } + if (!impliesKeyValue(part, otherPart)) { + return false; + } + index++; + } + } + // If this privilege has more parts than + // the other parts, only imply it if + // all of the other parts are "*" or "ALL" + for (; index < parts.size(); index++) { + KeyValue part = parts.get(index); + if (!part.getValue().equals(KafkaActionConstant.ALL)) { + return false; + } + } + return true; + } + + private boolean impliesKeyValue(KeyValue policyPart, KeyValue requestPart) { + Preconditions.checkState(policyPart.getKey().equalsIgnoreCase(requestPart.getKey()), + "Please report, this method should not be called with two different keys"); + + // Host is a special resource, not declared as resource in Kafka. Each Kafka resource can be + // authorized based on the host request originated from and to handle this, Sentry uses host as + // a resource. Kafka allows using '*' as wildcard for all hosts. '*' however is not a valid + // Kafka action.
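+ // Illustrative note (not in the original patch): a policy part of "host=*"
+ // therefore implies any concrete requested host part, e.g. "host=broker-1".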
+    if (hasHostWildcard(policyPart)) {
+      return true;
+    }
+
+    if (KafkaActionConstant.actionName.equalsIgnoreCase(policyPart.getKey())) { // is action
+      return policyPart.getValue().equalsIgnoreCase(KafkaActionConstant.ALL) ||
+          policyPart.equals(requestPart);
+    } else {
+      return policyPart.getValue().equals(requestPart.getValue());
+    }
+  }
+
+  private boolean hasHostWildcard(KeyValue policyPart) {
+    return policyPart.getKey().equalsIgnoreCase(KafkaAuthorizable.AuthorizableType.HOST.toString()) &&
+        policyPart.getValue().equalsIgnoreCase(ALL_HOSTS);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    for (KeyValue kv : this.parts) {
+      sb.append(kv.getKey()).append("=").append(kv.getValue()).append("->");
+    }
+    return sb.toString();
+  }
+}
diff --git a/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/SimpleKafkaPolicyEngine.java b/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/SimpleKafkaPolicyEngine.java
new file mode 100644
index 000000000..7e043e1c8
--- /dev/null
+++ b/sentry-policy/sentry-policy-kafka/src/main/java/org/apache/sentry/policy/kafka/SimpleKafkaPolicyEngine.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
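Editor's note: taken together, implies() and impliesKeyValue() above give the matching semantics the tests later in this patch rely on. A small sketch, with illustrative privilege strings:

    Privilege policy = new KafkaWildcardPrivilege("host=*->topic=t1->action=all");
    Privilege request = new KafkaWildcardPrivilege("host=host9->topic=t1->action=read");
    // true: '*' matches any host, and the 'all' action implies 'read'.
    boolean allowed = policy.implies(request);
    // false: a write-only policy does not imply a read request.
    boolean writeImpliesRead =
        new KafkaWildcardPrivilege("host=*->topic=t1->action=write").implies(request);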
+ */
+package org.apache.sentry.policy.kafka;
+
+import java.util.Set;
+
+import org.apache.sentry.core.common.ActiveRoleSet;
+import org.apache.sentry.core.common.Authorizable;
+import org.apache.sentry.core.common.SentryConfigurationException;
+import org.apache.sentry.policy.common.PolicyEngine;
+import org.apache.sentry.policy.common.PrivilegeFactory;
+import org.apache.sentry.policy.common.PrivilegeValidator;
+import org.apache.sentry.provider.common.ProviderBackend;
+import org.apache.sentry.provider.common.ProviderBackendContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+
+public class SimpleKafkaPolicyEngine implements PolicyEngine {
+  private static final Logger LOGGER = LoggerFactory.getLogger(SimpleKafkaPolicyEngine.class);
+  private final ProviderBackend providerBackend;
+
+  public SimpleKafkaPolicyEngine(ProviderBackend providerBackend) {
+    this.providerBackend = providerBackend;
+    ProviderBackendContext context = new ProviderBackendContext();
+    context.setAllowPerDatabase(false);
+    context.setValidators(ImmutableList.of(new KafkaPrivilegeValidator()));
+    this.providerBackend.initialize(context);
+  }
+
+  @Override
+  public PrivilegeFactory getPrivilegeFactory() {
+    return new KafkaWildcardPrivilege.Factory();
+  }
+
+  @Override
+  public ImmutableSet<String> getAllPrivileges(Set<String> groups, ActiveRoleSet roleSet)
+      throws SentryConfigurationException {
+    return getPrivileges(groups, roleSet);
+  }
+
+  @Override
+  public ImmutableSet<String> getPrivileges(Set<String> groups, ActiveRoleSet roleSet,
+      Authorizable... authorizableHierarchy)
+      throws SentryConfigurationException {
+    if (LOGGER.isDebugEnabled()) {
+      LOGGER.debug("Getting permissions for {}", groups);
+    }
+    ImmutableSet<String> result = providerBackend.getPrivileges(groups, roleSet);
+    if (LOGGER.isDebugEnabled()) {
+      LOGGER.debug("result = {}", result);
+    }
+    return result;
+  }
+
+  @Override
+  public void close() {
+    if (providerBackend != null) {
+      providerBackend.close();
+    }
+  }
+
+  @Override
+  public void validatePolicy(boolean strictValidation)
+      throws SentryConfigurationException {
+    if (providerBackend != null) {
+      providerBackend.validatePolicy(strictValidation);
+    }
+  }
+
+}
diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/KafkaPolicyFileProviderBackend.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/KafkaPolicyFileProviderBackend.java
new file mode 100644
index 000000000..c4a2f7b7c
--- /dev/null
+++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/KafkaPolicyFileProviderBackend.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
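Editor's note: the engine above is a thin layer over its ProviderBackend. A sketch of the intended call pattern, assuming a file-backed policy (the path is hypothetical; the backend construction mirrors the test helper that follows):

    ProviderBackend backend =
        new SimpleFileProviderBackend(new Configuration(), "file:///etc/sentry/kafka-policy.ini");
    PolicyEngine engine = new SimpleKafkaPolicyEngine(backend);
    // Resolve the raw privilege strings granted to a set of groups.
    ImmutableSet<String> privileges =
        engine.getPrivileges(Sets.newHashSet("consumer_group1"), ActiveRoleSet.ALL);
    engine.close();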
+ */
+package org.apache.sentry.policy.kafka;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.provider.file.SimpleFileProviderBackend;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class KafkaPolicyFileProviderBackend extends SimpleKafkaPolicyEngine {
+  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaPolicyFileProviderBackend.class);
+
+  public KafkaPolicyFileProviderBackend(String resource) throws IOException {
+    super(new SimpleFileProviderBackend(new Configuration(), resource));
+    LOGGER.warn("The DB provider backend is preferred over the file provider backend for the Kafka policy engine");
+  }
+}
diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/MockGroupMappingServiceProvider.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/MockGroupMappingServiceProvider.java
new file mode 100644
index 000000000..572c74dbc
--- /dev/null
+++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/MockGroupMappingServiceProvider.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sentry.policy.kafka;
+
+import java.util.Set;
+
+import org.apache.sentry.provider.common.GroupMappingService;
+
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+
+public class MockGroupMappingServiceProvider implements GroupMappingService {
+  private final Multimap<String, String> userToGroupMap;
+
+  public MockGroupMappingServiceProvider(Multimap<String, String> userToGroupMap) {
+    this.userToGroupMap = userToGroupMap;
+  }
+
+  @Override
+  public Set<String> getGroups(String user) {
+    return Sets.newHashSet(userToGroupMap.get(user));
+  }
+
+}
diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaModelAuthorizables.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaModelAuthorizables.java
new file mode 100644
index 000000000..421466e74
--- /dev/null
+++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaModelAuthorizables.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
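Editor's note: MockGroupMappingServiceProvider above stands in for Hadoop group lookup in the tests. Roughly (user and group names are illustrative):

    Multimap<String, String> userToGroups = HashMultimap.create();
    userToGroups.put("user1", "consumer_group1");
    GroupMappingService mapping = new MockGroupMappingServiceProvider(userToGroups);
    Set<String> groups = mapping.getGroups("user1"); // ["consumer_group1"]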
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.kafka; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertNull; +import static junit.framework.Assert.fail; + +import org.apache.sentry.core.model.kafka.Cluster; +import org.apache.sentry.core.model.kafka.ConsumerGroup; +import org.apache.sentry.core.model.kafka.Host; +import org.apache.sentry.core.model.kafka.Topic; +import org.apache.shiro.config.ConfigurationException; +import org.junit.Test; + +public class TestKafkaModelAuthorizables { + + @Test + public void testHost() throws Exception { + Host host1 = (Host)KafkaModelAuthorizables.from("HOST=host1"); + assertEquals("host1", host1.getName()); + } + + @Test(expected=IllegalArgumentException.class) + public void testNoKV() throws Exception { + System.out.println(KafkaModelAuthorizables.from("nonsense")); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyKey() throws Exception { + System.out.println(KafkaModelAuthorizables.from("=host1")); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyValue() throws Exception { + System.out.println(KafkaModelAuthorizables.from("HOST=")); + } + + @Test + public void testNotAuthorizable() throws Exception { + assertNull(KafkaModelAuthorizables.from("k=v")); + } + + @Test + public void testResourceNameIsCaseSensitive() throws Exception { + Host host1 = (Host)KafkaModelAuthorizables.from("HOST=Host1"); + assertEquals("Host1", host1.getName()); + + Cluster cluster1 = (Cluster)KafkaModelAuthorizables.from("Cluster=kafka-cluster"); + assertEquals("kafka-cluster", cluster1.getName()); + + Topic topic1 = (Topic)KafkaModelAuthorizables.from("topic=topiC1"); + assertEquals("topiC1", topic1.getName()); + + ConsumerGroup consumergroup1 = (ConsumerGroup)KafkaModelAuthorizables.from("ConsumerGroup=CG1"); + assertEquals("CG1", consumergroup1.getName()); + } + + @Test + public void testClusterResourceNameIsRestricted() throws Exception { + try { + KafkaModelAuthorizables.from("Cluster=cluster1"); + fail("Cluster with name other than " + Cluster.NAME + " must not have been created."); + } catch (ConfigurationException cex) { + assertEquals("Exception message is not as expected.", "Kafka's cluster resource can only have name " + Cluster.NAME, cex.getMessage()); + } catch (Exception ex) { + fail("Configuration exception was expected for invalid Cluster name."); + } + } +} diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaPrivilegeValidator.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaPrivilegeValidator.java new file mode 100644 index 000000000..7caa3a9dd --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaPrivilegeValidator.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
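Editor's note: as the tests above show, KafkaModelAuthorizables.from treats the resource key case-insensitively, preserves the case of the value, returns null for unrecognized keys, and restricts the cluster resource to the single legal name. In short:

    Host host = (Host) KafkaModelAuthorizables.from("HOST=Host1");   // name "Host1", case preserved
    KafkaAuthorizable unknown = KafkaModelAuthorizables.from("k=v"); // null: not a Kafka authorizable
    Cluster cluster = (Cluster) KafkaModelAuthorizables.from("Cluster=kafka-cluster"); // only legal cluster name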
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sentry.policy.kafka;
+
+import junit.framework.Assert;
+
+import org.apache.sentry.policy.common.PrivilegeValidatorContext;
+import org.apache.shiro.config.ConfigurationException;
+import org.junit.Test;
+
+public class TestKafkaPrivilegeValidator {
+  @Test
+  public void testOnlyHostResource() {
+    KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator();
+    try {
+      kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1"));
+      Assert.fail("Expected ConfigurationException");
+    } catch (ConfigurationException ex) {
+      Assert.assertEquals(KafkaPrivilegeValidator.KafkaPrivilegeHelpMsg, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testWithoutHostResource() throws Exception {
+    KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator();
+    testHostResourceIsChecked(kafkaPrivilegeValidator, "cluster=kafka-cluster->action=read");
+    testHostResourceIsChecked(kafkaPrivilegeValidator, "topic=t1->action=read");
+    testHostResourceIsChecked(kafkaPrivilegeValidator, "consumergroup=g1->action=read");
+  }
+
+  private void testHostResourceIsChecked(KafkaPrivilegeValidator kafkaPrivilegeValidator, String privilege) {
+    try {
+      kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext(privilege));
+      Assert.fail("Expected ConfigurationException");
+    } catch (ConfigurationException ex) {
+      Assert.assertEquals("Kafka privilege must begin with host authorizable.\n" + KafkaPrivilegeValidator.KafkaPrivilegeHelpMsg, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testValidPrivileges() throws Exception {
+    KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator();
+    try {
+      kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->cluster=kafka-cluster->action=read"));
+    } catch (ConfigurationException ex) {
+      Assert.fail("Unexpected ConfigurationException");
+    }
+    try {
+      kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->topic=t1->action=read"));
+    } catch (ConfigurationException ex) {
+      Assert.fail("Unexpected ConfigurationException");
+    }
+    try {
+      kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->consumergroup=g1->action=read"));
+    } catch (ConfigurationException ex) {
+      Assert.fail("Unexpected ConfigurationException");
+    }
+  }
+
+  @Test
+  public void testInvalidHostResource() throws Exception {
+    KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator();
+    try {
+      kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("hhost=host1->cluster=kafka-cluster->action=read"));
+      Assert.fail("Expected ConfigurationException");
+    } catch (ConfigurationException ex) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testInvalidClusterResource() throws Exception {
+    KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator();
+    try {
+      kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->clluster=kafka-cluster->action=read"));
Assert.fail("Expected ConfigurationException"); + } catch (ConfigurationException ex) { + } + } + + @Test + public void testInvalidTopicResource() throws Exception { + KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator(); + try { + kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->ttopic=t1->action=read")); + Assert.fail("Expected ConfigurationException"); + } catch (ConfigurationException ex) { + } + } + + @Test + public void testInvalidConsumerGroupResource() throws Exception { + KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator(); + try { + kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->coonsumergroup=g1->action=read")); + Assert.fail("Expected ConfigurationException"); + } catch (ConfigurationException ex) { + } + } + + @Test + public void testPrivilegeMustHaveExcatlyOneHost() { + KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator(); + try { + kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->host=host2->action=read")); + Assert.fail("Multiple Host resources are not allowed within a Kafka privilege."); + } catch (ConfigurationException ex) { + Assert.assertEquals("Host authorizable can be specified just once in a Kafka privilege.\n" + KafkaPrivilegeValidator.KafkaPrivilegeHelpMsg, ex.getMessage()); + } + } + + @Test + public void testPrivilegeCanNotStartWithAction() { + KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator(); + try { + kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("action=write->host=host1->topic=t1")); + Assert.fail("Kafka privilege can not start with an action."); + } catch (ConfigurationException ex) { + Assert.assertEquals("Kafka privilege can not start with an action.\n" + KafkaPrivilegeValidator.KafkaPrivilegeHelpMsg, ex.getMessage()); + } + } + + @Test + public void testPrivilegeWithMoreParts() { + KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator(); + try { + kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->topic=t1->consumergroup=cg1->action=read")); + Assert.fail("Kafka privilege can have one Host authorizable, at most one non Host authorizable and one action."); + } catch (ConfigurationException ex) { + Assert.assertEquals(KafkaPrivilegeValidator.KafkaPrivilegeHelpMsg, ex.getMessage()); + } + } + + @Test + public void testPrivilegeNotEndingWithAction() { + KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator(); + try { + kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->topic=t1->consumergroup=cg1")); + Assert.fail("Kafka privilege must end with a valid action."); + } catch (ConfigurationException ex) { + Assert.assertEquals("Kafka privilege must end with a valid action.\n" + KafkaPrivilegeValidator.KafkaPrivilegeHelpMsg, ex.getMessage()); + } + } + + @Test + public void testPrivilegeNotEndingWithValidAction() { + KafkaPrivilegeValidator kafkaPrivilegeValidator = new KafkaPrivilegeValidator(); + try { + kafkaPrivilegeValidator.validate(new PrivilegeValidatorContext("host=host1->topic=t1->action=bla")); + Assert.fail("Kafka privilege must end with a valid action."); + } catch (ConfigurationException ex) { + Assert.assertEquals("Kafka privilege must end with a valid action.\n" + KafkaPrivilegeValidator.KafkaPrivilegeHelpMsg, ex.getMessage()); + } + } +} diff --git 
a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaWildcardPrivilege.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaWildcardPrivilege.java new file mode 100644 index 000000000..bdef91c78 --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/TestKafkaWildcardPrivilege.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.kafka; +import static junit.framework.Assert.assertFalse; +import static junit.framework.Assert.assertTrue; + +import org.apache.sentry.core.model.kafka.KafkaActionConstant; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.policy.common.Privilege; +import org.apache.sentry.policy.common.KeyValue; +import org.junit.Test; + +public class TestKafkaWildcardPrivilege { + private static final Privilege KAFKA_HOST1_ALL = + create(new KeyValue("HOST", "host1"), new KeyValue("action", KafkaActionConstant.ALL)); + private static final Privilege KAFKA_HOST1_READ = + create(new KeyValue("HOST", "host1"), new KeyValue("action", KafkaActionConstant.READ)); + private static final Privilege KAFKA_HOST1_WRITE = + create(new KeyValue("HOST", "host1"), new KeyValue("action", KafkaActionConstant.WRITE)); + + private static final Privilege KAFKA_HOST1_TOPIC1_ALL = + create(new KeyValue("HOST", "host1"), new KeyValue("TOPIC", "topic1"), new KeyValue("action", KafkaActionConstant.ALL)); + private static final Privilege KAFKA_HOST1_TOPIC1_READ = + create(new KeyValue("HOST", "host1"), new KeyValue("TOPIC", "topic1"), new KeyValue("action", KafkaActionConstant.READ)); + private static final Privilege KAFKA_HOST1_TOPIC1_WRITE = + create(new KeyValue("HOST", "host1"), new KeyValue("TOPIC", "topic1"), new KeyValue("action", KafkaActionConstant.WRITE)); + + private static final Privilege KAFKA_HOST1_CLUSTER1_ALL = + create(new KeyValue("HOST", "host1"), new KeyValue("CLUSTER", "cluster1"), new KeyValue("action", KafkaActionConstant.ALL)); + private static final Privilege KAFKA_HOST1_CLUSTER1_READ = + create(new KeyValue("HOST", "host1"), new KeyValue("CLUSTER", "cluster1"), new KeyValue("action", KafkaActionConstant.READ)); + private static final Privilege KAFKA_HOST1_CLUSTER1_WRITE = + create(new KeyValue("HOST", "host1"), new KeyValue("CLUSTER", "cluster1"), new KeyValue("action", KafkaActionConstant.WRITE)); + + private static final Privilege KAFKA_HOST1_GROUP1_ALL = + create(new KeyValue("HOST", "host1"), new KeyValue("GROUP", "cgroup1"), new KeyValue("action", KafkaActionConstant.ALL)); + private static final Privilege KAFKA_HOST1_GROUP1_READ = + create(new KeyValue("HOST", "host1"), new KeyValue("GROUP", "cgroup1"), new 
KeyValue("action", KafkaActionConstant.READ)); + private static final Privilege KAFKA_HOST1_GROUP1_WRITE = + create(new KeyValue("HOST", "host1"), new KeyValue("GROUP", "cgroup1"), new KeyValue("action", KafkaActionConstant.WRITE)); + + @Test + public void testSimpleAction() throws Exception { + //host + assertFalse(KAFKA_HOST1_WRITE.implies(KAFKA_HOST1_READ)); + assertFalse(KAFKA_HOST1_READ.implies(KAFKA_HOST1_WRITE)); + //consumer group + assertFalse(KAFKA_HOST1_GROUP1_WRITE.implies(KAFKA_HOST1_GROUP1_READ)); + assertFalse(KAFKA_HOST1_GROUP1_READ.implies(KAFKA_HOST1_GROUP1_WRITE)); + //topic + assertFalse(KAFKA_HOST1_TOPIC1_READ.implies(KAFKA_HOST1_TOPIC1_WRITE)); + assertFalse(KAFKA_HOST1_TOPIC1_WRITE.implies(KAFKA_HOST1_TOPIC1_READ)); + //cluster + assertFalse(KAFKA_HOST1_CLUSTER1_READ.implies(KAFKA_HOST1_CLUSTER1_WRITE)); + assertFalse(KAFKA_HOST1_CLUSTER1_WRITE.implies(KAFKA_HOST1_CLUSTER1_READ)); + } + + @Test + public void testShorterThanRequest() throws Exception { + //topic + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_TOPIC1_ALL)); + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_TOPIC1_READ)); + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_TOPIC1_WRITE)); + + assertFalse(KAFKA_HOST1_WRITE.implies(KAFKA_HOST1_READ)); + assertTrue(KAFKA_HOST1_READ.implies(KAFKA_HOST1_TOPIC1_READ)); + assertTrue(KAFKA_HOST1_WRITE.implies(KAFKA_HOST1_TOPIC1_WRITE)); + + //cluster + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_CLUSTER1_ALL)); + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_CLUSTER1_READ)); + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_CLUSTER1_WRITE)); + + assertTrue(KAFKA_HOST1_READ.implies(KAFKA_HOST1_CLUSTER1_READ)); + assertTrue(KAFKA_HOST1_WRITE.implies(KAFKA_HOST1_CLUSTER1_WRITE)); + + //consumer group + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_GROUP1_ALL)); + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_GROUP1_READ)); + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_GROUP1_WRITE)); + + assertTrue(KAFKA_HOST1_READ.implies(KAFKA_HOST1_GROUP1_READ)); + assertTrue(KAFKA_HOST1_WRITE.implies(KAFKA_HOST1_GROUP1_WRITE)); + } + + @Test + public void testActionAll() throws Exception { + //host + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_READ)); + assertTrue(KAFKA_HOST1_ALL.implies(KAFKA_HOST1_WRITE)); + + //topic + assertTrue(KAFKA_HOST1_TOPIC1_ALL.implies(KAFKA_HOST1_TOPIC1_READ)); + assertTrue(KAFKA_HOST1_TOPIC1_ALL.implies(KAFKA_HOST1_TOPIC1_WRITE)); + + //cluster + assertTrue(KAFKA_HOST1_CLUSTER1_ALL.implies(KAFKA_HOST1_CLUSTER1_READ)); + assertTrue(KAFKA_HOST1_CLUSTER1_ALL.implies(KAFKA_HOST1_CLUSTER1_WRITE)); + + //consumer group + assertTrue(KAFKA_HOST1_GROUP1_ALL.implies(KAFKA_HOST1_GROUP1_READ)); + assertTrue(KAFKA_HOST1_GROUP1_ALL.implies(KAFKA_HOST1_GROUP1_WRITE)); + } + + @Test + public void testUnexpected() throws Exception { + Privilege p = new Privilege() { + @Override + public boolean implies(Privilege p) { + return false; + } + }; + Privilege topic1 = create(new KeyValue("HOST", "host"), new KeyValue("TOPIC", "topic1")); + assertFalse(topic1.implies(null)); + assertFalse(topic1.implies(p)); + assertFalse(topic1.equals(null)); + assertFalse(topic1.equals(p)); + } + + @Test(expected=IllegalArgumentException.class) + public void testNullString() throws Exception { + System.out.println(create((String)null)); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyString() throws Exception { + System.out.println(create("")); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyKey() throws Exception { + 
System.out.println(create(PolicyConstants.KV_JOINER.join("", "host1")));
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testEmptyValue() throws Exception {
+    System.out.println(create(PolicyConstants.KV_JOINER.join("HOST", "")));
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testEmptyPart() throws Exception {
+    System.out.println(create(PolicyConstants.AUTHORIZABLE_JOINER.
+        join(PolicyConstants.KV_JOINER.join("HOST", "host1"), "")));
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testOnlySeparators() throws Exception {
+    System.out.println(create(PolicyConstants.AUTHORIZABLE_JOINER.
+        join(PolicyConstants.KV_SEPARATOR, PolicyConstants.KV_SEPARATOR, PolicyConstants.KV_SEPARATOR)));
+  }
+
+  static KafkaWildcardPrivilege create(KeyValue... keyValues) {
+    return create(PolicyConstants.AUTHORIZABLE_JOINER.join(keyValues));
+  }
+
+  static KafkaWildcardPrivilege create(String s) {
+    return new KafkaWildcardPrivilege(s);
+  }
+}
diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/AbstractTestKafkaPolicyEngine.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/AbstractTestKafkaPolicyEngine.java
new file mode 100644
index 000000000..810c05e34
--- /dev/null
+++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/AbstractTestKafkaPolicyEngine.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
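Editor's note: the abstract test below pins each group in test-authz-provider.ini to the exact privilege string it should resolve to. The assertion pattern, simplified relative to the TreeSet/toString comparison the tests actually use:

    Set<String> privs = policy.getPrivileges(Sets.newHashSet("consumer_group1"), ActiveRoleSet.ALL);
    Assert.assertEquals(Sets.newHashSet("host=host1->topic=t1->action=read"), privs);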
+ */ +package org.apache.sentry.policy.kafka.engine; + +import java.io.File; +import java.io.IOException; +import java.util.Set; +import java.util.TreeSet; + +import junit.framework.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.policy.common.PolicyEngine; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public abstract class AbstractTestKafkaPolicyEngine { + + private static final String ADMIN = "host=*->action=all"; + private static final String ADMIN_HOST1 = "host=host1->action=all"; + private static final String CONSUMER_T1_ALL = "host=*->topic=t1->action=read"; + private static final String CONSUMER_T1_HOST1 = "host=host1->topic=t1->action=read"; + private static final String CONSUMER_T2_HOST2 = "host=host2->topic=t2->action=read"; + private static final String PRODUCER_T1_ALL = "host=*->topic=t1->action=write"; + private static final String PRODUCER_T1_HOST1 = "host=host1->topic=t1->action=write"; + private static final String PRODUCER_T2_HOST2 = "host=host2->topic=t2->action=write"; + private static final String CONSUMER_PRODUCER_T1 = "host=host1->topic=t1->action=all"; + + private PolicyEngine policy; + private static File baseDir; + + @BeforeClass + public static void setupClazz() throws IOException { + baseDir = Files.createTempDir(); + } + + @AfterClass + public static void teardownClazz() throws IOException { + if (baseDir != null) { + FileUtils.deleteQuietly(baseDir); + } + } + + protected void setPolicy(PolicyEngine policy) { + this.policy = policy; + } + + protected static File getBaseDir() { + return baseDir; + } + + @Before + public void setup() throws IOException { + afterSetup(); + } + + @After + public void teardown() throws IOException { + beforeTeardown(); + } + + protected void afterSetup() throws IOException {} + + protected void beforeTeardown() throws IOException {} + + + @Test + public void testConsumer0() throws Exception { + Set expected = Sets.newTreeSet(Sets.newHashSet(CONSUMER_T1_ALL)); + Assert.assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("consumer_group0"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testConsumer1() throws Exception { + Set expected = Sets.newTreeSet(Sets.newHashSet(CONSUMER_T1_HOST1)); + Assert.assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("consumer_group1"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testConsumer2() throws Exception { + Set expected = Sets.newTreeSet(Sets.newHashSet(CONSUMER_T2_HOST2)); + Assert.assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("consumer_group2"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testProducer0() throws Exception { + Set expected = Sets.newTreeSet(Sets.newHashSet(PRODUCER_T1_ALL)); + Assert.assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("producer_group0"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testProducer1() throws Exception { + Set expected = Sets.newTreeSet(Sets.newHashSet(PRODUCER_T1_HOST1)); + Assert.assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("producer_group1"), ActiveRoleSet.ALL)) + .toString()); + } + + + @Test + public void testProducer2() throws Exception { + Set expected = 
Sets.newTreeSet(Sets.newHashSet(PRODUCER_T2_HOST2)); + Assert.assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("producer_group2"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testConsumerProducer0() throws Exception { + Set expected = Sets.newTreeSet(Sets.newHashSet(CONSUMER_PRODUCER_T1)); + Assert.assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("consumer_producer_group0"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testSubAdmin() throws Exception { + Set expected = Sets.newTreeSet(Sets.newHashSet(ADMIN_HOST1)); + Assert.assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("subadmin_group"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testAdmin() throws Exception { + Set expected = Sets.newTreeSet(Sets.newHashSet(ADMIN)); + Assert + .assertEquals(expected.toString(), + new TreeSet(policy.getPrivileges(set("admin_group"), ActiveRoleSet.ALL)) + .toString()); + } + + private static Set set(String... values) { + return Sets.newHashSet(values); + } +} diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/TestKafkaPolicyEngineDFS.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/TestKafkaPolicyEngineDFS.java new file mode 100644 index 000000000..f2bd3c84e --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/TestKafkaPolicyEngineDFS.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.sentry.policy.kafka.engine; + +import java.io.File; +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.sentry.policy.kafka.KafkaPolicyFileProviderBackend; +import org.apache.sentry.provider.file.PolicyFiles; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +public class TestKafkaPolicyEngineDFS extends AbstractTestKafkaPolicyEngine { + private static MiniDFSCluster dfsCluster; + private static FileSystem fileSystem; + private static Path root; + private static Path etc; + + @BeforeClass + public static void setupLocalClazz() throws IOException { + File baseDir = getBaseDir(); + Assert.assertNotNull(baseDir); + File dfsDir = new File(baseDir, "dfs"); + Assert.assertTrue(dfsDir.isDirectory() || dfsDir.mkdirs()); + Configuration conf = new Configuration(); + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath()); + dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + fileSystem = dfsCluster.getFileSystem(); + root = new Path(fileSystem.getUri().toString()); + etc = new Path(root, "/etc"); + fileSystem.mkdirs(etc); + } + + @AfterClass + public static void teardownLocalClazz() { + if(dfsCluster != null) { + dfsCluster.shutdown(); + } + } + + @Override + protected void afterSetup() throws IOException { + fileSystem.delete(etc, true); + fileSystem.mkdirs(etc); + PolicyFiles.copyToDir(fileSystem, etc, "test-authz-provider.ini"); + setPolicy(new KafkaPolicyFileProviderBackend(new Path(etc, + "test-authz-provider.ini").toString())); + } + + @Override + protected void beforeTeardown() throws IOException { + fileSystem.delete(etc, true); + } +} diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/TestKafkaPolicyEngineLocalFS.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/TestKafkaPolicyEngineLocalFS.java new file mode 100644 index 000000000..4bc061dc7 --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/engine/TestKafkaPolicyEngineLocalFS.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.sentry.policy.kafka.engine; + +import java.io.File; +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.policy.kafka.KafkaPolicyFileProviderBackend; +import org.apache.sentry.provider.file.PolicyFiles; + +public class TestKafkaPolicyEngineLocalFS extends AbstractTestKafkaPolicyEngine { + + @Override + protected void afterSetup() throws IOException { + File baseDir = getBaseDir(); + Assert.assertNotNull(baseDir); + Assert.assertTrue(baseDir.isDirectory() || baseDir.mkdirs()); + PolicyFiles.copyToDir(baseDir, "test-authz-provider.ini"); + setPolicy(new KafkaPolicyFileProviderBackend(new File(baseDir, "test-authz-provider.ini").getPath())); + } + + @Override + protected void beforeTeardown() throws IOException { + File baseDir = getBaseDir(); + Assert.assertNotNull(baseDir); + FileUtils.deleteQuietly(baseDir); + } +} diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaAuthorizationProviderGeneralCases.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaAuthorizationProviderGeneralCases.java new file mode 100644 index 000000000..386d2d5f6 --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaAuthorizationProviderGeneralCases.java @@ -0,0 +1,218 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
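Editor's note: the general-cases test that follows drives the full provider stack; its core call is ResourceAuthorizationProvider.hasAccess. A sketch of one decision, given the authzProvider built in the test's constructor (user and resource names come from test-authz-provider.ini):

    Subject user = new Subject("consumer1");
    List<? extends Authorizable> hierarchy = Arrays.asList(new Host("host1"), new Topic("t1"));
    Set<KafkaAction> actions = Sets.newHashSet(new KafkaAction(KafkaActionConstant.READ));
    // true: consumer1 is in consumer_group1, which may read t1 from host1.
    boolean allowed = authzProvider.hasAccess(user, hierarchy, actions, ActiveRoleSet.ALL);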
+ */ +package org.apache.sentry.policy.kafka.provider; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +import junit.framework.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.core.common.Action; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.core.model.kafka.Cluster; +import org.apache.sentry.core.model.kafka.ConsumerGroup; +import org.apache.sentry.core.model.kafka.KafkaActionConstant; +import org.apache.sentry.core.model.kafka.KafkaActionFactory.KafkaAction; +import org.apache.sentry.core.model.kafka.Host; +import org.apache.sentry.core.model.kafka.Topic; +import org.apache.sentry.policy.kafka.KafkaPolicyFileProviderBackend; +import org.apache.sentry.policy.kafka.MockGroupMappingServiceProvider; +import org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider; +import org.apache.sentry.provider.common.ResourceAuthorizationProvider; +import org.apache.sentry.provider.file.PolicyFiles; +import org.junit.After; +import org.junit.Test; + +import com.google.common.base.Objects; +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public class TestKafkaAuthorizationProviderGeneralCases { + private static final Multimap USER_TO_GROUP_MAP = HashMultimap.create(); + + private static final Host HOST_1 = new Host("host1"); + private static final Host HOST_2 = new Host("host2"); + private static final Cluster cluster1 = new Cluster(); + private static final Topic topic1 = new Topic("t1"); + private static final Topic topic2 = new Topic("t2"); + private static final ConsumerGroup cgroup1 = new ConsumerGroup("cg1"); + private static final ConsumerGroup cgroup2 = new ConsumerGroup("cg2"); + + private static final KafkaAction ALL = new KafkaAction(KafkaActionConstant.ALL); + private static final KafkaAction READ = new KafkaAction(KafkaActionConstant.READ); + private static final KafkaAction WRITE = new KafkaAction(KafkaActionConstant.WRITE); + private static final KafkaAction CREATE = new KafkaAction(KafkaActionConstant.CREATE); + private static final KafkaAction DELETE = new KafkaAction(KafkaActionConstant.DELETE); + private static final KafkaAction ALTER = new KafkaAction(KafkaActionConstant.ALTER); + private static final KafkaAction DESCRIBE = new KafkaAction(KafkaActionConstant.DESCRIBE); + private static final KafkaAction CLUSTER_ACTION = new KafkaAction( + KafkaActionConstant.CLUSTER_ACTION); + + private static final Set allActions = Sets.newHashSet(ALL, READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE, CLUSTER_ACTION); + + private static final Subject ADMIN = new Subject("admin1"); + private static final Subject SUB_ADMIN = new Subject("subadmin1"); + private static final Subject CONSUMER0 = new Subject("consumer0"); + private static final Subject CONSUMER1 = new Subject("consumer1"); + private static final Subject CONSUMER2 = new Subject("consumer2"); + private static final Subject PRODUCER0 = new Subject("producer0"); + private static final Subject PRODUCER1 = new Subject("producer1"); + private static final Subject PRODUCER2 = new Subject("producer2"); + private static final Subject CONSUMER_PRODUCER0 = new Subject("consumer_producer0"); + + private static final String ADMIN_GROUP = "admin_group"; + private static final String 
SUBADMIN_GROUP = "subadmin_group"; + private static final String CONSUMER_GROUP0 = "consumer_group0"; + private static final String CONSUMER_GROUP1 = "consumer_group1"; + private static final String CONSUMER_GROUP2 = "consumer_group2"; + private static final String PRODUCER_GROUP0 = "producer_group0"; + private static final String PRODUCER_GROUP1 = "producer_group1"; + private static final String PRODUCER_GROUP2 = "producer_group2"; + private static final String CONSUMER_PRODUCER_GROUP0 = "consumer_producer_group0"; + + static { + USER_TO_GROUP_MAP.putAll(ADMIN.getName(), Arrays.asList(ADMIN_GROUP)); + USER_TO_GROUP_MAP.putAll(SUB_ADMIN.getName(), Arrays.asList(SUBADMIN_GROUP )); + USER_TO_GROUP_MAP.putAll(CONSUMER0.getName(), Arrays.asList(CONSUMER_GROUP0)); + USER_TO_GROUP_MAP.putAll(CONSUMER1.getName(), Arrays.asList(CONSUMER_GROUP1)); + USER_TO_GROUP_MAP.putAll(CONSUMER2.getName(), Arrays.asList(CONSUMER_GROUP2)); + USER_TO_GROUP_MAP.putAll(PRODUCER0.getName(), Arrays.asList(PRODUCER_GROUP0)); + USER_TO_GROUP_MAP.putAll(PRODUCER1.getName(), Arrays.asList(PRODUCER_GROUP1)); + USER_TO_GROUP_MAP.putAll(PRODUCER2.getName(), Arrays.asList(PRODUCER_GROUP2)); + USER_TO_GROUP_MAP.putAll(CONSUMER_PRODUCER0.getName(), Arrays.asList(CONSUMER_PRODUCER_GROUP0)); + } + + private final ResourceAuthorizationProvider authzProvider; + private File baseDir; + + public TestKafkaAuthorizationProviderGeneralCases() throws IOException { + baseDir = Files.createTempDir(); + PolicyFiles.copyToDir(baseDir, "test-authz-provider.ini"); + authzProvider = new HadoopGroupResourceAuthorizationProvider( + new KafkaPolicyFileProviderBackend(new File(baseDir, "test-authz-provider.ini").getPath()), + new MockGroupMappingServiceProvider(USER_TO_GROUP_MAP)); + } + + @After + public void teardown() { + if(baseDir != null) { + FileUtils.deleteQuietly(baseDir); + } + } + + private void doTestResourceAuthorizationProvider(Subject subject, List authorizableHierarchy, + Set actions, boolean expected) throws Exception { + Objects.ToStringHelper helper = Objects.toStringHelper("TestParameters"); + helper.add("Subject", subject).add("authzHierarchy", authorizableHierarchy).add("action", actions); + Assert.assertEquals(helper.toString(), expected, + authzProvider.hasAccess(subject, authorizableHierarchy, actions, ActiveRoleSet.ALL)); + } + + @Test + public void testAdmin() throws Exception { + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_1,cluster1), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_1,topic1), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_1,topic2), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_1,cgroup1), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_1,cgroup2), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_1), allActions, true); + + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_2,cluster1), allActions, false); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_2,topic1), allActions, false); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_2,topic2), allActions, false); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_2,cgroup1), allActions, false); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_2,cgroup2), allActions, false); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(HOST_2), 
allActions, false); + + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_1,cluster1), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_1,topic1), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_1,topic2), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_1,cgroup1), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_1,cgroup2), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_1), allActions, true); + + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_2,cluster1), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_2,topic1), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_2,topic2), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_2,cgroup1), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_2,cgroup2), allActions, true); + doTestResourceAuthorizationProvider(ADMIN, Arrays.asList(HOST_2), allActions, true); + } + + @Test + public void testConsumer() throws Exception { + for (KafkaAction action : allActions) { + for (Host host : Sets.newHashSet(HOST_1, HOST_2)) { + doTestResourceAuthorizationProvider(CONSUMER0, Arrays.asList(host, topic1), + Sets.newHashSet(action), READ.equals(action)); + } + } + for (KafkaAction action : allActions) { + for (Host host : Sets.newHashSet(HOST_1, HOST_2)) { + doTestResourceAuthorizationProvider(CONSUMER1, Arrays.asList(host, topic1), + Sets.newHashSet(action), HOST_1.equals(host) && READ.equals(action)); + } + } + for (KafkaAction action : allActions) { + for (Host host : Sets.newHashSet(HOST_1, HOST_2)) { + doTestResourceAuthorizationProvider(CONSUMER2, Arrays.asList(host, topic2), + Sets.newHashSet(action), HOST_2.equals(host) && READ.equals(action)); + } + } + } + + @Test + public void testProducer() throws Exception { + for (KafkaAction action : allActions) { + for (Host host : Sets.newHashSet(HOST_1, HOST_2)) { + doTestResourceAuthorizationProvider(PRODUCER0, Arrays.asList(host, topic1), + Sets.newHashSet(action), WRITE.equals(action)); + } + } + for (KafkaAction action : allActions) { + for (Host host : Sets.newHashSet(HOST_1, HOST_2)) { + doTestResourceAuthorizationProvider(PRODUCER1, Arrays.asList(host, topic1), + Sets.newHashSet(action), HOST_1.equals(host) && WRITE.equals(action)); + } + } + for (KafkaAction action : allActions) { + for (Host host : Sets.newHashSet(HOST_1, HOST_2)) { + doTestResourceAuthorizationProvider(PRODUCER2, Arrays.asList(host, topic2), + Sets.newHashSet(action), HOST_2.equals(host) && WRITE.equals(action)); + } + } + } + + @Test + public void testConsumerProducer() throws Exception { + for (KafkaAction action : allActions) { + doTestResourceAuthorizationProvider(CONSUMER_PRODUCER0, Arrays.asList(HOST_1, topic1), + Sets.newHashSet(action), true); + } + } + +} diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaAuthorizationProviderSpecialCases.java b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaAuthorizationProviderSpecialCases.java new file mode 100644 index 000000000..0a453ce93 --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaAuthorizationProviderSpecialCases.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.kafka.provider; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.Set; + +import junit.framework.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.core.common.Action; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.core.model.kafka.KafkaActionConstant; +import org.apache.sentry.core.model.kafka.KafkaActionFactory.KafkaAction; +import org.apache.sentry.core.model.kafka.Host; +import org.apache.sentry.core.model.kafka.Topic; +import org.apache.sentry.policy.kafka.KafkaPolicyFileProviderBackend; +import org.apache.sentry.provider.common.AuthorizationProvider; +import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider; +import org.apache.sentry.provider.file.PolicyFile; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public class TestKafkaAuthorizationProviderSpecialCases { + private AuthorizationProvider authzProvider; + private PolicyFile policyFile; + private File baseDir; + private File iniFile; + private String initResource; + @Before + public void setup() throws IOException { + baseDir = Files.createTempDir(); + iniFile = new File(baseDir, "policy.ini"); + initResource = "file://" + iniFile.getPath(); + policyFile = new PolicyFile(); + } + + @After + public void teardown() throws IOException { + if(baseDir != null) { + FileUtils.deleteQuietly(baseDir); + } + } + + @Test + public void testDuplicateEntries() throws Exception { + Subject user1 = new Subject("user1"); + Host host1 = new Host("host1"); + Topic topic1 = new Topic("t1"); + Set actions = Sets.newHashSet(new KafkaAction(KafkaActionConstant.READ)); + policyFile.addGroupsToUser(user1.getName(), true, "group1", "group1") + .addRolesToGroup("group1", true, "role1", "role1") + .addPermissionsToRole("role1", true, "host=host1->topic=t1->action=read", + "host=host1->topic=t1->action=read"); + policyFile.write(iniFile); + KafkaPolicyFileProviderBackend policy = new KafkaPolicyFileProviderBackend(initResource); + authzProvider = new LocalGroupResourceAuthorizationProvider(initResource, policy); + List authorizableHierarchy = ImmutableList.of(host1, topic1); + Assert.assertTrue(authorizableHierarchy.toString(), + authzProvider.hasAccess(user1, authorizableHierarchy, actions, ActiveRoleSet.ALL)); + } + +} diff --git a/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaPolicyNegative.java 
b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaPolicyNegative.java new file mode 100644 index 000000000..1cb694a0e --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/java/org/apache/sentry/policy/kafka/provider/TestKafkaPolicyNegative.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.policy.kafka.provider; + +import java.io.File; +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.policy.common.PolicyEngine; +import org.apache.sentry.policy.kafka.KafkaPolicyFileProviderBackend; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public class TestKafkaPolicyNegative { + private File baseDir; + private File globalPolicyFile; + + @Before + public void setup() { + baseDir = Files.createTempDir(); + globalPolicyFile = new File(baseDir, "global.ini"); + } + + @After + public void teardown() { + if(baseDir != null) { + FileUtils.deleteQuietly(baseDir); + } + } + + private void append(String from, File to) throws IOException { + Files.append(from + "\n", to, Charsets.UTF_8); + } + + @Test + public void testauthorizedKafkaInPolicyFile() throws Exception { + append("[groups]", globalPolicyFile); + append("other_group = other_role", globalPolicyFile); + append("[roles]", globalPolicyFile); + append("other_role = host=host1->topic=t1->action=read, host=host1->consumergroup=l1->action=read", globalPolicyFile); + PolicyEngine policy = new KafkaPolicyFileProviderBackend(globalPolicyFile.getPath()); + //malicious_group has no privilege + ImmutableSet permissions = policy.getAllPrivileges(Sets.newHashSet("malicious_group"), ActiveRoleSet.ALL); + Assert.assertTrue(permissions.toString(), permissions.isEmpty()); + //other_group has two privileges + permissions = policy.getAllPrivileges(Sets.newHashSet("other_group"), ActiveRoleSet.ALL); + Assert.assertTrue(permissions.toString(), permissions.size() == 2); + } + + @Test + public void testNoHostNameConfig() throws Exception { + append("[groups]", globalPolicyFile); + append("other_group = malicious_role", globalPolicyFile); + append("[roles]", globalPolicyFile); + append("malicious_role = topic=t1->action=read", globalPolicyFile); + PolicyEngine policy = new KafkaPolicyFileProviderBackend(globalPolicyFile.getPath()); + ImmutableSet permissions = policy.getAllPrivileges(Sets.newHashSet("other_group"), ActiveRoleSet.ALL); + Assert.assertTrue(permissions.toString(), 
diff --git a/sentry-policy/sentry-policy-kafka/src/test/resources/log4j.properties b/sentry-policy/sentry-policy-kafka/src/test/resources/log4j.properties new file mode 100644 index 000000000..7703069e8 --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/resources/log4j.properties @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements.  See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership.  The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.  You may obtain a copy of the License at +# +#     http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied.  See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Define some default values that can be overridden by system properties. +# +# For testing, it may also be convenient to specify + +log4j.rootLogger=DEBUG,console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.target=System.err +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n + +log4j.logger.org.apache.hadoop.conf.Configuration=INFO \ No newline at end of file diff --git a/sentry-policy/sentry-policy-kafka/src/test/resources/test-authz-provider.ini b/sentry-policy/sentry-policy-kafka/src/test/resources/test-authz-provider.ini new file mode 100644 index 000000000..1951aba84 --- /dev/null +++ b/sentry-policy/sentry-policy-kafka/src/test/resources/test-authz-provider.ini @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements.  See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership.  The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +[groups] +admin_group = admin_all +subadmin_group = admin_host1 +consumer_group0 = consumer_t1_all +consumer_group1 = consumer_t1_host1 +consumer_group2 = consumer_t2_host2 +producer_group0 = producer_t1_all +producer_group1 = producer_t1_host1 +producer_group2 = producer_t2_host2 +consumer_producer_group0 = consumer_producer_t1 + +[roles] +admin_all = host=*->action=all +admin_host1 = host=host1->action=all +consumer_t1_all = host=*->topic=t1->action=read +consumer_t1_host1 = host=host1->topic=t1->action=read +consumer_t2_host2 = host=host2->topic=t2->action=read +producer_t1_all = host=*->topic=t1->action=write +producer_t1_host1 = host=host1->topic=t1->action=write +producer_t2_host2 = host=host2->topic=t2->action=write +consumer_producer_t1 = host=host1->topic=t1->action=all diff --git a/sentry-policy/sentry-policy-search/pom.xml b/sentry-policy/sentry-policy-search/pom.xml index 8dcaeb267..673c615ed 100644 --- a/sentry-policy/sentry-policy-search/pom.xml +++ b/sentry-policy/sentry-policy-search/pom.xml @@ -21,7 +21,7 @@ limitations under the License. org.apache.sentry sentry-policy - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-policy-search diff --git a/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/AbstractSearchPrivilegeValidator.java b/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/AbstractSearchPrivilegeValidator.java index a4e611cc2..054c354d8 100644 --- a/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/AbstractSearchPrivilegeValidator.java +++ b/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/AbstractSearchPrivilegeValidator.java @@ -16,8 +16,8 @@ */ package org.apache.sentry.policy.search; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_SPLITTER; -import static org.apache.sentry.provider.file.PolicyFileConstants.PRIVILEGE_PREFIX; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; +import static org.apache.sentry.policy.common.PolicyConstants.PRIVILEGE_PREFIX; import java.util.List; diff --git a/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/SearchModelAuthorizables.java b/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/SearchModelAuthorizables.java index 655148593..252f50ab6 100644 --- a/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/SearchModelAuthorizables.java +++ b/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/SearchModelAuthorizables.java @@ -19,7 +19,7 @@ import org.apache.sentry.core.model.search.Collection; import org.apache.sentry.core.model.search.SearchModelAuthorizable; import org.apache.sentry.core.model.search.SearchModelAuthorizable.AuthorizableType; -import org.apache.sentry.provider.file.KeyValue; +import org.apache.sentry.policy.common.KeyValue; public class SearchModelAuthorizables { diff --git 
a/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/SearchWildcardPrivilege.java b/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/SearchWildcardPrivilege.java index 9a33fcf78..e25faf2dd 100644 --- a/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/SearchWildcardPrivilege.java +++ b/sentry-policy/sentry-policy-search/src/main/java/org/apache/sentry/policy/search/SearchWildcardPrivilege.java @@ -21,16 +21,13 @@ package org.apache.sentry.policy.search; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_SPLITTER; - import java.util.List; import org.apache.sentry.core.model.search.SearchConstants; +import org.apache.sentry.policy.common.PolicyConstants; import org.apache.sentry.policy.common.Privilege; import org.apache.sentry.policy.common.PrivilegeFactory; -import org.apache.sentry.provider.file.KeyValue; -import org.apache.sentry.provider.file.PolicyFileConstants; +import org.apache.sentry.policy.common.KeyValue; import com.google.common.base.Preconditions; import com.google.common.base.Strings; @@ -47,7 +44,8 @@ public SearchWildcardPrivilege(String wildcardString) { throw new IllegalArgumentException("Wildcard string cannot be null or empty."); } List<KeyValue> parts = Lists.newArrayList(); - for (String authorizable : AUTHORIZABLE_SPLITTER.trimResults().split(wildcardString)) { + for (String authorizable : PolicyConstants.AUTHORIZABLE_SPLITTER.trimResults().split( + wildcardString)) { if (authorizable.isEmpty()) { throw new IllegalArgumentException("Privilege '" + wildcardString + "' has an empty section"); } @@ -110,7 +108,7 @@ private boolean impliesKeyValue(KeyValue policyPart, KeyValue requestPart) { "Please report, this method should not be called with two different keys"); if(policyPart.getValue().equals(SearchConstants.ALL) || policyPart.equals(requestPart)) { return true; - } else if (!PolicyFileConstants.PRIVILEGE_NAME.equalsIgnoreCase(policyPart.getKey()) + } else if (!PolicyConstants.PRIVILEGE_NAME.equalsIgnoreCase(policyPart.getKey()) && SearchConstants.ALL.equalsIgnoreCase(requestPart.getValue())) { /* privilege request is to match with any object of given type */ return true; @@ -120,7 +118,7 @@ private boolean impliesKeyValue(KeyValue policyPart, KeyValue requestPart) { @Override public String toString() { - return AUTHORIZABLE_JOINER.join(parts); + return PolicyConstants.AUTHORIZABLE_JOINER.join(parts); } @Override diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/AbstractTestSearchPolicyEngine.java b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/AbstractTestSearchPolicyEngine.java index d1c415b28..1a9b1a1bf 100644 --- a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/AbstractTestSearchPolicyEngine.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/AbstractTestSearchPolicyEngine.java @@ -21,7 +21,7 @@ import java.util.Set; import java.util.TreeSet; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.ActiveRoleSet; diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestCollectionRequiredInRole.java
b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestCollectionRequiredInRole.java index b626f1a43..f0bb62254 100644 --- a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestCollectionRequiredInRole.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestCollectionRequiredInRole.java @@ -18,7 +18,7 @@ */ package org.apache.sentry.policy.search; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.policy.common.PrivilegeValidatorContext; import org.apache.shiro.config.ConfigurationException; @@ -35,7 +35,7 @@ public void testEmptyRole() throws Exception { collRequiredInRole.validate(new PrivilegeValidatorContext("index=index1")); Assert.fail("Expected ConfigurationException"); } catch (ConfigurationException e) { - ; + // expected } // check with db @@ -43,7 +43,7 @@ public void testEmptyRole() throws Exception { collRequiredInRole.validate(new PrivilegeValidatorContext("db1","index=index2")); Assert.fail("Expected ConfigurationException"); } catch (ConfigurationException e) { - ; + // expected } } diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchAuthorizationProviderGeneralCases.java b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchAuthorizationProviderGeneralCases.java index bdb1c9675..9e1b1a709 100644 --- a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchAuthorizationProviderGeneralCases.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchAuthorizationProviderGeneralCases.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.Set; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.Action; @@ -66,7 +66,6 @@ public class TestSearchAuthorizationProviderGeneralCases { private static final Collection COLL_TMP = new Collection("tmpcollection"); private static final Collection COLL_PURCHASES_PARTIAL = new Collection("purchases_partial"); - private static final SearchModelAction ALL = SearchModelAction.ALL; private static final SearchModelAction QUERY = SearchModelAction.QUERY; private static final SearchModelAction UPDATE = SearchModelAction.UPDATE; diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchAuthorizationProviderSpecialCases.java b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchAuthorizationProviderSpecialCases.java index 801a7024b..3cd0b75ec 100644 --- a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchAuthorizationProviderSpecialCases.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchAuthorizationProviderSpecialCases.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.Set; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.Action; diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchModelAuthorizables.java b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchModelAuthorizables.java index 79e1fdbea..94fe9f048 100644 --- 
a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchModelAuthorizables.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchModelAuthorizables.java @@ -17,8 +17,8 @@ * under the License. */ package org.apache.sentry.policy.search; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import org.apache.sentry.core.model.search.Collection; import org.junit.Test; diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyEngineDFS.java b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyEngineDFS.java index 735935e1d..5c14ab62a 100644 --- a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyEngineDFS.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyEngineDFS.java @@ -19,7 +19,7 @@ import java.io.File; import java.io.IOException; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyEngineLocalFS.java b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyEngineLocalFS.java index 77048cf38..593afe76d 100644 --- a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyEngineLocalFS.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyEngineLocalFS.java @@ -19,7 +19,7 @@ import java.io.File; import java.io.IOException; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.provider.file.PolicyFiles; diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyNegative.java b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyNegative.java index 2abe8f2d6..0993cc4fa 100644 --- a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyNegative.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchPolicyNegative.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.Collections; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.sentry.core.common.ActiveRoleSet; diff --git a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchWildcardPrivilege.java b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchWildcardPrivilege.java index cb5531fb7..3cf4a3999 100644 --- a/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchWildcardPrivilege.java +++ b/sentry-policy/sentry-policy-search/src/test/java/org/apache/sentry/policy/search/TestSearchWildcardPrivilege.java @@ -17,18 +17,16 @@ * under the License. 
*/ package org.apache.sentry.policy.search; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; -import static org.apache.sentry.provider.file.PolicyFileConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_JOINER; -import static org.apache.sentry.provider.file.PolicyFileConstants.KV_SEPARATOR; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_SEPARATOR; import org.apache.sentry.core.model.search.SearchConstants; import org.apache.sentry.policy.common.Privilege; -import org.apache.sentry.provider.file.KeyValue; +import org.apache.sentry.policy.common.KeyValue; import org.junit.Test; -public class TestSearchWildcardPrivilege { +public class TestSearchWildcardPrivilege extends org.junit.Assert { private static final String ALL = SearchConstants.ALL; diff --git a/sentry-policy/sentry-policy-sqoop/pom.xml b/sentry-policy/sentry-policy-sqoop/pom.xml new file mode 100644 index 000000000..13112bfa8 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/pom.xml @@ -0,0 +1,80 @@ + + + + 4.0.0 + + org.apache.sentry + sentry-policy + 1.7.0-incubating-SNAPSHOT + + + sentry-policy-sqoop + Sentry Policy for Sqoop + + + + junit + junit + test + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-minicluster + test + + + log4j + log4j + + + org.apache.shiro + shiro-core + + + com.google.guava + guava + + + org.slf4j + slf4j-api + + + org.slf4j + slf4j-log4j12 + + + org.apache.sentry + sentry-core-model-sqoop + + + org.apache.sentry + sentry-provider-common + + + org.apache.sentry + sentry-provider-file + + + + diff --git a/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/ServerNameRequiredMatch.java b/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/ServerNameRequiredMatch.java new file mode 100644 index 000000000..ef1c88b34 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/ServerNameRequiredMatch.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.policy.sqoop; + +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; +import static org.apache.sentry.policy.common.PolicyConstants.PRIVILEGE_PREFIX; + +import java.util.List; + +import org.apache.sentry.core.model.sqoop.Server; +import org.apache.sentry.core.model.sqoop.SqoopAuthorizable; +import org.apache.sentry.policy.common.PrivilegeValidatorContext; +import org.apache.sentry.policy.common.PrivilegeValidator; +import org.apache.shiro.config.ConfigurationException; + +import com.google.common.collect.Lists; + +public class ServerNameRequiredMatch implements PrivilegeValidator { + private final String sqoopServerName; + public ServerNameRequiredMatch(String sqoopServerName) { + this.sqoopServerName = sqoopServerName; + } + @Override + public void validate(PrivilegeValidatorContext context) + throws ConfigurationException { + Iterable<SqoopAuthorizable> authorizables = parsePrivilege(context.getPrivilege()); + boolean match = false; + for (SqoopAuthorizable authorizable : authorizables) { + if (authorizable instanceof Server && authorizable.getName().equalsIgnoreCase(sqoopServerName)) { + match = true; + break; + } + } + if (!match) { + String msg = "server=[name] in " + context.getPrivilege() + + " is required. The expected name is " + sqoopServerName; + throw new ConfigurationException(msg); + } + } + + private Iterable<SqoopAuthorizable> parsePrivilege(String string) { + List<SqoopAuthorizable> result = Lists.newArrayList(); + for(String section : AUTHORIZABLE_SPLITTER.split(string)) { + if(!section.toLowerCase().startsWith(PRIVILEGE_PREFIX)) { + SqoopAuthorizable authorizable = SqoopModelAuthorizables.from(section); + if(authorizable == null) { + String msg = "No authorizable found for " + section; + throw new ConfigurationException(msg); + } + result.add(authorizable); + } + } + return result; + } +}
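A quick note on the parsing convention shared by these validators: AUTHORIZABLE_SPLITTER splits a privilege string on "->", each section is a key=value pair, and PRIVILEGE_PREFIX is assumed to be "action=". A minimal standalone sketch of what parsePrivilege() above does:

// sketch only, mirroring parsePrivilege(); Splitter is com.google.common.base.Splitter
String privilege = "server=server1->connector=c1->action=read";
for (String section : Splitter.on("->").trimResults().split(privilege)) {
  if (!section.toLowerCase().startsWith("action=")) {   // skip the action section
    SqoopAuthorizable authorizable = SqoopModelAuthorizables.from(section);
    System.out.println(authorizable.getName());         // prints server1, then c1
  }
}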
+ */ +package org.apache.sentry.policy.sqoop; + +import java.util.Set; + +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.common.SentryConfigurationException; +import org.apache.sentry.policy.common.PolicyEngine; +import org.apache.sentry.policy.common.PrivilegeFactory; +import org.apache.sentry.policy.common.PrivilegeValidator; +import org.apache.sentry.provider.common.ProviderBackend; +import org.apache.sentry.provider.common.ProviderBackendContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; + +public class SimpleSqoopPolicyEngine implements PolicyEngine { + private static final Logger LOGGER = LoggerFactory.getLogger(SimpleSqoopPolicyEngine.class); + private final ProviderBackend providerBackend; + + public SimpleSqoopPolicyEngine(String sqoopServerName, ProviderBackend providerBackend) { + this.providerBackend = providerBackend; + ProviderBackendContext context = new ProviderBackendContext(); + context.setAllowPerDatabase(false); + context.setValidators(ImmutableList.of(new ServerNameRequiredMatch(sqoopServerName))); + this.providerBackend.initialize(context); + } + @Override + public PrivilegeFactory getPrivilegeFactory() { + return new SqoopWildcardPrivilege.Factory(); + } + + @Override + public ImmutableSet<String> getAllPrivileges(Set<String> groups, + ActiveRoleSet roleSet) throws SentryConfigurationException { + return getPrivileges(groups, roleSet); + } + + @Override + public ImmutableSet<String> getPrivileges(Set<String> groups, + ActiveRoleSet roleSet, Authorizable... authorizableHierarchy) + throws SentryConfigurationException { + if(LOGGER.isDebugEnabled()) { + LOGGER.debug("Getting permissions for {}", groups); + } + ImmutableSet<String> result = providerBackend.getPrivileges(groups, roleSet); + if(LOGGER.isDebugEnabled()) { + LOGGER.debug("result = {}", result); + } + return result; + } + + @Override + public void close() { + if (providerBackend != null) { + providerBackend.close(); + } + } + + @Override + public void validatePolicy(boolean strictValidation) + throws SentryConfigurationException { + if (providerBackend != null) { + providerBackend.validatePolicy(strictValidation); + } + } + +} diff --git a/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/SqoopModelAuthorizables.java b/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/SqoopModelAuthorizables.java new file mode 100644 index 000000000..b03b4dceb --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/SqoopModelAuthorizables.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements.  See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.  You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.sentry.policy.sqoop; + +import org.apache.sentry.core.model.sqoop.Connector; +import org.apache.sentry.core.model.sqoop.Job; +import org.apache.sentry.core.model.sqoop.Link; +import org.apache.sentry.core.model.sqoop.Server; +import org.apache.sentry.core.model.sqoop.SqoopAuthorizable; +import org.apache.sentry.core.model.sqoop.SqoopAuthorizable.AuthorizableType; +import org.apache.sentry.policy.common.KeyValue; + +public class SqoopModelAuthorizables { + public static SqoopAuthorizable from(KeyValue keyValue) { + String prefix = keyValue.getKey().toLowerCase(); + String name = keyValue.getValue().toLowerCase(); + for (AuthorizableType type : AuthorizableType.values()) { + if(prefix.equalsIgnoreCase(type.name())) { + return from(type, name); + } + } + return null; + } + + public static SqoopAuthorizable from(String keyValue) { + return from(new KeyValue(keyValue)); + } + + public static SqoopAuthorizable from(AuthorizableType type, String name) { + switch(type) { + case SERVER: + return new Server(name); + case JOB: + return new Job(name); + case CONNECTOR: + return new Connector(name); + case LINK: + return new Link(name); + default: + return null; + } + } +} diff --git a/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/SqoopWildcardPrivilege.java b/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/SqoopWildcardPrivilege.java new file mode 100644 index 000000000..ae89cf452 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/main/java/org/apache/sentry/policy/sqoop/SqoopWildcardPrivilege.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.policy.sqoop; + +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; + +import java.util.List; + +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sentry.policy.common.Privilege; +import org.apache.sentry.policy.common.PrivilegeFactory; +import org.apache.sentry.policy.common.KeyValue; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +public class SqoopWildcardPrivilege implements Privilege { + + public static class Factory implements PrivilegeFactory { + @Override + public Privilege createPrivilege(String permission) { + return new SqoopWildcardPrivilege(permission); + } + } + + private final ImmutableList<KeyValue> parts; + + public SqoopWildcardPrivilege(String permission) { + if (Strings.isNullOrEmpty(permission)) { + throw new IllegalArgumentException("permission string cannot be null or empty."); + } + List<KeyValue> parts = Lists.newArrayList(); + for (String authorizable : AUTHORIZABLE_SPLITTER.trimResults().split(permission.trim())) { + if (authorizable.isEmpty()) { + throw new IllegalArgumentException("Privilege '" + permission + "' has an empty section"); + } + parts.add(new KeyValue(authorizable)); + } + if (parts.isEmpty()) { + throw new AssertionError("Should never occur: " + permission); + } + this.parts = ImmutableList.copyOf(parts); + } + + @Override + public boolean implies(Privilege p) { + if (!(p instanceof SqoopWildcardPrivilege)) { + return false; + } + SqoopWildcardPrivilege wp = (SqoopWildcardPrivilege)p; + List<KeyValue> otherParts = wp.parts; + if(equals(wp)) { + return true; + } + int index = 0; + for (KeyValue otherPart : otherParts) { + // If this privilege has less parts than the other privilege, everything + // after the number of parts contained + // in this privilege is automatically implied, so return true + if (parts.size() - 1 < index) { + return true; + } else { + KeyValue part = parts.get(index); + // Support for action inheritance from parent to child + if (part.getKey().equalsIgnoreCase(SqoopActionConstant.NAME) + && !(otherPart.getKey().equalsIgnoreCase(SqoopActionConstant.NAME))) { + continue; + } + // are the keys even equal + if(!part.getKey().equalsIgnoreCase(otherPart.getKey())) { + return false; + } + if (!impliesKeyValue(part, otherPart)) { + return false; + } + index++; + } + } + // If this privilege has more parts than + // the other parts, only imply it if + // all of the other parts are "*" or "ALL" + for (; index < parts.size(); index++) { + KeyValue part = parts.get(index); + if (!part.getValue().equals(SqoopActionConstant.ALL)) { + return false; + } + } + return true; + } + + private boolean impliesKeyValue(KeyValue policyPart, KeyValue requestPart) { + Preconditions.checkState(policyPart.getKey().equalsIgnoreCase(requestPart.getKey()), + "Please report, this method should not be called with two different keys"); + if(policyPart.getValue().equalsIgnoreCase(SqoopActionConstant.ALL) || + policyPart.getValue().equalsIgnoreCase(SqoopActionConstant.ALL_NAME) || + policyPart.equals(requestPart)) { + return true; + } else if (!SqoopActionConstant.NAME.equalsIgnoreCase(policyPart.getKey()) + && SqoopActionConstant.ALL.equalsIgnoreCase(requestPart.getValue())) { + /* privilege request is to match with any object of given type */ + return true; + } + return false; + + } +}
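A hand-traced illustration of the implication semantics implemented above. Note that impliesKeyValue() treats both "*" (SqoopActionConstant.ALL) and the literal name "all" (assumed here to be SqoopActionConstant.ALL_NAME) as wildcard values, which is why roles such as link=all in the tests below cover every link:

// sketch only: privileges as the Factory above would create them
Privilege admin = new SqoopWildcardPrivilege("server=server1->action=*");
Privilege read = new SqoopWildcardPrivilege("server=server1->connector=c1->action=read");
Privilege links = new SqoopWildcardPrivilege("server=server1->link=all->action=*");

admin.implies(read);  // true: "action=*" is carried past "connector=c1", then implies "action=read"
read.implies(admin);  // false: the "connector" and "action" keys never match
links.implies(new SqoopWildcardPrivilege("server=server1->link=link1->action=read")); // true: "all" acts as a wildcard value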
diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/AbstractTestSqoopPolicyEngine.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/AbstractTestSqoopPolicyEngine.java new file mode 100644 index 000000000..49b9bc1d5 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/AbstractTestSqoopPolicyEngine.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.  See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.sqoop; + +import java.io.File; +import java.io.IOException; +import java.util.Set; +import java.util.TreeSet; + +import org.junit.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.policy.common.PolicyEngine; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public abstract class AbstractTestSqoopPolicyEngine { + private static final String OPERATOR_JDBC_CONNECTORS_READ = "server=server1->connector=generic-jdbc-connector->action=read"; + private static final String OPERATOR_HDFS_CONNECTORS_READ = "server=server1->connector=hdfs-connector->action=read"; + private static final String OPERATOR_KAFKA_CONNECTORS_READ = "server=server1->connector=kafka-connector->action=read"; + private static final String OPERATOR_KITE_CONNECTORS_READ = "server=server1->connector=kite-connector->action=read"; + private static final String ANALYST_JOBS_ALL = "server=server1->job=all->action=*"; + private static final String OPERATOR_JOB1_READ = "server=server1->job=job1->action=read"; + private static final String OPERATOR_JOB2_READ = "server=server1->job=job2->action=read"; + private static final String ANALYST_LINKS_ALL = "server=server1->link=all->action=*"; + private static final String OPERATOR_LINK1_READ = "server=server1->link=link1->action=read"; + private static final String OPERATOR_LINK2_READ = "server=server1->link=link2->action=read"; + private static final String ADMIN = "server=server1->action=*"; + + private PolicyEngine policy; + private static File baseDir; + + protected String sqoopServerName = "server1"; + + @BeforeClass + public static void setupClazz() throws IOException { + baseDir = Files.createTempDir(); + } + + @AfterClass + public static void teardownClazz() throws IOException { + if(baseDir != null) { + FileUtils.deleteQuietly(baseDir); + } + } + + protected void setPolicy(PolicyEngine policy) { + this.policy = policy; + } + protected static File getBaseDir() { + return baseDir; + } + @Before + public void setup() throws IOException { + afterSetup(); + } + @After + public void
teardown() throws IOException { + beforeTeardown(); + } + protected void afterSetup() throws IOException { + + } + + protected void beforeTeardown() throws IOException { + + } + + @Test + public void testDeveloper() throws Exception { + Set<String> expected = Sets.newTreeSet(Sets.newHashSet( + OPERATOR_JDBC_CONNECTORS_READ, OPERATOR_HDFS_CONNECTORS_READ, + OPERATOR_KAFKA_CONNECTORS_READ, OPERATOR_KITE_CONNECTORS_READ, + ANALYST_JOBS_ALL, ANALYST_LINKS_ALL)); + Assert.assertEquals(expected.toString(), + Sets.newTreeSet(policy.getPrivileges(set("developer"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testAnalyst() throws Exception { + Set<String> expected = Sets.newTreeSet(Sets.newHashSet(ANALYST_JOBS_ALL, ANALYST_LINKS_ALL)); + Assert.assertEquals(expected.toString(), + new TreeSet<String>(policy.getPrivileges(set("analyst"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testConnectorOperator() throws Exception { + // intentionally empty; connector-operator privileges are exercised end to end in TestSqoopAuthorizationProviderGeneralCases + } + + @Test + public void testJobOperator() throws Exception { + Set<String> expected = Sets.newTreeSet(Sets + .newHashSet(OPERATOR_JOB1_READ,OPERATOR_JOB2_READ)); + Assert.assertEquals(expected.toString(), + new TreeSet<String>(policy.getPrivileges(set("job1_2_operator"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testLinkOperator() throws Exception { + Set<String> expected = Sets.newTreeSet(Sets + .newHashSet(OPERATOR_LINK1_READ, OPERATOR_LINK2_READ)); + Assert.assertEquals(expected.toString(), + new TreeSet<String>(policy.getPrivileges(set("link1_2_operator"), ActiveRoleSet.ALL)) + .toString()); + } + + @Test + public void testAdmin() throws Exception { + Set<String> expected = Sets.newTreeSet(Sets.newHashSet(ADMIN)); + Assert.assertEquals(expected.toString(), + new TreeSet<String>(policy.getPrivileges(set("admin"), ActiveRoleSet.ALL)) + .toString()); + } + + private static Set<String> set(String... values) { + return Sets.newHashSet(values); + } +}
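The test-authz-provider.ini these tests load is added under src/test/resources elsewhere in this patch and is not visible in this excerpt. Reconstructed from the constants and group names above, it plausibly reads as follows (the role names are guesses; the privilege strings are taken verbatim from the constants):

[groups]
admin = admin_role
developer = developer_role
analyst = analyst_role
job1_2_operator = job_operator_role
link1_2_operator = link_operator_role

[roles]
admin_role = server=server1->action=*
analyst_role = server=server1->job=all->action=*, server=server1->link=all->action=*
developer_role = server=server1->connector=generic-jdbc-connector->action=read, server=server1->connector=hdfs-connector->action=read, server=server1->connector=kafka-connector->action=read, server=server1->connector=kite-connector->action=read, server=server1->job=all->action=*, server=server1->link=all->action=*
job_operator_role = server=server1->job=job1->action=read, server=server1->job=job2->action=read
link_operator_role = server=server1->link=link1->action=read, server=server1->link=link2->action=read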
diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/MockGroupMappingServiceProvider.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/MockGroupMappingServiceProvider.java new file mode 100644 index 000000000..fd577d6e6 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/MockGroupMappingServiceProvider.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.  See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.sqoop; + +import java.util.Set; + +import org.apache.sentry.provider.common.GroupMappingService; + +import com.google.common.collect.Multimap; +import com.google.common.collect.Sets; + +public class MockGroupMappingServiceProvider implements GroupMappingService { + private final Multimap<String, String> userToGroupMap; + + public MockGroupMappingServiceProvider(Multimap<String, String> userToGroupMap) { + this.userToGroupMap = userToGroupMap; + } + @Override + public Set<String> getGroups(String user) { + return Sets.newHashSet(userToGroupMap.get(user)); + } + +} diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/SqoopPolicyFileProviderBackend.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/SqoopPolicyFileProviderBackend.java new file mode 100644 index 000000000..5da63a372 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/SqoopPolicyFileProviderBackend.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.  See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.sqoop; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.sentry.provider.file.SimpleFileProviderBackend; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SqoopPolicyFileProviderBackend extends SimpleSqoopPolicyEngine { + private static final Logger LOGGER = LoggerFactory.getLogger(SqoopPolicyFileProviderBackend.class); + public SqoopPolicyFileProviderBackend(String sqoopServerName, + String resource) throws IOException { + super(sqoopServerName, new SimpleFileProviderBackend(new Configuration(), resource)); + LOGGER.warn("The DB provider backend is preferred over the file provider backend for the Sqoop policy engine"); + } +}
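Putting the pieces together: the tests that follow wire the file-backed engine into an authorization provider roughly like this (a sketch; the ini path and the user-to-group mapping are illustrative):

// illustrative wiring, mirroring the constructor calls used in the tests below
Multimap<String, String> userToGroups = HashMultimap.create();
userToGroups.put("developer1", "developer");
AuthorizationProvider authzProvider = new HadoopGroupResourceAuthorizationProvider(
    new SqoopPolicyFileProviderBackend("server1", "/path/to/test-authz-provider.ini"),
    new MockGroupMappingServiceProvider(userToGroups));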
diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestServerNameRequiredMatch.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestServerNameRequiredMatch.java new file mode 100644 index 000000000..b6e9893c8 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestServerNameRequiredMatch.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.  See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.sqoop; + +import org.junit.Assert; + +import org.apache.sentry.policy.common.PrivilegeValidatorContext; +import org.apache.shiro.config.ConfigurationException; +import org.junit.Test; + +public class TestServerNameRequiredMatch { + @Test + public void testWithoutServerName() { + ServerNameRequiredMatch serverNameMatch = new ServerNameRequiredMatch("server1"); + try { + serverNameMatch.validate(new PrivilegeValidatorContext("connector=c1->action=read")); + Assert.fail("Expected ConfigurationException"); + } catch (ConfigurationException ex) { + // expected + } + } + @Test + public void testServerNameNotMatch() throws Exception { + ServerNameRequiredMatch serverNameMatch = new ServerNameRequiredMatch("server1"); + try { + serverNameMatch.validate(new PrivilegeValidatorContext("server=server2->connector=c1->action=read")); + Assert.fail("Expected ConfigurationException"); + } catch (ConfigurationException ex) { + // expected + } + } + @Test + public void testServerNameMatch() throws Exception { + ServerNameRequiredMatch serverNameMatch = new ServerNameRequiredMatch("server1"); + try { + serverNameMatch.validate(new PrivilegeValidatorContext("server=server1->connector=c1->action=read")); + } catch (ConfigurationException ex) { + Assert.fail("Unexpected ConfigurationException"); + } + } + +} diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopAuthorizationProviderGeneralCases.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopAuthorizationProviderGeneralCases.java new file mode 100644 index 000000000..3bdf6f7c1 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopAuthorizationProviderGeneralCases.java @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.  See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.apache.sentry.policy.sqoop; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +import org.junit.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.core.common.Action; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.core.model.sqoop.Connector; +import org.apache.sentry.core.model.sqoop.Job; +import org.apache.sentry.core.model.sqoop.Link; +import org.apache.sentry.core.model.sqoop.Server; +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sentry.core.model.sqoop.SqoopActionFactory.SqoopAction; +import org.apache.sentry.provider.common.ResourceAuthorizationProvider; +import org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider; +import org.apache.sentry.provider.file.PolicyFiles; +import org.junit.After; +import org.junit.Test; + +import com.google.common.base.Objects; +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public class TestSqoopAuthorizationProviderGeneralCases { + private static final Multimap<String, String> USER_TO_GROUP_MAP = HashMultimap.create(); + + private static final Subject SUB_ADMIN = new Subject("admin1"); + private static final Subject SUB_DEVELOPER = new Subject("developer1"); + private static final Subject SUB_ANALYST = new Subject("analyst1"); + private static final Subject SUB_JOB_OPERATOR = new Subject("job_operator1"); + private static final Subject SUB_LINK_OPERATOR = new Subject("link_operator1"); + private static final Subject SUB_CONNECTOR_OPERATOR = new Subject("connector_operator1"); + + + + private static final Server server1 = new Server("server1"); + private static final Connector jdbc_connector = new Connector("generic-jdbc-connector"); + private static final Connector hdfs_connector = new Connector("hdfs-connector"); + private static final Connector kafka_connector = new Connector("kafka-connector"); + private static final Connector kite_connector = new Connector("kite-connector"); + private static final Link link1 = new Link("link1"); + private static final Link link2 = new Link("link2"); + private static final Job job1 = new Job("job1"); + private static final Job job2 = new Job("job2"); + + private static final SqoopAction ALL = new SqoopAction(SqoopActionConstant.ALL); + private static final SqoopAction READ = new SqoopAction(SqoopActionConstant.READ); + private static final SqoopAction WRITE = new SqoopAction(SqoopActionConstant.WRITE); + + private static final String ADMIN = "admin"; + private static final String DEVELOPER = "developer"; + private static final String ANALYST = "analyst"; + private static final String JOB_OPERATOR = "job1_2_operator"; + private static final String LINK_OPERATOR ="link1_2_operator"; + private static final String CONNECTOR_OPERATOR = "connectors_operator"; + + static { + USER_TO_GROUP_MAP.putAll(SUB_ADMIN.getName(), Arrays.asList(ADMIN)); + USER_TO_GROUP_MAP.putAll(SUB_DEVELOPER.getName(), Arrays.asList(DEVELOPER)); + USER_TO_GROUP_MAP.putAll(SUB_ANALYST.getName(), Arrays.asList(ANALYST)); + USER_TO_GROUP_MAP.putAll(SUB_JOB_OPERATOR.getName(),Arrays.asList(JOB_OPERATOR)); + USER_TO_GROUP_MAP.putAll(SUB_LINK_OPERATOR.getName(),Arrays.asList(LINK_OPERATOR)); +
USER_TO_GROUP_MAP.putAll(SUB_CONNECTOR_OPERATOR.getName(),Arrays.asList(CONNECTOR_OPERATOR)); + } + + private final ResourceAuthorizationProvider authzProvider; + private File baseDir; + + public TestSqoopAuthorizationProviderGeneralCases() throws IOException { + baseDir = Files.createTempDir(); + PolicyFiles.copyToDir(baseDir, "test-authz-provider.ini"); + authzProvider = new HadoopGroupResourceAuthorizationProvider( + new SqoopPolicyFileProviderBackend(server1.getName(), new File(baseDir, "test-authz-provider.ini").getPath()), + new MockGroupMappingServiceProvider(USER_TO_GROUP_MAP)); + } + + @After + public void teardown() { + if(baseDir != null) { + FileUtils.deleteQuietly(baseDir); + } + } + + private void doTestResourceAuthorizationProvider(Subject subject, List<? extends Authorizable> authorizableHierarchy, + Set<? extends Action> actions, boolean expected) throws Exception { + Objects.ToStringHelper helper = Objects.toStringHelper("TestParameters"); + helper.add("Subject", subject).add("authzHierarchy", authorizableHierarchy).add("action", actions); + Assert.assertEquals(helper.toString(), expected, + authzProvider.hasAccess(subject, authorizableHierarchy, actions, ActiveRoleSet.ALL)); + } + + @Test + public void testAdmin() throws Exception { + Set<SqoopAction> allActions = Sets.newHashSet(ALL, READ, WRITE); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1,hdfs_connector), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1,jdbc_connector), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1,kafka_connector), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1,kite_connector), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1,link1), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1,link2), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1,job1), allActions, true); + doTestResourceAuthorizationProvider(SUB_ADMIN, Arrays.asList(server1,job2), allActions, true); + } + + @Test + public void testDeveloper() throws Exception { + Set<SqoopAction> allActions = Sets.newHashSet(ALL, READ, WRITE); + for (SqoopAction action : allActions) { + //developer has only the read action on connectors + for (Connector connector : Sets.newHashSet(jdbc_connector, hdfs_connector, kafka_connector, kite_connector)) { + doTestResourceAuthorizationProvider(SUB_DEVELOPER, Arrays.asList(server1, connector), Sets.newHashSet(action), READ.equals(action)); + } + } + + for (Link link : Sets.newHashSet(link1, link2)) { + //developer has the all action on all links + doTestResourceAuthorizationProvider(SUB_DEVELOPER, Arrays.asList(server1, link), allActions, true); + } + + for (Job job : Sets.newHashSet(job1,job2)) { + //developer has the all action on all jobs + doTestResourceAuthorizationProvider(SUB_DEVELOPER, Arrays.asList(server1, job), allActions, true); + } + } + + @Test + public void testAnalyst() throws Exception { + Set<SqoopAction> allActions = Sets.newHashSet(ALL, READ, WRITE); + for (SqoopAction action : allActions) { + //analyst has no actions on any connector + for (Connector connector : Sets.newHashSet(jdbc_connector, hdfs_connector, kafka_connector, kite_connector)) { + doTestResourceAuthorizationProvider(SUB_ANALYST, Arrays.asList(server1, connector), Sets.newHashSet(action), false); + } + } + + for (Link link :
Sets.newHashSet(link1, link2)) { + //analyst has the all action on all links + doTestResourceAuthorizationProvider(SUB_ANALYST, Arrays.asList(server1, link), allActions, true); + } + + for (Job job : Sets.newHashSet(job1,job2)) { + //analyst has the all action on all jobs + doTestResourceAuthorizationProvider(SUB_ANALYST, Arrays.asList(server1, job), allActions, true); + } + } + + @Test + public void testJobOperator() throws Exception { + Set<SqoopAction> allActions = Sets.newHashSet(ALL, READ, WRITE); + for (SqoopAction action : allActions) { + for (Job job : Sets.newHashSet(job1,job2)) { + //Job operator has the read action on all jobs + doTestResourceAuthorizationProvider(SUB_JOB_OPERATOR, Arrays.asList(server1, job), Sets.newHashSet(action), READ.equals(action)); + } + for (Link link : Sets.newHashSet(link1, link2)) { + doTestResourceAuthorizationProvider(SUB_JOB_OPERATOR, Arrays.asList(server1, link), Sets.newHashSet(action), false); + } + for (Connector connector : Sets.newHashSet(jdbc_connector, hdfs_connector, kafka_connector, kite_connector)) { + doTestResourceAuthorizationProvider(SUB_JOB_OPERATOR, Arrays.asList(server1, connector), Sets.newHashSet(action), false); + } + } + } + + @Test + public void testLinkOperator() throws Exception { + Set<SqoopAction> allActions = Sets.newHashSet(ALL, READ, WRITE); + for (SqoopAction action : allActions) { + for (Link link : Sets.newHashSet(link1, link2)) { + //Link operator has the read action on all links + doTestResourceAuthorizationProvider(SUB_LINK_OPERATOR, Arrays.asList(server1, link), Sets.newHashSet(action), READ.equals(action)); + } + for (Job job : Sets.newHashSet(job1,job2)) { + doTestResourceAuthorizationProvider(SUB_LINK_OPERATOR, Arrays.asList(server1, job), Sets.newHashSet(action), false); + } + for (Connector connector : Sets.newHashSet(jdbc_connector, hdfs_connector, kafka_connector, kite_connector)) { + doTestResourceAuthorizationProvider(SUB_LINK_OPERATOR, Arrays.asList(server1, connector), Sets.newHashSet(action), false); + } + } + } + + @Test + public void testConnectorOperator() throws Exception { + Set<SqoopAction> allActions = Sets.newHashSet(ALL, READ, WRITE); + for (SqoopAction action : allActions) { + for (Connector connector : Sets.newHashSet(jdbc_connector, hdfs_connector, kafka_connector, kite_connector)) { + doTestResourceAuthorizationProvider(SUB_CONNECTOR_OPERATOR, Arrays.asList(server1, connector), Sets.newHashSet(action), READ.equals(action)); + } + for (Job job : Sets.newHashSet(job1,job2)) { + doTestResourceAuthorizationProvider(SUB_CONNECTOR_OPERATOR, Arrays.asList(server1, job), Sets.newHashSet(action), false); + } + for (Link link : Sets.newHashSet(link1, link2)) { + doTestResourceAuthorizationProvider(SUB_CONNECTOR_OPERATOR, Arrays.asList(server1, link), Sets.newHashSet(action), false); + } + } + } +} diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopAuthorizationProviderSpecialCases.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopAuthorizationProviderSpecialCases.java new file mode 100644 index 000000000..9fee5a78e --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopAuthorizationProviderSpecialCases.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.  See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.sqoop; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.Set; + +import org.junit.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.core.common.Action; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.common.Subject; +import org.apache.sentry.core.model.sqoop.Connector; +import org.apache.sentry.core.model.sqoop.Server; +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sentry.core.model.sqoop.SqoopActionFactory.SqoopAction; +import org.apache.sentry.provider.common.AuthorizationProvider; +import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider; +import org.apache.sentry.provider.file.PolicyFile; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public class TestSqoopAuthorizationProviderSpecialCases { + private AuthorizationProvider authzProvider; + private PolicyFile policyFile; + private File baseDir; + private File iniFile; + private String initResource; + @Before + public void setup() throws IOException { + baseDir = Files.createTempDir(); + iniFile = new File(baseDir, "policy.ini"); + initResource = "file://" + iniFile.getPath(); + policyFile = new PolicyFile(); + } + + @After + public void teardown() throws IOException { + if(baseDir != null) { + FileUtils.deleteQuietly(baseDir); + } + } + + @Test + public void testDuplicateEntries() throws Exception { + Subject user1 = new Subject("user1"); + Server server1 = new Server("server1"); + Connector connector1 = new Connector("c1"); + Set<SqoopAction> actions = Sets.newHashSet(new SqoopAction(SqoopActionConstant.READ)); + policyFile.addGroupsToUser(user1.getName(), true, "group1", "group1") + .addRolesToGroup("group1", true, "role1", "role1") + .addPermissionsToRole("role1", true, "server=server1->connector=c1->action=read", + "server=server1->connector=c1->action=read"); + policyFile.write(iniFile); + SqoopPolicyFileProviderBackend policy = new SqoopPolicyFileProviderBackend(server1.getName(), initResource); + authzProvider = new LocalGroupResourceAuthorizationProvider(initResource, policy); + List<? extends Authorizable> authorizableHierarchy = ImmutableList.of(server1, connector1); + Assert.assertTrue(authorizableHierarchy.toString(), + authzProvider.hasAccess(user1, authorizableHierarchy, actions, ActiveRoleSet.ALL)); + } + +} diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopModelAuthorizables.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopModelAuthorizables.java new file mode 100644 index 000000000..99a5ae286 --- /dev/null +++
b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopModelAuthorizables.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.sentry.policy.sqoop; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import org.apache.sentry.core.model.sqoop.Server; +import org.junit.Test; + +public class TestSqoopModelAuthorizables { + + @Test + public void testServer() throws Exception { + Server server1 = (Server)SqoopModelAuthorizables.from("SERVER=server1"); + assertEquals("server1", server1.getName()); + } + + @Test(expected=IllegalArgumentException.class) + public void testNoKV() throws Exception { + System.out.println(SqoopModelAuthorizables.from("nonsense")); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyKey() throws Exception { + System.out.println(SqoopModelAuthorizables.from("=server1")); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyValue() throws Exception { + System.out.println(SqoopModelAuthorizables.from("SERVER=")); + } + + @Test + public void testNotAuthorizable() throws Exception { + assertNull(SqoopModelAuthorizables.from("k=v")); + } +} diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyEngineDFS.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyEngineDFS.java new file mode 100644 index 000000000..ff4c9a8ff --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyEngineDFS.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
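Taken together, the TestSqoopModelAuthorizables cases above pin down the parsing contract of SqoopModelAuthorizables.from: input without a key=value shape and empty keys or values are rejected, and an unrecognized key yields null. A minimal sketch that would satisfy those tests — illustrative only, the real implementation may be organized differently (the Guava Splitter/Lists helpers are an assumption):

    public static Authorizable from(String keyValue) {
      List<String> kvList = Lists.newArrayList(Splitter.on("=").trimResults().split(keyValue));
      if (kvList.size() != 2) {
        throw new IllegalArgumentException("Invalid key value: " + keyValue);  // "nonsense"
      }
      String key = kvList.get(0);
      String value = kvList.get(1);
      if (key.isEmpty() || value.isEmpty()) {
        throw new IllegalArgumentException("Key or value is empty: " + keyValue);  // "=server1", "SERVER="
      }
      if ("SERVER".equalsIgnoreCase(key)) {
        return new Server(value);  // "SERVER=server1" -> Server("server1")
      }
      return null;                 // unknown authorizable type, e.g. "k=v"
    }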
+ */ +package org.apache.sentry.policy.sqoop; + +import java.io.File; +import java.io.IOException; + +import org.junit.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.sentry.provider.file.PolicyFiles; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +public class TestSqoopPolicyEngineDFS extends AbstractTestSqoopPolicyEngine { + private static MiniDFSCluster dfsCluster; + private static FileSystem fileSystem; + private static Path root; + private static Path etc; + + @BeforeClass + public static void setupLocalClazz() throws IOException { + File baseDir = getBaseDir(); + Assert.assertNotNull(baseDir); + File dfsDir = new File(baseDir, "dfs"); + Assert.assertTrue(dfsDir.isDirectory() || dfsDir.mkdirs()); + Configuration conf = new Configuration(); + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath()); + dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + fileSystem = dfsCluster.getFileSystem(); + root = new Path(fileSystem.getUri().toString()); + etc = new Path(root, "/etc"); + fileSystem.mkdirs(etc); + } + + @AfterClass + public static void teardownLocalClazz() { + if(dfsCluster != null) { + dfsCluster.shutdown(); + } + } + + @Override + protected void afterSetup() throws IOException { + fileSystem.delete(etc, true); + fileSystem.mkdirs(etc); + PolicyFiles.copyToDir(fileSystem, etc, "test-authz-provider.ini"); + setPolicy(new SqoopPolicyFileProviderBackend(sqoopServerName, new Path(etc, + "test-authz-provider.ini").toString())); + } + + @Override + protected void beforeTeardown() throws IOException { + fileSystem.delete(etc, true); + } +} diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyEngineLocalFS.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyEngineLocalFS.java new file mode 100644 index 000000000..ca5a198c1 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyEngineLocalFS.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
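The MiniDFSCluster test above relies on the file-backed policy engine resolving its resource through the Hadoop FileSystem API, so the same backend construction serves both local and HDFS-hosted policy files. A sketch, reusing the names from the tests (paths illustrative):

    // Local file system:
    PolicyEngine local = new SqoopPolicyFileProviderBackend(sqoopServerName,
        new File(baseDir, "test-authz-provider.ini").getPath());
    // HDFS: only the URI scheme of the resource string differs
    PolicyEngine remote = new SqoopPolicyFileProviderBackend(sqoopServerName,
        new Path(etc, "test-authz-provider.ini").toString());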
+ */ +package org.apache.sentry.policy.sqoop; + +import java.io.File; +import java.io.IOException; + +import org.junit.Assert; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.provider.file.PolicyFiles; + +public class TestSqoopPolicyEngineLocalFS extends AbstractTestSqoopPolicyEngine { + @Override + protected void afterSetup() throws IOException { + File baseDir = getBaseDir(); + Assert.assertNotNull(baseDir); + Assert.assertTrue(baseDir.isDirectory() || baseDir.mkdirs()); + PolicyFiles.copyToDir(baseDir, "test-authz-provider.ini"); + setPolicy(new SqoopPolicyFileProviderBackend(sqoopServerName, new File(baseDir, "test-authz-provider.ini").getPath())); + } + @Override + protected void beforeTeardown() throws IOException { + File baseDir = getBaseDir(); + Assert.assertNotNull(baseDir); + FileUtils.deleteQuietly(baseDir); + } +} diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyNegative.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyNegative.java new file mode 100644 index 000000000..da922a5d3 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopPolicyNegative.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.sentry.policy.sqoop;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.junit.Assert;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.sentry.core.common.ActiveRoleSet;
+import org.apache.sentry.policy.common.PolicyEngine;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import com.google.common.io.Files;
+
+public class TestSqoopPolicyNegative {
+  @SuppressWarnings("unused")
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(TestSqoopPolicyNegative.class);
+
+  private File baseDir;
+  private File globalPolicyFile;
+
+  @Before
+  public void setup() {
+    baseDir = Files.createTempDir();
+    globalPolicyFile = new File(baseDir, "global.ini");
+  }
+
+  @After
+  public void teardown() {
+    if(baseDir != null) {
+      FileUtils.deleteQuietly(baseDir);
+    }
+  }
+
+  private void append(String from, File to) throws IOException {
+    Files.append(from + "\n", to, Charsets.UTF_8);
+  }
+
+  @Test
+  public void testAuthorizedSqoopInPolicyFile() throws Exception {
+    append("[groups]", globalPolicyFile);
+    append("other_group = other_role", globalPolicyFile);
+    append("[roles]", globalPolicyFile);
+    append("other_role = server=server1->connector=c1->action=read, server=server1->link=l1->action=read", globalPolicyFile);
+    PolicyEngine policy = new SqoopPolicyFileProviderBackend("server1", globalPolicyFile.getPath());
+    // malicious_group has no privileges
+    ImmutableSet<String> permissions = policy.getAllPrivileges(Sets.newHashSet("malicious_group"), ActiveRoleSet.ALL);
+    Assert.assertTrue(permissions.toString(), permissions.isEmpty());
+    // other_group has two privileges
+    permissions = policy.getAllPrivileges(Sets.newHashSet("other_group"), ActiveRoleSet.ALL);
+    Assert.assertTrue(permissions.toString(), permissions.size() == 2);
+  }
+
+  @Test
+  public void testNoServerNameConfig() throws Exception {
+    append("[groups]", globalPolicyFile);
+    append("other_group = malicious_role", globalPolicyFile);
+    append("[roles]", globalPolicyFile);
+    append("malicious_role = connector=c1->action=read,link=l1->action=read", globalPolicyFile);
+    PolicyEngine policy = new SqoopPolicyFileProviderBackend("server1", globalPolicyFile.getPath());
+    ImmutableSet<String> permissions = policy.getAllPrivileges(Sets.newHashSet("other_group"), ActiveRoleSet.ALL);
+    Assert.assertTrue(permissions.toString(), permissions.isEmpty());
+  }
+
+  @Test
+  public void testServerAllName() throws Exception {
+    append("[groups]", globalPolicyFile);
+    append("group = malicious_role", globalPolicyFile);
+    append("[roles]", globalPolicyFile);
+    append("malicious_role = server=*", globalPolicyFile);
+    PolicyEngine policy = new SqoopPolicyFileProviderBackend("server1", globalPolicyFile.getPath());
+    ImmutableSet<String> permissions = policy.getAllPrivileges(Sets.newHashSet("group"), ActiveRoleSet.ALL);
+    Assert.assertTrue(permissions.toString(), permissions.isEmpty());
+  }
+
+  @Test
+  public void testServerIncorrect() throws Exception {
+    append("[groups]", globalPolicyFile);
+    append("group = malicious_role", globalPolicyFile);
+    append("[roles]", globalPolicyFile);
+    append("malicious_role = server=server2", globalPolicyFile);
+    PolicyEngine policy = new SqoopPolicyFileProviderBackend("server1", globalPolicyFile.getPath());
+    ImmutableSet<String> permissions = policy.getAllPrivileges(Sets.newHashSet("group"),
ActiveRoleSet.ALL);
+    Assert.assertTrue(permissions.toString(), permissions.isEmpty());
+  }
+
+  @Test
+  public void testAll() throws Exception {
+    append("[groups]", globalPolicyFile);
+    append("group = malicious_role", globalPolicyFile);
+    append("[roles]", globalPolicyFile);
+    append("malicious_role = *", globalPolicyFile);
+    PolicyEngine policy = new SqoopPolicyFileProviderBackend("server1", globalPolicyFile.getPath());
+    ImmutableSet<String> permissions = policy.getAllPrivileges(Sets.newHashSet("group"), ActiveRoleSet.ALL);
+    Assert.assertTrue(permissions.toString(), permissions.isEmpty());
+  }
+}
diff --git a/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopWildcardPrivilege.java b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopWildcardPrivilege.java
new file mode 100644
index 000000000..84a25a72f
--- /dev/null
+++ b/sentry-policy/sentry-policy-sqoop/src/test/java/org/apache/sentry/policy/sqoop/TestSqoopWildcardPrivilege.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
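In short, the negative tests above pin down the server-scoping rule: a backend bound to one server name discards roles that omit the server, name a different server, or grant through a bare wildcard. A condensed restatement of that contract (assertion style illustrative):

    PolicyEngine policy = new SqoopPolicyFileProviderBackend("server1", globalPolicyFile.getPath());
    // With "malicious_role = server=server2" (or "server=*", or just "*") in [roles]:
    ImmutableSet<String> privs = policy.getAllPrivileges(Sets.newHashSet("group"), ActiveRoleSet.ALL);
    Assert.assertTrue(privs.isEmpty());  // nothing survives the server-name filter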
+ */ +package org.apache.sentry.policy.sqoop; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_SEPARATOR; + +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sentry.policy.common.Privilege; +import org.apache.sentry.policy.common.KeyValue; +import org.junit.Test; + +public class TestSqoopWildcardPrivilege extends org.junit.Assert { + private static final Privilege SQOOP_SERVER1_ALL = + create(new KeyValue("SERVER", "server1"), new KeyValue("action", SqoopActionConstant.ALL)); + private static final Privilege SQOOP_SERVER1_READ = + create(new KeyValue("SERVER", "server1"), new KeyValue("action", SqoopActionConstant.READ)); + private static final Privilege SQOOP_SERVER1_WRITE = + create(new KeyValue("SERVER", "server1"), new KeyValue("action", SqoopActionConstant.WRITE)); + + private static final Privilege SQOOP_SERVER1_JOB1_ALL = + create(new KeyValue("SERVER", "server1"), new KeyValue("JOB", "job1"), new KeyValue("action", SqoopActionConstant.ALL)); + private static final Privilege SQOOP_SERVER1_JOB1_READ = + create(new KeyValue("SERVER", "server1"), new KeyValue("JOB", "job1"), new KeyValue("action", SqoopActionConstant.READ)); + private static final Privilege SQOOP_SERVER1_JOB1_WRITE = + create(new KeyValue("SERVER", "server1"), new KeyValue("JOB", "job1"), new KeyValue("action", SqoopActionConstant.WRITE)); + + private static final Privilege SQOOP_SERVER1_LINK1_ALL = + create(new KeyValue("SERVER", "server1"), new KeyValue("LINK", "link1"), new KeyValue("action", SqoopActionConstant.ALL)); + private static final Privilege SQOOP_SERVER1_LINK1_READ = + create(new KeyValue("SERVER", "server1"), new KeyValue("LINK", "link1"), new KeyValue("action", SqoopActionConstant.READ)); + private static final Privilege SQOOP_SERVER1_LINK1_WRITE = + create(new KeyValue("SERVER", "server1"), new KeyValue("LINK", "link1"), new KeyValue("action", SqoopActionConstant.WRITE)); + + private static final Privilege SQOOP_SERVER1_CONNECTOR1_ALL = + create(new KeyValue("SERVER", "server1"), new KeyValue("CONNECTOR", "connector1"), new KeyValue("action", SqoopActionConstant.ALL)); + private static final Privilege SQOOP_SERVER1_CONNECTOR1_READ = + create(new KeyValue("SERVER", "server1"), new KeyValue("CONNECTOR", "connector1"), new KeyValue("action", SqoopActionConstant.READ)); + private static final Privilege SQOOP_SERVER1_CONNECTOR1_WRITE = + create(new KeyValue("SERVER", "server1"), new KeyValue("CONNECTOR", "connector1"), new KeyValue("action", SqoopActionConstant.WRITE)); + + + @Test + public void testSimpleAction() throws Exception { + //server + assertFalse(SQOOP_SERVER1_WRITE.implies(SQOOP_SERVER1_READ)); + assertFalse(SQOOP_SERVER1_READ.implies(SQOOP_SERVER1_WRITE)); + //connector + assertFalse(SQOOP_SERVER1_CONNECTOR1_WRITE.implies(SQOOP_SERVER1_CONNECTOR1_READ)); + assertFalse(SQOOP_SERVER1_CONNECTOR1_READ.implies(SQOOP_SERVER1_CONNECTOR1_WRITE)); + //job + assertFalse(SQOOP_SERVER1_JOB1_READ.implies(SQOOP_SERVER1_JOB1_WRITE)); + assertFalse(SQOOP_SERVER1_JOB1_WRITE.implies(SQOOP_SERVER1_JOB1_READ)); + //link + assertFalse(SQOOP_SERVER1_LINK1_READ.implies(SQOOP_SERVER1_LINK1_WRITE)); + assertFalse(SQOOP_SERVER1_LINK1_WRITE.implies(SQOOP_SERVER1_LINK1_READ)); + } + + @Test + public void testShorterThanRequest() throws Exception { + //job + 
assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_JOB1_ALL)); + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_JOB1_READ)); + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_JOB1_WRITE)); + + assertFalse(SQOOP_SERVER1_WRITE.implies(SQOOP_SERVER1_READ)); + assertTrue(SQOOP_SERVER1_READ.implies(SQOOP_SERVER1_JOB1_READ)); + assertTrue(SQOOP_SERVER1_WRITE.implies(SQOOP_SERVER1_JOB1_WRITE)); + + //link + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_LINK1_ALL)); + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_LINK1_READ)); + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_LINK1_WRITE)); + + assertTrue(SQOOP_SERVER1_READ.implies(SQOOP_SERVER1_LINK1_READ)); + assertTrue(SQOOP_SERVER1_WRITE.implies(SQOOP_SERVER1_LINK1_WRITE)); + + //connector + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_CONNECTOR1_ALL)); + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_CONNECTOR1_READ)); + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_CONNECTOR1_WRITE)); + + assertTrue(SQOOP_SERVER1_READ.implies(SQOOP_SERVER1_CONNECTOR1_READ)); + assertTrue(SQOOP_SERVER1_WRITE.implies(SQOOP_SERVER1_CONNECTOR1_WRITE)); + } + + @Test + public void testActionAll() throws Exception { + //server + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_READ)); + assertTrue(SQOOP_SERVER1_ALL.implies(SQOOP_SERVER1_WRITE)); + + //job + assertTrue(SQOOP_SERVER1_JOB1_ALL.implies(SQOOP_SERVER1_JOB1_READ)); + assertTrue(SQOOP_SERVER1_JOB1_ALL.implies(SQOOP_SERVER1_JOB1_WRITE)); + + //link + assertTrue(SQOOP_SERVER1_LINK1_ALL.implies(SQOOP_SERVER1_LINK1_READ)); + assertTrue(SQOOP_SERVER1_LINK1_ALL.implies(SQOOP_SERVER1_LINK1_WRITE)); + + //connector + assertTrue(SQOOP_SERVER1_CONNECTOR1_ALL.implies(SQOOP_SERVER1_CONNECTOR1_READ)); + assertTrue(SQOOP_SERVER1_CONNECTOR1_ALL.implies(SQOOP_SERVER1_CONNECTOR1_WRITE)); + } + + @Test + public void testUnexpected() throws Exception { + Privilege p = new Privilege() { + @Override + public boolean implies(Privilege p) { + return false; + } + }; + Privilege job1 = create(new KeyValue("SERVER", "server"), new KeyValue("JOB", "job1")); + assertFalse(job1.implies(null)); + assertFalse(job1.implies(p)); + assertFalse(job1.equals(null)); + assertFalse(job1.equals(p)); + } + + @Test(expected=IllegalArgumentException.class) + public void testNullString() throws Exception { + System.out.println(create((String)null)); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyString() throws Exception { + System.out.println(create("")); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyKey() throws Exception { + System.out.println(create(KV_JOINER.join("", "server1"))); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyValue() throws Exception { + System.out.println(create(KV_JOINER.join("SERVER", ""))); + } + + @Test(expected=IllegalArgumentException.class) + public void testEmptyPart() throws Exception { + System.out.println(create(AUTHORIZABLE_JOINER. + join(KV_JOINER.join("SERVER", "server1"), ""))); + } + + @Test(expected=IllegalArgumentException.class) + public void testOnlySeperators() throws Exception { + System.out.println(create(AUTHORIZABLE_JOINER. + join(KV_SEPARATOR, KV_SEPARATOR, KV_SEPARATOR))); + } + + static SqoopWildcardPrivilege create(KeyValue... 
keyValues) { + return create(AUTHORIZABLE_JOINER.join(keyValues)); + + } + static SqoopWildcardPrivilege create(String s) { + return new SqoopWildcardPrivilege(s); + } +} diff --git a/sentry-policy/sentry-policy-sqoop/src/test/resources/log4j.properties b/sentry-policy/sentry-policy-sqoop/src/test/resources/log4j.properties new file mode 100644 index 000000000..7703069e8 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/resources/log4j.properties @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Define some default values that can be overridden by system properties. +# +# For testing, it may also be convenient to specify + +log4j.rootLogger=DEBUG,console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.target=System.err +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n + +log4j.logger.org.apache.hadoop.conf.Configuration=INFO \ No newline at end of file diff --git a/sentry-policy/sentry-policy-sqoop/src/test/resources/test-authz-provider.ini b/sentry-policy/sentry-policy-sqoop/src/test/resources/test-authz-provider.ini new file mode 100644 index 000000000..a4ab5d106 --- /dev/null +++ b/sentry-policy/sentry-policy-sqoop/src/test/resources/test-authz-provider.ini @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
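+# The sections that follow use the stock Sentry policy-file layout:
+#   [groups] maps a group to a comma-separated list of roles, and
+#   [roles] maps each role to privilege strings shaped roughly like
+#     server=<name>->(job|link|connector)=<name>->action=(read|write|*)
+# (grammar summarized from the entries below; a trailing backslash continues a line)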
+ +[groups] +developer = jdbc_connector_role, hdfs_connector_role,kafka_connector_role,kite_connector_role,\ + jobs_analyst_role,links_analyst_role +analyst = jobs_analyst_role,links_analyst_role +connectors_operator = jdbc_connector_role, hdfs_connector_role,kafka_connector_role,kite_connector_role +jobs_analyst = jobs_analyst_role +job1_2_operator = job1_role,job2_role +links_analyst = links_analyst_role +link1_2_operator = link1_role,link2_role +admin = admin_role + +[roles] +admin_role = server=server1->action=* +jdbc_connector_role = server=server1->connector=generic-jdbc-connector->action=read +hdfs_connector_role = server=server1->connector=hdfs-connector->action=read +kafka_connector_role = server=server1->connector=kafka-connector->action=read +kite_connector_role = server=server1->connector=kite-connector->action=read +jobs_analyst_role = server=server1->job=all->action=* +job1_role = server=server1->job=job1->action=read +job2_role = server=server1->job=job2->action=read +links_analyst_role = server=server1->link=all->action=* +link1_role = server=server1->link=link1->action=read +link2_role = server=server1->link=link2->action=read \ No newline at end of file diff --git a/sentry-provider/pom.xml b/sentry-provider/pom.xml index 15a4f2cfc..f26f4d3fa 100644 --- a/sentry-provider/pom.xml +++ b/sentry-provider/pom.xml @@ -22,7 +22,7 @@ limitations under the License. org.apache.sentry sentry - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-provider diff --git a/sentry-provider/sentry-provider-cache/pom.xml b/sentry-provider/sentry-provider-cache/pom.xml index 2b147a9b2..c67f09429 100644 --- a/sentry-provider/sentry-provider-cache/pom.xml +++ b/sentry-provider/sentry-provider-cache/pom.xml @@ -21,7 +21,7 @@ limitations under the License. 
     <groupId>org.apache.sentry</groupId>
     <artifactId>sentry-provider</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
+    <version>1.7.0-incubating-SNAPSHOT</version>
   </parent>
   <artifactId>sentry-provider-cache</artifactId>
diff --git a/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/PrivilegeCache.java b/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/PrivilegeCache.java
index 29c6c5cf2..811b9310f 100644
--- a/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/PrivilegeCache.java
+++ b/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/PrivilegeCache.java
@@ -26,8 +26,8 @@ public interface PrivilegeCache {
    * Get the privileges for the given set of groups with the given active roles
    * from the cache
    */
-  public Set<String> listPrivileges(Set<String> groups,
+  Set<String> listPrivileges(Set<String> groups,
       ActiveRoleSet roleSet);

-  public void close();
+  void close();
 }
diff --git a/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/SimpleCacheProviderBackend.java b/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/SimpleCacheProviderBackend.java
index 4b98447b2..73ed6c201 100644
--- a/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/SimpleCacheProviderBackend.java
+++ b/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/SimpleCacheProviderBackend.java
@@ -31,11 +31,9 @@ public class SimpleCacheProviderBackend implements ProviderBackend {

   private PrivilegeCache cacheHandle;
-  private Configuration conf;
   private boolean isInitialized = false;

-  public SimpleCacheProviderBackend(Configuration conf, String resourcePath) {
-    this.conf = conf;
+  public SimpleCacheProviderBackend(Configuration conf, String resourcePath) { //NOPMD
   }

 /**
@@ -44,7 +42,9 @@ public SimpleCacheProviderBackend(Configuration conf, String resourcePath) {
   */
  @Override
  public void initialize(ProviderBackendContext context) {
-    if (isInitialized) return;
+    if (isInitialized) {
+      return;
+    }
    isInitialized = true;
    cacheHandle = (PrivilegeCache) context.getBindingHandle();
    assert cacheHandle != null;
diff --git a/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/SimplePrivilegeCache.java b/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/SimplePrivilegeCache.java
new file mode 100644
index 000000000..2643a3205
--- /dev/null
+++ b/sentry-provider/sentry-provider-cache/src/main/java/org/apache/sentry/provider/cache/SimplePrivilegeCache.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.cache;
+
+import org.apache.sentry.core.common.ActiveRoleSet;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/*
+ * This class saves and returns a user's privileges while Hive commands such as
+ * "show tables" run. It improves the performance of the Hive metadata filter.
+ */
+public class SimplePrivilegeCache implements PrivilegeCache {
+
+  private Set<String> cachedPrivileges;
+
+  public SimplePrivilegeCache(Set<String> cachedPrivileges) {
+    this.cachedPrivileges = cachedPrivileges;
+  }
+
+  // return the cached privileges
+  @Override
+  public Set<String> listPrivileges(Set<String> groups, ActiveRoleSet roleSet) {
+    if (cachedPrivileges == null) {
+      cachedPrivileges = new HashSet<String>();
+    }
+    return cachedPrivileges;
+  }
+
+  @Override
+  public void close() {
+    if (cachedPrivileges != null) {
+      cachedPrivileges.clear();
+    }
+  }
+}
diff --git a/sentry-provider/sentry-provider-cache/src/test/java/org/apache/sentry/provider/cache/TestCacheProvider.java b/sentry-provider/sentry-provider-cache/src/test/java/org/apache/sentry/provider/cache/TestCacheProvider.java
index e5b29b8fb..8c267601d 100644
--- a/sentry-provider/sentry-provider-cache/src/test/java/org/apache/sentry/provider/cache/TestCacheProvider.java
+++ b/sentry-provider/sentry-provider-cache/src/test/java/org/apache/sentry/provider/cache/TestCacheProvider.java
@@ -17,7 +17,7 @@
 package org.apache.sentry.provider.cache;

-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;

 import java.io.IOException;
 import java.util.HashSet;
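A rough wiring sketch for the cache classes above. It assumes ProviderBackendContext exposes a setter for the binding handle that initialize() reads back — implied by the cast in SimpleCacheProviderBackend but not shown in this diff:

    SimplePrivilegeCache cache =
        new SimplePrivilegeCache(Sets.newHashSet("server=server1->action=*"));
    // resourcePath is ignored by this backend (see the //NOPMD constructor above)
    SimpleCacheProviderBackend backend =
        new SimpleCacheProviderBackend(new Configuration(), null);
    ProviderBackendContext context = new ProviderBackendContext();
    context.setBindingHandle(cache);  // assumed setter, mirroring getBindingHandle()
    backend.initialize(context);      // privileges are now served straight from the cache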
diff --git a/sentry-provider/sentry-provider-common/pom.xml b/sentry-provider/sentry-provider-common/pom.xml
index 15535f171..de5a2c9bb 100644
--- a/sentry-provider/sentry-provider-common/pom.xml
+++ b/sentry-provider/sentry-provider-common/pom.xml
@@ -21,7 +21,7 @@ limitations under the License.
     <groupId>org.apache.sentry</groupId>
     <artifactId>sentry-provider</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
+    <version>1.7.0-incubating-SNAPSHOT</version>
   </parent>
   <artifactId>sentry-provider-common</artifactId>
@@ -58,7 +58,7 @@ limitations under the License.
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
-        <version>2.2</version>
+        <version>2.4</version>
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/AuthorizationComponent.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/AuthorizationComponent.java
index def34865d..c74641a69 100644
--- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/AuthorizationComponent.java
+++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/AuthorizationComponent.java
@@ -21,4 +21,6 @@
  */
 public class AuthorizationComponent{
   public static final String Search = "solr";
+  public static final String SQOOP = "sqoop";
+  public static final String KAFKA = "kafka";
 }
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/AuthorizationProvider.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/AuthorizationProvider.java
index a88d2f8f3..7141e818e 100644
--- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/AuthorizationProvider.java
+++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/AuthorizationProvider.java
@@ -26,6 +26,7 @@
 import org.apache.sentry.core.common.Authorizable;
 import org.apache.sentry.core.common.SentryConfigurationException;
 import org.apache.sentry.core.common.Subject;
+import org.apache.sentry.policy.common.PolicyEngine;

 /**
  * Implementations of AuthorizationProvider must be threadsafe.
@@ -33,7 +34,7 @@
 @ThreadSafe
 public interface AuthorizationProvider {

-  public static String SENTRY_PROVIDER = "sentry.provider";
+  String SENTRY_PROVIDER = "sentry.provider";

   /***
    * Returns validate subject privileges on given Authorizable object
@@ -47,7 +48,7 @@ public interface AuthorizationProvider {
    * @return
    *          True if the subject is authorized to perform requested action on the given object
    */
-  public boolean hasAccess(Subject subject, List<? extends Authorizable> authorizableHierarchy,
+  boolean hasAccess(Subject subject, List<? extends Authorizable> authorizableHierarchy,
       Set<? extends Action> actions, ActiveRoleSet roleSet);

   /***
@@ -55,14 +56,14 @@ public boolean hasAccess(Subject subject, List<? extends Authorizable> authoriza
    *
    * @return GroupMappingService used by the AuthorizationProvider
    */
-  public GroupMappingService getGroupMapping();
+  GroupMappingService getGroupMapping();

   /***
    * Validate the policy file format for syntax and semantic errors
    * @param strictValidation
    * @throws SentryConfigurationException
    */
-  public void validateResource(boolean strictValidation) throws SentryConfigurationException;
+  void validateResource(boolean strictValidation) throws SentryConfigurationException;

   /***
    * Returns the list privileges for the given subject
@@ -70,7 +71,7 @@ public boolean hasAccess(Subject subject, List<? extends Authorizable> authoriza
    * @return
    * @throws SentryConfigurationException
    */
-  public Set<String> listPrivilegesForSubject(Subject subject) throws SentryConfigurationException;
+  Set<String> listPrivilegesForSubject(Subject subject) throws SentryConfigurationException;

   /**
    * Returns the list privileges for the given group
@@ -78,16 +79,21 @@ public boolean hasAccess(Subject subject, List<? extends Authorizable> authoriza
    * @param groupName
    * @return
    * @throws SentryConfigurationException
    */
-  public Set<String> listPrivilegesForGroup(String groupName) throws SentryConfigurationException;
+  Set<String> listPrivilegesForGroup(String groupName) throws SentryConfigurationException;

   /***
    * Returns the list of missing privileges of the last access request
    * @return
    */
-  public List<String>
getLastFailedPrivileges();
+  List<String> getLastFailedPrivileges();

   /**
    * Frees any resources held by the provider
    */
-  public void close();
+  void close();
+
+  /**
+   * Get the policy engine
+   */
+  PolicyEngine getPolicyEngine();
 }
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/GroupMappingService.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/GroupMappingService.java
index 22371d182..7e8526107 100644
--- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/GroupMappingService.java
+++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/GroupMappingService.java
@@ -31,5 +31,5 @@ public interface GroupMappingService {
   /**
    * @return non-null list of groups for user
    */
-  public Set<String> getGroups(String user);
+  Set<String> getGroups(String user);
 }
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/HadoopGroupMappingService.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/HadoopGroupMappingService.java
index 14e2d05c9..f599dbbc2 100644
--- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/HadoopGroupMappingService.java
+++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/HadoopGroupMappingService.java
@@ -17,19 +17,19 @@
 package org.apache.sentry.provider.common;

 import java.io.IOException;
-import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;

+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.Groups;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;

 public class HadoopGroupMappingService implements GroupMappingService {

-  private static final Logger LOGGER = LoggerFactory
-      .getLogger(HadoopGroupMappingService.class);
+  private static Configuration hadoopConf;
   private final Groups groups;

   public HadoopGroupMappingService(Groups groups) {
@@ -37,16 +37,32 @@ public HadoopGroupMappingService(Groups groups) {
   }

   public HadoopGroupMappingService(Configuration conf, String resource) {
-    this(Groups.getUserToGroupsMappingService(conf));
+    if (hadoopConf == null) {
+      synchronized (HadoopGroupMappingService.class) {
+        if (hadoopConf == null) {
+          // clone the current config and add resource path
+          hadoopConf = new Configuration();
+          hadoopConf.addResource(conf);
+          if (!StringUtils.isEmpty(resource)) {
+            hadoopConf.addResource(resource);
+          }
+        }
+      }
+    }
+    this.groups = Groups.getUserToGroupsMappingService(hadoopConf);
   }

   @Override
   public Set<String> getGroups(String user) {
+    List<String> groupList = Lists.newArrayList();
     try {
-      return new HashSet<String>(groups.getGroups(user));
+      groupList = groups.getGroups(user);
     } catch (IOException e) {
-      LOGGER.warn("Unable to obtain groups for " + user, e);
+      throw new SentryGroupNotFoundException("Unable to obtain groups for " + user, e);
+    }
+    if (groupList == null || groupList.isEmpty()) {
+      throw new SentryGroupNotFoundException("Unable to obtain groups for " + user);
     }
-    return Collections.emptySet();
+    return new HashSet<String>(groupList);
   }
 }
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/HadoopGroupResourceAuthorizationProvider.java
b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/HadoopGroupResourceAuthorizationProvider.java index 626fd909c..bcd331230 100644 --- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/HadoopGroupResourceAuthorizationProvider.java +++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/HadoopGroupResourceAuthorizationProvider.java @@ -22,24 +22,25 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.Groups; import org.apache.sentry.policy.common.PolicyEngine; -import org.apache.sentry.provider.common.GroupMappingService; -import org.apache.sentry.provider.common.HadoopGroupMappingService; import com.google.common.annotations.VisibleForTesting; public class HadoopGroupResourceAuthorizationProvider extends ResourceAuthorizationProvider { + // if set to true in the Configuration, constructs a new Group object + // for the GroupMappingService rather than using Hadoop's static mapping. + public static final String CONF_PREFIX = HadoopGroupResourceAuthorizationProvider.class.getName(); + public static final String USE_NEW_GROUPS = CONF_PREFIX + ".useNewGroups"; + // resource parameter present so that other AuthorizationProviders (e.g. // LocalGroupResourceAuthorizationProvider) has the same constructor params. public HadoopGroupResourceAuthorizationProvider(String resource, PolicyEngine policy) throws IOException { - this(policy, new HadoopGroupMappingService( - Groups.getUserToGroupsMappingService(new Configuration()))); + this(new Configuration(), resource, policy); } - public HadoopGroupResourceAuthorizationProvider(Configuration conf, String resource, PolicyEngine policy) throws IOException { - this(policy, new HadoopGroupMappingService( - Groups.getUserToGroupsMappingService(conf))); + public HadoopGroupResourceAuthorizationProvider(Configuration conf, String resource, PolicyEngine policy) throws IOException { //NOPMD + this(policy, new HadoopGroupMappingService(getGroups(conf))); } @VisibleForTesting @@ -48,4 +49,11 @@ public HadoopGroupResourceAuthorizationProvider(PolicyEngine policy, super(policy, groupService); } + private static Groups getGroups(Configuration conf) { + if (conf.getBoolean(USE_NEW_GROUPS, false)) { + return new Groups(conf); + } else { + return Groups.getUserToGroupsMappingService(conf); + } + } } diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/NoAuthorizationProvider.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/NoAuthorizationProvider.java index a81452747..82b215c9b 100644 --- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/NoAuthorizationProvider.java +++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/NoAuthorizationProvider.java @@ -26,6 +26,7 @@ import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.core.common.SentryConfigurationException; import org.apache.sentry.core.common.Subject; +import org.apache.sentry.policy.common.PolicyEngine; public class NoAuthorizationProvider implements AuthorizationProvider { private GroupMappingService noGroupMappingService = new NoGroupMappingService(); @@ -43,7 +44,6 @@ public GroupMappingService getGroupMapping() { @Override public void validateResource(boolean strictValidation) throws SentryConfigurationException { - return; } @Override @@ -67,4 +67,11 @@ public List 
getLastFailedPrivileges() {

   public void close() {
   }
+
+  // This class exists only for TestNoAuthorizationProvider; this method is never
+  // called and is implemented only to satisfy the interface, so returning null
+  // is safe here.
+  @Override
+  public PolicyEngine getPolicyEngine() {
+    return null;
+  }
 }
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/PolicyFileConstants.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/PolicyFileConstants.java
new file mode 100644
index 000000000..dfe4fe075
--- /dev/null
+++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/PolicyFileConstants.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.common;
+
+public class PolicyFileConstants {
+  public static final String DATABASES = "databases";
+  public static final String GROUPS = "groups";
+  public static final String ROLES = "roles";
+  public static final String USERS = "users";
+  public static final String PRIVILEGE_SERVER_NAME = "server";
+  public static final String PRIVILEGE_DATABASE_NAME = "db";
+  public static final String PRIVILEGE_TABLE_NAME = "table";
+  public static final String PRIVILEGE_COLUMN_NAME = "column";
+  public static final String PRIVILEGE_URI_NAME = "uri";
+  public static final String PRIVILEGE_ACTION_NAME = "action";
+  public static final String PRIVILEGE_GRANT_OPTION_NAME = "grantoption";
+}
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ProviderBackend.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ProviderBackend.java
index ddb9cf9ce..b19a17098 100644
--- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ProviderBackend.java
+++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ProviderBackend.java
@@ -42,17 +42,17 @@ public interface ProviderBackend {
    * that would be backwards incompatible.
    * @param validators
    */
-  public void initialize(ProviderBackendContext context);
+  void initialize(ProviderBackendContext context);

   /**
    * Get the privileges from the backend.
    */
-  public ImmutableSet<String> getPrivileges(Set<String> groups, ActiveRoleSet roleSet, Authorizable... authorizableHierarchy);
+  ImmutableSet<String> getPrivileges(Set<String> groups, ActiveRoleSet roleSet, Authorizable... authorizableHierarchy);

   /**
    * Get the roles associated with the groups from the backend.
    */
-  public ImmutableSet<String> getRoles(Set<String> groups, ActiveRoleSet roleSet);
+  ImmutableSet<String> getRoles(Set<String> groups, ActiveRoleSet roleSet);

   /**
    * If strictValidation is true then an error is thrown for warnings
@@ -61,7 +61,7 @@ public interface ProviderBackend {
    * @param strictValidation
    * @throws SentryConfigurationException
    */
-  public void validatePolicy(boolean strictValidation) throws SentryConfigurationException;
+  void validatePolicy(boolean strictValidation) throws SentryConfigurationException;

-  public void close();
+  void close();
 }
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ResourceAuthorizationProvider.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ResourceAuthorizationProvider.java
index 6449405da..0cf0b5de4 100644
--- a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ResourceAuthorizationProvider.java
+++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/ResourceAuthorizationProvider.java
@@ -16,10 +16,10 @@
  */
 package org.apache.sentry.provider.common;

-import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_JOINER;
-import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_SPLITTER;
-import static org.apache.sentry.provider.common.ProviderConstants.KV_JOINER;
-import static org.apache.sentry.provider.common.ProviderConstants.PRIVILEGE_NAME;
+import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER;
+import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER;
+import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER;
+import static org.apache.sentry.policy.common.PolicyConstants.PRIVILEGE_NAME;

 import java.util.ArrayList;
 import java.util.HashSet;
@@ -47,23 +47,23 @@ public abstract class ResourceAuthorizationProvider implements AuthorizationProvider {
   private static final Logger LOGGER = LoggerFactory
       .getLogger(ResourceAuthorizationProvider.class);
+  private final static ThreadLocal<List<String>> lastFailedPrivileges =
+      new ThreadLocal<List<String>>() {
+        @Override
+        protected List<String> initialValue() {
+          return new ArrayList<String>();
+        }
+      };

   private final GroupMappingService groupService;
   private final PolicyEngine policy;
   private final PrivilegeFactory privilegeFactory;
-  private final ThreadLocal<List<String>> lastFailedPrivileges;

   public ResourceAuthorizationProvider(PolicyEngine policy,
       GroupMappingService groupService) {
     this.policy = policy;
     this.groupService = groupService;
     this.privilegeFactory = policy.getPrivilegeFactory();
-    this.lastFailedPrivileges = new ThreadLocal<List<String>>() {
-      @Override
-      protected List<String> initialValue() {
-        return new ArrayList<String>();
-      }
-    };
   }

   /***
@@ -109,7 +109,7 @@ private boolean doHasAccess(Subject subject,
    * Does the permission granted in the policy file imply the requested action?
    */
     boolean result = permission.implies(privilegeFactory.createPrivilege(requestPrivilege));
-      if(LOGGER.isDebugEnabled()) {
+      if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("ProviderPrivilege {}, RequestPrivilege {}, RoleSet, {}, Result {}",
            new Object[]{ permission, requestPrivilege, roleSet, result});
      }
@@ -135,23 +135,22 @@ public Privilege apply(String privilege) {
   private ImmutableSet<String> appendDefaultDBPriv(ImmutableSet<String> privileges, Authorizable[] authorizables) {
     // Only for switch db
-    if ((authorizables != null)&&(authorizables.length == 4)&&(authorizables[2].getName().equals("+"))) {
-      if ((privileges.size() == 1) && hasOnlyServerPrivilege(privileges.asList().get(0))) {
-        // Assuming authorizable[0] will always be the server
-        // This Code is only reachable only when user fires a 'use default'
-        // and the user has a privilege on atleast 1 privilized Object
-        String defaultPriv = "Server=" + authorizables[0].getName()
-            + "->Db=default->Table=*->Column=*->action=select";
-        HashSet<String> newPrivs = Sets.newHashSet(defaultPriv);
-        return ImmutableSet.copyOf(newPrivs);
-      }
+    if (authorizables != null && authorizables.length == 4 && authorizables[2].getName().equals("+")
+        && privileges.size() == 1 && hasOnlyServerPrivilege(privileges.asList().get(0))) {
+      // Assuming authorizable[0] will always be the server.
+      // This code is only reachable when the user fires a 'use default'
+      // and the user has a privilege on at least one privileged object.
+      String defaultPriv = "Server=" + authorizables[0].getName()
+          + "->Db=default->Table=*->Column=*->action=select";
+      Set<String> newPrivs = Sets.newHashSet(defaultPriv);
+      return ImmutableSet.copyOf(newPrivs);
     }
     return privileges;
   }

   private boolean hasOnlyServerPrivilege(String priv) {
     ArrayList<String> l = Lists.newArrayList(AUTHORIZABLE_SPLITTER.split(priv));
-    if ((l.size() == 1)&&(l.get(0).toLowerCase().startsWith("server"))) {
+    if (l.size() == 1 && l.get(0).toLowerCase().startsWith("server")) {
       return l.get(0).toLowerCase().split("=")[1].endsWith("+");
     }
     return false;
@@ -173,12 +172,12 @@ public void validateResource(boolean strictValidation) throws SentryConfiguratio
   @Override
   public Set<String> listPrivilegesForSubject(Subject subject) throws SentryConfigurationException {
-    return policy.getPrivileges(getGroups(subject), ActiveRoleSet.ALL, null);
+    return policy.getPrivileges(getGroups(subject), ActiveRoleSet.ALL);
   }

   @Override
   public Set<String> listPrivilegesForGroup(String groupName) throws SentryConfigurationException {
-    return policy.getPrivileges(Sets.newHashSet(groupName), ActiveRoleSet.ALL, null);
+    return policy.getPrivileges(Sets.newHashSet(groupName), ActiveRoleSet.ALL);
   }

   @Override
@@ -210,4 +209,9 @@ private List<String> buildPermissions(List<? extends Authorizable> authorizables
     }
     return requestedPermissions;
   }
+
+  @Override
+  public PolicyEngine getPolicyEngine() {
+    return policy;
+  }
 }
diff --git a/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/SentryGroupNotFoundException.java b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/SentryGroupNotFoundException.java
new file mode 100644
index 000000000..2609bd366
--- /dev/null
+++ b/sentry-provider/sentry-provider-common/src/main/java/org/apache/sentry/provider/common/SentryGroupNotFoundException.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
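Concretely, the refactored appendDefaultDBPriv above covers one narrow case; a commented walkthrough with illustrative values (the exact shapes of hierarchy and privilege strings are inferred from the checks in the code):

    // Requested hierarchy: four authorizables where the third element's name is
    // "+" (the marker the provider uses for a database-switch check, e.g. 'use default').
    // Stored privileges: exactly one, and it is server-only with a "+"-suffixed
    // value, e.g. "server=server1+", so hasOnlyServerPrivilege(...) returns true.
    // Result: the provider substitutes an implicit default-database grant:
    //   "Server=server1->Db=default->Table=*->Column=*->action=select"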
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.provider.common; + +public class SentryGroupNotFoundException extends RuntimeException { + private static final long serialVersionUID = -116202866086371881L; + + /** + * Creates a new SentryGroupNotFoundException. + */ + public SentryGroupNotFoundException() { + super(); + } + + /** + * Constructs a new SentryGroupNotFoundException. + * + * @param message + * the reason for the exception + */ + public SentryGroupNotFoundException(String message) { + super(message); + } + + /** + * Constructs a new SentryGroupNotFoundException. + * + * @param cause + * the underlying Throwable that caused this exception to be thrown. + */ + public SentryGroupNotFoundException(Throwable cause) { + super(cause); + } + + /** + * Constructs a new SentryGroupNotFoundException. + * + * @param message + * the reason for the exception + * @param cause + * the underlying Throwable that caused this exception to be thrown. + */ + public SentryGroupNotFoundException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/sentry-provider/sentry-provider-common/src/test/java/org/apache/sentry/provider/common/TestGetGroupMapping.java b/sentry-provider/sentry-provider-common/src/test/java/org/apache/sentry/provider/common/TestGetGroupMapping.java index f57198a55..14af2d49f 100644 --- a/sentry-provider/sentry-provider-common/src/test/java/org/apache/sentry/provider/common/TestGetGroupMapping.java +++ b/sentry-provider/sentry-provider-common/src/test/java/org/apache/sentry/provider/common/TestGetGroupMapping.java @@ -19,15 +19,12 @@ import static org.junit.Assert.assertSame; import java.util.Set; -import java.util.List; import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.core.common.SentryConfigurationException; import org.apache.sentry.core.common.ActiveRoleSet; import org.apache.sentry.policy.common.PrivilegeFactory; import org.apache.sentry.policy.common.PolicyEngine; -import org.apache.sentry.provider.common.GroupMappingService; -import org.apache.sentry.provider.common.ResourceAuthorizationProvider; import org.junit.Test; import com.google.common.collect.ImmutableSet; @@ -67,7 +64,6 @@ public ImmutableSet getPrivileges(Set groups, ActiveRoleSet role @Override public void validatePolicy(boolean strictValidation) throws SentryConfigurationException { - return; } @Override diff --git a/sentry-provider/sentry-provider-db/.gitignore b/sentry-provider/sentry-provider-db/.gitignore deleted file mode 100644 index a2f1f9626..000000000 --- a/sentry-provider/sentry-provider-db/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -sentry_policy_db -/target diff --git a/sentry-provider/sentry-provider-db/pom.xml b/sentry-provider/sentry-provider-db/pom.xml index 9f47b29c3..bf4dfdc1d 100644 --- a/sentry-provider/sentry-provider-db/pom.xml +++ b/sentry-provider/sentry-provider-db/pom.xml @@ -21,7 +21,7 @@ limitations under the License. 
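With HadoopGroupMappingService now throwing instead of returning an empty set, callers of getGroups inherit a new obligation; a sketch of the calling pattern (names illustrative):

    try {
      Set<String> groups = groupMappingService.getGroups("alice");
      // ... evaluate privileges against the returned groups ...
    } catch (SentryGroupNotFoundException e) {
      // Previously a failed or empty lookup surfaced as an empty group set and a
      // WARN log; now the failure must be denied or reported explicitly.
    }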
org.apache.sentry sentry-provider - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-provider-db @@ -86,6 +86,14 @@ limitations under the License. org.apache.sentry sentry-core-model-search + + org.apache.sentry + sentry-core-model-sqoop + + + org.apache.sentry + sentry-core-model-kafka + org.apache.sentry sentry-provider-common @@ -96,8 +104,8 @@ limitations under the License. test - org.apache.hive - hive-exec + org.apache.sentry + sentry-policy-search org.apache.hive @@ -142,17 +150,17 @@ limitations under the License. datanucleus-rdbms - io.dropwizard.metrics + com.codahale.metrics metrics-core ${metrics.version} - io.dropwizard.metrics + com.codahale.metrics metrics-servlets ${metrics.version} - io.dropwizard.metrics + com.codahale.metrics metrics-jvm ${metrics.version} @@ -171,11 +179,6 @@ limitations under the License. mockito-all test - - org.apache.hive - hive-metastore - ${hive.version} - org.apache.curator curator-recipes @@ -188,6 +191,10 @@ limitations under the License. org.apache.curator curator-test + + org.apache.commons + commons-pool2 + @@ -200,6 +207,13 @@ limitations under the License. package.jdo + + ${basedir}/src/main + + webapp/* + webapp/css/* + + @@ -237,6 +251,24 @@ limitations under the License. + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + false + + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyService.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyService.java index b42159852..5090fcaaa 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyService.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyService.java @@ -53,6 +53,8 @@ public interface Iface { public TListSentryPrivilegesForProviderResponse list_sentry_privileges_for_provider(TListSentryPrivilegesForProviderRequest request) throws org.apache.thrift.TException; + public TListSentryPrivilegesByAuthResponse list_sentry_privileges_by_authorizable(TListSentryPrivilegesByAuthRequest request) throws org.apache.thrift.TException; + public TDropPrivilegesResponse drop_sentry_privilege(TDropPrivilegesRequest request) throws org.apache.thrift.TException; public TRenamePrivilegesResponse rename_sentry_privilege(TRenamePrivilegesRequest request) throws org.apache.thrift.TException; @@ -79,6 +81,8 @@ public interface AsyncIface { public void list_sentry_privileges_for_provider(TListSentryPrivilegesForProviderRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void list_sentry_privileges_by_authorizable(TListSentryPrivilegesByAuthRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void drop_sentry_privilege(TDropPrivilegesRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void rename_sentry_privilege(TRenamePrivilegesRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -312,6 +316,29 @@ public TListSentryPrivilegesForProviderResponse recv_list_sentry_privileges_for_ throw new 
@@ -312,6 +316,29 @@ public TListSentryPrivilegesForProviderResponse recv_list_sentry_privileges_for_
     throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "list_sentry_privileges_for_provider failed: unknown result");
   }

+    public TListSentryPrivilegesByAuthResponse list_sentry_privileges_by_authorizable(TListSentryPrivilegesByAuthRequest request) throws org.apache.thrift.TException
+    {
+      send_list_sentry_privileges_by_authorizable(request);
+      return recv_list_sentry_privileges_by_authorizable();
+    }
+
+    public void send_list_sentry_privileges_by_authorizable(TListSentryPrivilegesByAuthRequest request) throws org.apache.thrift.TException
+    {
+      list_sentry_privileges_by_authorizable_args args = new list_sentry_privileges_by_authorizable_args();
+      args.setRequest(request);
+      sendBase("list_sentry_privileges_by_authorizable", args);
+    }
+
+    public TListSentryPrivilegesByAuthResponse recv_list_sentry_privileges_by_authorizable() throws org.apache.thrift.TException
+    {
+      list_sentry_privileges_by_authorizable_result result = new list_sentry_privileges_by_authorizable_result();
+      receiveBase(result, "list_sentry_privileges_by_authorizable");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "list_sentry_privileges_by_authorizable failed: unknown result");
+    }
+
     public TDropPrivilegesResponse drop_sentry_privilege(TDropPrivilegesRequest request) throws org.apache.thrift.TException
     {
       send_drop_sentry_privilege(request);
@@ -664,6 +691,38 @@ public TListSentryPrivilegesForProviderResponse getResult() throws org.apache.th
       }
     }

+    public void list_sentry_privileges_by_authorizable(TListSentryPrivilegesByAuthRequest request, org.apache.thrift.async.AsyncMethodCallback<list_sentry_privileges_by_authorizable_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      list_sentry_privileges_by_authorizable_call method_call = new list_sentry_privileges_by_authorizable_call(request, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class list_sentry_privileges_by_authorizable_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private TListSentryPrivilegesByAuthRequest request;
+      public list_sentry_privileges_by_authorizable_call(TListSentryPrivilegesByAuthRequest request, org.apache.thrift.async.AsyncMethodCallback<list_sentry_privileges_by_authorizable_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.request = request;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("list_sentry_privileges_by_authorizable", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        list_sentry_privileges_by_authorizable_args args = new list_sentry_privileges_by_authorizable_args();
+        args.setRequest(request);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public TListSentryPrivilegesByAuthResponse getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_list_sentry_privileges_by_authorizable();
+      }
+    }
+
     public void drop_sentry_privilege(TDropPrivilegesRequest request, org.apache.thrift.async.AsyncMethodCallback<drop_sentry_privilege_call> resultHandler) throws org.apache.thrift.TException {
       checkReady();
       drop_sentry_privilege_call method_call = new drop_sentry_privilege_call(request, resultHandler, this, ___protocolFactory, ___transport);
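The asynchronous path added above mirrors the blocking one: AsyncClient.list_sentry_privileges_by_authorizable queues a list_sentry_privileges_by_authorizable_call, and the callback's getResult() decodes the response frame by delegating to the blocking recv_ method. A sketch against the stock Thrift 0.9.0 async API, with the same illustrative (hypothetical) endpoint as before:

    // Sketch only: the non-blocking client path for the same RPC.
    import org.apache.thrift.async.AsyncMethodCallback;
    import org.apache.thrift.async.TAsyncClientManager;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TNonblockingSocket;

    public class AsyncListPrivilegesExample {
      public static void run(TListSentryPrivilegesByAuthRequest request) throws Exception {
        SentryGenericPolicyService.AsyncClient client =
            new SentryGenericPolicyService.AsyncClient(
                new TBinaryProtocol.Factory(),
                new TAsyncClientManager(),
                new TNonblockingSocket("sentry-host.example.com", 8038)); // hypothetical endpoint

        client.list_sentry_privileges_by_authorizable(request,
            new AsyncMethodCallback<SentryGenericPolicyService.AsyncClient.list_sentry_privileges_by_authorizable_call>() {
              public void onComplete(SentryGenericPolicyService.AsyncClient.list_sentry_privileges_by_authorizable_call call) {
                try {
                  TListSentryPrivilegesByAuthResponse response = call.getResult();
                  // hand the response off to application code here
                } catch (org.apache.thrift.TException e) {
                  e.printStackTrace(); // placeholder error handling
                }
              }
              public void onError(Exception e) {
                e.printStackTrace(); // placeholder error handling
              }
            });
      }
    }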
@@ -750,6 +809,7 @@ protected Processor(I iface, Map<String,  org.apache.thrift.ProcessFunction<I, ? extends  org.apache.thrift.TBase>> processMap) {
+      processMap.put("list_sentry_privileges_by_authorizable", new list_sentry_privileges_by_authorizable());

+    public static class list_sentry_privileges_by_authorizable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, list_sentry_privileges_by_authorizable_args> {
+      public list_sentry_privileges_by_authorizable() {
+        super("list_sentry_privileges_by_authorizable");
+      }
+
+      public list_sentry_privileges_by_authorizable_args getEmptyArgsInstance() {
+        return new list_sentry_privileges_by_authorizable_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public list_sentry_privileges_by_authorizable_result getResult(I iface, list_sentry_privileges_by_authorizable_args args) throws org.apache.thrift.TException {
+        list_sentry_privileges_by_authorizable_result result = new list_sentry_privileges_by_authorizable_result();
+        result.success = iface.list_sentry_privileges_by_authorizable(args.request);
+        return result;
+      }
+    }
+
     public static class drop_sentry_privilege<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_sentry_privilege_args> {
       public drop_sentry_privilege() {
         super("drop_sentry_privilege");
@@ -7511,6 +7591,732 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_sentry_privileg
   }

+  public static class list_sentry_privileges_by_authorizable_args implements org.apache.thrift.TBase<list_sentry_privileges_by_authorizable_args, list_sentry_privileges_by_authorizable_args._Fields>, java.io.Serializable, Cloneable {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("list_sentry_privileges_by_authorizable_args");
+
+    private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new list_sentry_privileges_by_authorizable_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new list_sentry_privileges_by_authorizable_argsTupleSchemeFactory());
+    }
+
+    private TListSentryPrivilegesByAuthRequest request; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      REQUEST((short)1, "request");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // REQUEST
+            return REQUEST;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TListSentryPrivilegesByAuthRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(list_sentry_privileges_by_authorizable_args.class, metaDataMap); + } + + public list_sentry_privileges_by_authorizable_args() { + } + + public list_sentry_privileges_by_authorizable_args( + TListSentryPrivilegesByAuthRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. + */ + public list_sentry_privileges_by_authorizable_args(list_sentry_privileges_by_authorizable_args other) { + if (other.isSetRequest()) { + this.request = new TListSentryPrivilegesByAuthRequest(other.request); + } + } + + public list_sentry_privileges_by_authorizable_args deepCopy() { + return new list_sentry_privileges_by_authorizable_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public TListSentryPrivilegesByAuthRequest getRequest() { + return this.request; + } + + public void setRequest(TListSentryPrivilegesByAuthRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((TListSentryPrivilegesByAuthRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof list_sentry_privileges_by_authorizable_args) + return this.equals((list_sentry_privileges_by_authorizable_args)that); + return false; + } + + public boolean equals(list_sentry_privileges_by_authorizable_args that) { + if (that == null) + return false; + + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) 
{ + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_request = true && (isSetRequest()); + builder.append(present_request); + if (present_request) + builder.append(request); + + return builder.toHashCode(); + } + + public int compareTo(list_sentry_privileges_by_authorizable_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + list_sentry_privileges_by_authorizable_args typedOther = (list_sentry_privileges_by_authorizable_args)other; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(typedOther.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, typedOther.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("list_sentry_privileges_by_authorizable_args("); + boolean first = true; + + sb.append("request:"); + if (this.request == null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class list_sentry_privileges_by_authorizable_argsStandardSchemeFactory implements SchemeFactory { + public list_sentry_privileges_by_authorizable_argsStandardScheme getScheme() { + return new list_sentry_privileges_by_authorizable_argsStandardScheme(); + } + } + + private static class list_sentry_privileges_by_authorizable_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, list_sentry_privileges_by_authorizable_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQUEST + if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new TListSentryPrivilegesByAuthRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, list_sentry_privileges_by_authorizable_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class list_sentry_privileges_by_authorizable_argsTupleSchemeFactory implements SchemeFactory { + public list_sentry_privileges_by_authorizable_argsTupleScheme getScheme() { + return new list_sentry_privileges_by_authorizable_argsTupleScheme(); + } + } + + private static class list_sentry_privileges_by_authorizable_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, list_sentry_privileges_by_authorizable_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, list_sentry_privileges_by_authorizable_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.request = new TListSentryPrivilegesByAuthRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } + + } + + public static class list_sentry_privileges_by_authorizable_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("list_sentry_privileges_by_authorizable_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new list_sentry_privileges_by_authorizable_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new list_sentry_privileges_by_authorizable_resultTupleSchemeFactory()); + } + + private TListSentryPrivilegesByAuthResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TListSentryPrivilegesByAuthResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(list_sentry_privileges_by_authorizable_result.class, metaDataMap); + } + + public list_sentry_privileges_by_authorizable_result() { + } + + public list_sentry_privileges_by_authorizable_result( + TListSentryPrivilegesByAuthResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public list_sentry_privileges_by_authorizable_result(list_sentry_privileges_by_authorizable_result other) { + if (other.isSetSuccess()) { + this.success = new TListSentryPrivilegesByAuthResponse(other.success); + } + } + + public list_sentry_privileges_by_authorizable_result deepCopy() { + return new list_sentry_privileges_by_authorizable_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public TListSentryPrivilegesByAuthResponse getSuccess() { + return this.success; + } + + public void setSuccess(TListSentryPrivilegesByAuthResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((TListSentryPrivilegesByAuthResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof list_sentry_privileges_by_authorizable_result) + return this.equals((list_sentry_privileges_by_authorizable_result)that); + return false; + } + + public boolean equals(list_sentry_privileges_by_authorizable_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + return builder.toHashCode(); + } + + public int compareTo(list_sentry_privileges_by_authorizable_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + list_sentry_privileges_by_authorizable_result typedOther = (list_sentry_privileges_by_authorizable_result)other; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("list_sentry_privileges_by_authorizable_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class list_sentry_privileges_by_authorizable_resultStandardSchemeFactory implements SchemeFactory { + public list_sentry_privileges_by_authorizable_resultStandardScheme getScheme() { + return new list_sentry_privileges_by_authorizable_resultStandardScheme(); + } + } + + private static class list_sentry_privileges_by_authorizable_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, list_sentry_privileges_by_authorizable_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new TListSentryPrivilegesByAuthResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, list_sentry_privileges_by_authorizable_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class list_sentry_privileges_by_authorizable_resultTupleSchemeFactory implements SchemeFactory { + public list_sentry_privileges_by_authorizable_resultTupleScheme getScheme() { + return new list_sentry_privileges_by_authorizable_resultTupleScheme(); + } + } + + private static class list_sentry_privileges_by_authorizable_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, 
list_sentry_privileges_by_authorizable_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, list_sentry_privileges_by_authorizable_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new TListSentryPrivilegesByAuthResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + public static class drop_sentry_privilege_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_sentry_privilege_args"); diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleAddGroupsRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleAddGroupsRequest.java index a0c30fec7..330d37c95 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleAddGroupsRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleAddGroupsRequest.java @@ -144,7 +144,7 @@ public String getFieldName() { } public TAlterSentryRoleAddGroupsRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -194,7 +194,7 @@ public TAlterSentryRoleAddGroupsRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleDeleteGroupsRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleDeleteGroupsRequest.java index 156688cc4..e7b65cdbf 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleDeleteGroupsRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleDeleteGroupsRequest.java @@ -144,7 +144,7 @@ public String getFieldName() { } public TAlterSentryRoleDeleteGroupsRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -194,7 +194,7 @@ public TAlterSentryRoleDeleteGroupsRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleGrantPrivilegeRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleGrantPrivilegeRequest.java index 51e10171f..4e245a354 100644 --- 
a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleGrantPrivilegeRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleGrantPrivilegeRequest.java @@ -143,7 +143,7 @@ public String getFieldName() { } public TAlterSentryRoleGrantPrivilegeRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -189,7 +189,7 @@ public TAlterSentryRoleGrantPrivilegeRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleRevokePrivilegeRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleRevokePrivilegeRequest.java index 07b155fb1..e9e06ace4 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleRevokePrivilegeRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TAlterSentryRoleRevokePrivilegeRequest.java @@ -143,7 +143,7 @@ public String getFieldName() { } public TAlterSentryRoleRevokePrivilegeRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -189,7 +189,7 @@ public TAlterSentryRoleRevokePrivilegeRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TCreateSentryRoleRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TCreateSentryRoleRequest.java index 07f0ecab9..824361d7b 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TCreateSentryRoleRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TCreateSentryRoleRequest.java @@ -136,7 +136,7 @@ public String getFieldName() { } public TCreateSentryRoleRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -177,7 +177,7 @@ public TCreateSentryRoleRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TDropPrivilegesRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TDropPrivilegesRequest.java index 26b136aca..667be2ef6 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TDropPrivilegesRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TDropPrivilegesRequest.java @@ -136,7 +136,7 @@ public String getFieldName() { } public TDropPrivilegesRequest() { - this.protocol_version = 1; + 
this.protocol_version = 2; } @@ -177,7 +177,7 @@ public TDropPrivilegesRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.privilege = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TDropSentryRoleRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TDropSentryRoleRequest.java index 69585424c..1e0c99709 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TDropSentryRoleRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TDropSentryRoleRequest.java @@ -136,7 +136,7 @@ public String getFieldName() { } public TDropSentryRoleRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -177,7 +177,7 @@ public TDropSentryRoleRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesByAuthRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesByAuthRequest.java new file mode 100644 index 000000000..7a341e43e --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesByAuthRequest.java @@ -0,0 +1,1114 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.sentry.provider.db.generic.service.thrift; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TListSentryPrivilegesByAuthRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TListSentryPrivilegesByAuthRequest"); + + private static final org.apache.thrift.protocol.TField PROTOCOL_VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("protocol_version", org.apache.thrift.protocol.TType.I32, (short)1); + private static final org.apache.thrift.protocol.TField REQUESTOR_USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("requestorUserName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField 
COMPONENT_FIELD_DESC = new org.apache.thrift.protocol.TField("component", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField SERVICE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("serviceName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField AUTHORIZABLES_SET_FIELD_DESC = new org.apache.thrift.protocol.TField("authorizablesSet", org.apache.thrift.protocol.TType.SET, (short)5); + private static final org.apache.thrift.protocol.TField GROUPS_FIELD_DESC = new org.apache.thrift.protocol.TField("groups", org.apache.thrift.protocol.TType.SET, (short)6); + private static final org.apache.thrift.protocol.TField ROLE_SET_FIELD_DESC = new org.apache.thrift.protocol.TField("roleSet", org.apache.thrift.protocol.TType.STRUCT, (short)7); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TListSentryPrivilegesByAuthRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TListSentryPrivilegesByAuthRequestTupleSchemeFactory()); + } + + private int protocol_version; // required + private String requestorUserName; // required + private String component; // required + private String serviceName; // required + private Set authorizablesSet; // required + private Set groups; // optional + private TSentryActiveRoleSet roleSet; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + PROTOCOL_VERSION((short)1, "protocol_version"), + REQUESTOR_USER_NAME((short)2, "requestorUserName"), + COMPONENT((short)3, "component"), + SERVICE_NAME((short)4, "serviceName"), + AUTHORIZABLES_SET((short)5, "authorizablesSet"), + GROUPS((short)6, "groups"), + ROLE_SET((short)7, "roleSet"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // PROTOCOL_VERSION + return PROTOCOL_VERSION; + case 2: // REQUESTOR_USER_NAME + return REQUESTOR_USER_NAME; + case 3: // COMPONENT + return COMPONENT; + case 4: // SERVICE_NAME + return SERVICE_NAME; + case 5: // AUTHORIZABLES_SET + return AUTHORIZABLES_SET; + case 6: // GROUPS + return GROUPS; + case 7: // ROLE_SET + return ROLE_SET; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __PROTOCOL_VERSION_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private _Fields optionals[] = {_Fields.GROUPS,_Fields.ROLE_SET}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.PROTOCOL_VERSION, new org.apache.thrift.meta_data.FieldMetaData("protocol_version", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.REQUESTOR_USER_NAME, new org.apache.thrift.meta_data.FieldMetaData("requestorUserName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.COMPONENT, new org.apache.thrift.meta_data.FieldMetaData("component", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.SERVICE_NAME, new org.apache.thrift.meta_data.FieldMetaData("serviceName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.AUTHORIZABLES_SET, new org.apache.thrift.meta_data.FieldMetaData("authorizablesSet", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.GROUPS, new org.apache.thrift.meta_data.FieldMetaData("groups", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.ROLE_SET, new org.apache.thrift.meta_data.FieldMetaData("roleSet", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryActiveRoleSet.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TListSentryPrivilegesByAuthRequest.class, metaDataMap); + } + + public TListSentryPrivilegesByAuthRequest() { + this.protocol_version = 2; + + } + + public TListSentryPrivilegesByAuthRequest( + int protocol_version, + String requestorUserName, + String component, + String serviceName, + Set authorizablesSet) + { + this(); + this.protocol_version = protocol_version; + setProtocol_versionIsSet(true); + this.requestorUserName = requestorUserName; + this.component = component; + this.serviceName = serviceName; + this.authorizablesSet = authorizablesSet; + } + + /** + * Performs a deep copy on other. 
+ */ + public TListSentryPrivilegesByAuthRequest(TListSentryPrivilegesByAuthRequest other) { + __isset_bitfield = other.__isset_bitfield; + this.protocol_version = other.protocol_version; + if (other.isSetRequestorUserName()) { + this.requestorUserName = other.requestorUserName; + } + if (other.isSetComponent()) { + this.component = other.component; + } + if (other.isSetServiceName()) { + this.serviceName = other.serviceName; + } + if (other.isSetAuthorizablesSet()) { + Set __this__authorizablesSet = new HashSet(); + for (String other_element : other.authorizablesSet) { + __this__authorizablesSet.add(other_element); + } + this.authorizablesSet = __this__authorizablesSet; + } + if (other.isSetGroups()) { + Set __this__groups = new HashSet(); + for (String other_element : other.groups) { + __this__groups.add(other_element); + } + this.groups = __this__groups; + } + if (other.isSetRoleSet()) { + this.roleSet = new TSentryActiveRoleSet(other.roleSet); + } + } + + public TListSentryPrivilegesByAuthRequest deepCopy() { + return new TListSentryPrivilegesByAuthRequest(this); + } + + @Override + public void clear() { + this.protocol_version = 2; + + this.requestorUserName = null; + this.component = null; + this.serviceName = null; + this.authorizablesSet = null; + this.groups = null; + this.roleSet = null; + } + + public int getProtocol_version() { + return this.protocol_version; + } + + public void setProtocol_version(int protocol_version) { + this.protocol_version = protocol_version; + setProtocol_versionIsSet(true); + } + + public void unsetProtocol_version() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID); + } + + /** Returns true if field protocol_version is set (has been assigned a value) and false otherwise */ + public boolean isSetProtocol_version() { + return EncodingUtils.testBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID); + } + + public void setProtocol_versionIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID, value); + } + + public String getRequestorUserName() { + return this.requestorUserName; + } + + public void setRequestorUserName(String requestorUserName) { + this.requestorUserName = requestorUserName; + } + + public void unsetRequestorUserName() { + this.requestorUserName = null; + } + + /** Returns true if field requestorUserName is set (has been assigned a value) and false otherwise */ + public boolean isSetRequestorUserName() { + return this.requestorUserName != null; + } + + public void setRequestorUserNameIsSet(boolean value) { + if (!value) { + this.requestorUserName = null; + } + } + + public String getComponent() { + return this.component; + } + + public void setComponent(String component) { + this.component = component; + } + + public void unsetComponent() { + this.component = null; + } + + /** Returns true if field component is set (has been assigned a value) and false otherwise */ + public boolean isSetComponent() { + return this.component != null; + } + + public void setComponentIsSet(boolean value) { + if (!value) { + this.component = null; + } + } + + public String getServiceName() { + return this.serviceName; + } + + public void setServiceName(String serviceName) { + this.serviceName = serviceName; + } + + public void unsetServiceName() { + this.serviceName = null; + } + + /** Returns true if field serviceName is set (has been assigned a value) and false otherwise */ + public boolean isSetServiceName() { + return this.serviceName != null; + } + + public 
void setServiceNameIsSet(boolean value) { + if (!value) { + this.serviceName = null; + } + } + + public int getAuthorizablesSetSize() { + return (this.authorizablesSet == null) ? 0 : this.authorizablesSet.size(); + } + + public java.util.Iterator getAuthorizablesSetIterator() { + return (this.authorizablesSet == null) ? null : this.authorizablesSet.iterator(); + } + + public void addToAuthorizablesSet(String elem) { + if (this.authorizablesSet == null) { + this.authorizablesSet = new HashSet(); + } + this.authorizablesSet.add(elem); + } + + public Set getAuthorizablesSet() { + return this.authorizablesSet; + } + + public void setAuthorizablesSet(Set authorizablesSet) { + this.authorizablesSet = authorizablesSet; + } + + public void unsetAuthorizablesSet() { + this.authorizablesSet = null; + } + + /** Returns true if field authorizablesSet is set (has been assigned a value) and false otherwise */ + public boolean isSetAuthorizablesSet() { + return this.authorizablesSet != null; + } + + public void setAuthorizablesSetIsSet(boolean value) { + if (!value) { + this.authorizablesSet = null; + } + } + + public int getGroupsSize() { + return (this.groups == null) ? 0 : this.groups.size(); + } + + public java.util.Iterator getGroupsIterator() { + return (this.groups == null) ? null : this.groups.iterator(); + } + + public void addToGroups(String elem) { + if (this.groups == null) { + this.groups = new HashSet(); + } + this.groups.add(elem); + } + + public Set getGroups() { + return this.groups; + } + + public void setGroups(Set groups) { + this.groups = groups; + } + + public void unsetGroups() { + this.groups = null; + } + + /** Returns true if field groups is set (has been assigned a value) and false otherwise */ + public boolean isSetGroups() { + return this.groups != null; + } + + public void setGroupsIsSet(boolean value) { + if (!value) { + this.groups = null; + } + } + + public TSentryActiveRoleSet getRoleSet() { + return this.roleSet; + } + + public void setRoleSet(TSentryActiveRoleSet roleSet) { + this.roleSet = roleSet; + } + + public void unsetRoleSet() { + this.roleSet = null; + } + + /** Returns true if field roleSet is set (has been assigned a value) and false otherwise */ + public boolean isSetRoleSet() { + return this.roleSet != null; + } + + public void setRoleSetIsSet(boolean value) { + if (!value) { + this.roleSet = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case PROTOCOL_VERSION: + if (value == null) { + unsetProtocol_version(); + } else { + setProtocol_version((Integer)value); + } + break; + + case REQUESTOR_USER_NAME: + if (value == null) { + unsetRequestorUserName(); + } else { + setRequestorUserName((String)value); + } + break; + + case COMPONENT: + if (value == null) { + unsetComponent(); + } else { + setComponent((String)value); + } + break; + + case SERVICE_NAME: + if (value == null) { + unsetServiceName(); + } else { + setServiceName((String)value); + } + break; + + case AUTHORIZABLES_SET: + if (value == null) { + unsetAuthorizablesSet(); + } else { + setAuthorizablesSet((Set)value); + } + break; + + case GROUPS: + if (value == null) { + unsetGroups(); + } else { + setGroups((Set)value); + } + break; + + case ROLE_SET: + if (value == null) { + unsetRoleSet(); + } else { + setRoleSet((TSentryActiveRoleSet)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case PROTOCOL_VERSION: + return Integer.valueOf(getProtocol_version()); + + case REQUESTOR_USER_NAME: + return 
getRequestorUserName(); + + case COMPONENT: + return getComponent(); + + case SERVICE_NAME: + return getServiceName(); + + case AUTHORIZABLES_SET: + return getAuthorizablesSet(); + + case GROUPS: + return getGroups(); + + case ROLE_SET: + return getRoleSet(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case PROTOCOL_VERSION: + return isSetProtocol_version(); + case REQUESTOR_USER_NAME: + return isSetRequestorUserName(); + case COMPONENT: + return isSetComponent(); + case SERVICE_NAME: + return isSetServiceName(); + case AUTHORIZABLES_SET: + return isSetAuthorizablesSet(); + case GROUPS: + return isSetGroups(); + case ROLE_SET: + return isSetRoleSet(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TListSentryPrivilegesByAuthRequest) + return this.equals((TListSentryPrivilegesByAuthRequest)that); + return false; + } + + public boolean equals(TListSentryPrivilegesByAuthRequest that) { + if (that == null) + return false; + + boolean this_present_protocol_version = true; + boolean that_present_protocol_version = true; + if (this_present_protocol_version || that_present_protocol_version) { + if (!(this_present_protocol_version && that_present_protocol_version)) + return false; + if (this.protocol_version != that.protocol_version) + return false; + } + + boolean this_present_requestorUserName = true && this.isSetRequestorUserName(); + boolean that_present_requestorUserName = true && that.isSetRequestorUserName(); + if (this_present_requestorUserName || that_present_requestorUserName) { + if (!(this_present_requestorUserName && that_present_requestorUserName)) + return false; + if (!this.requestorUserName.equals(that.requestorUserName)) + return false; + } + + boolean this_present_component = true && this.isSetComponent(); + boolean that_present_component = true && that.isSetComponent(); + if (this_present_component || that_present_component) { + if (!(this_present_component && that_present_component)) + return false; + if (!this.component.equals(that.component)) + return false; + } + + boolean this_present_serviceName = true && this.isSetServiceName(); + boolean that_present_serviceName = true && that.isSetServiceName(); + if (this_present_serviceName || that_present_serviceName) { + if (!(this_present_serviceName && that_present_serviceName)) + return false; + if (!this.serviceName.equals(that.serviceName)) + return false; + } + + boolean this_present_authorizablesSet = true && this.isSetAuthorizablesSet(); + boolean that_present_authorizablesSet = true && that.isSetAuthorizablesSet(); + if (this_present_authorizablesSet || that_present_authorizablesSet) { + if (!(this_present_authorizablesSet && that_present_authorizablesSet)) + return false; + if (!this.authorizablesSet.equals(that.authorizablesSet)) + return false; + } + + boolean this_present_groups = true && this.isSetGroups(); + boolean that_present_groups = true && that.isSetGroups(); + if (this_present_groups || that_present_groups) { + if (!(this_present_groups && that_present_groups)) + return false; + if (!this.groups.equals(that.groups)) + return false; + } + + boolean this_present_roleSet = true && this.isSetRoleSet(); + boolean that_present_roleSet = true && that.isSetRoleSet(); + 
if (this_present_roleSet || that_present_roleSet) { + if (!(this_present_roleSet && that_present_roleSet)) + return false; + if (!this.roleSet.equals(that.roleSet)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_protocol_version = true; + builder.append(present_protocol_version); + if (present_protocol_version) + builder.append(protocol_version); + + boolean present_requestorUserName = true && (isSetRequestorUserName()); + builder.append(present_requestorUserName); + if (present_requestorUserName) + builder.append(requestorUserName); + + boolean present_component = true && (isSetComponent()); + builder.append(present_component); + if (present_component) + builder.append(component); + + boolean present_serviceName = true && (isSetServiceName()); + builder.append(present_serviceName); + if (present_serviceName) + builder.append(serviceName); + + boolean present_authorizablesSet = true && (isSetAuthorizablesSet()); + builder.append(present_authorizablesSet); + if (present_authorizablesSet) + builder.append(authorizablesSet); + + boolean present_groups = true && (isSetGroups()); + builder.append(present_groups); + if (present_groups) + builder.append(groups); + + boolean present_roleSet = true && (isSetRoleSet()); + builder.append(present_roleSet); + if (present_roleSet) + builder.append(roleSet); + + return builder.toHashCode(); + } + + public int compareTo(TListSentryPrivilegesByAuthRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TListSentryPrivilegesByAuthRequest typedOther = (TListSentryPrivilegesByAuthRequest)other; + + lastComparison = Boolean.valueOf(isSetProtocol_version()).compareTo(typedOther.isSetProtocol_version()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetProtocol_version()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.protocol_version, typedOther.protocol_version); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRequestorUserName()).compareTo(typedOther.isSetRequestorUserName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequestorUserName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requestorUserName, typedOther.requestorUserName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetComponent()).compareTo(typedOther.isSetComponent()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetComponent()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.component, typedOther.component); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetServiceName()).compareTo(typedOther.isSetServiceName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetServiceName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serviceName, typedOther.serviceName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetAuthorizablesSet()).compareTo(typedOther.isSetAuthorizablesSet()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetAuthorizablesSet()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authorizablesSet, typedOther.authorizablesSet); + if (lastComparison != 0) { + return 
lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetGroups()).compareTo(typedOther.isSetGroups()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetGroups()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.groups, typedOther.groups); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRoleSet()).compareTo(typedOther.isSetRoleSet()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRoleSet()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.roleSet, typedOther.roleSet); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TListSentryPrivilegesByAuthRequest("); + boolean first = true; + + sb.append("protocol_version:"); + sb.append(this.protocol_version); + first = false; + if (!first) sb.append(", "); + sb.append("requestorUserName:"); + if (this.requestorUserName == null) { + sb.append("null"); + } else { + sb.append(this.requestorUserName); + } + first = false; + if (!first) sb.append(", "); + sb.append("component:"); + if (this.component == null) { + sb.append("null"); + } else { + sb.append(this.component); + } + first = false; + if (!first) sb.append(", "); + sb.append("serviceName:"); + if (this.serviceName == null) { + sb.append("null"); + } else { + sb.append(this.serviceName); + } + first = false; + if (!first) sb.append(", "); + sb.append("authorizablesSet:"); + if (this.authorizablesSet == null) { + sb.append("null"); + } else { + sb.append(this.authorizablesSet); + } + first = false; + if (isSetGroups()) { + if (!first) sb.append(", "); + sb.append("groups:"); + if (this.groups == null) { + sb.append("null"); + } else { + sb.append(this.groups); + } + first = false; + } + if (isSetRoleSet()) { + if (!first) sb.append(", "); + sb.append("roleSet:"); + if (this.roleSet == null) { + sb.append("null"); + } else { + sb.append(this.roleSet); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetProtocol_version()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'protocol_version' is unset! Struct:" + toString()); + } + + if (!isSetRequestorUserName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'requestorUserName' is unset! Struct:" + toString()); + } + + if (!isSetComponent()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'component' is unset! Struct:" + toString()); + } + + if (!isSetServiceName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'serviceName' is unset! Struct:" + toString()); + } + + if (!isSetAuthorizablesSet()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'authorizablesSet' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + if (roleSet != null) { + roleSet.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TListSentryPrivilegesByAuthRequestStandardSchemeFactory implements SchemeFactory { + public TListSentryPrivilegesByAuthRequestStandardScheme getScheme() { + return new TListSentryPrivilegesByAuthRequestStandardScheme(); + } + } + + private static class TListSentryPrivilegesByAuthRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TListSentryPrivilegesByAuthRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // PROTOCOL_VERSION + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.protocol_version = iprot.readI32(); + struct.setProtocol_versionIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // REQUESTOR_USER_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.requestorUserName = iprot.readString(); + struct.setRequestorUserNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // COMPONENT + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.component = iprot.readString(); + struct.setComponentIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // SERVICE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.serviceName = iprot.readString(); + struct.setServiceNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // AUTHORIZABLES_SET + if (schemeField.type == org.apache.thrift.protocol.TType.SET) { + { + org.apache.thrift.protocol.TSet _set122 = iprot.readSetBegin(); + struct.authorizablesSet = new HashSet(2*_set122.size); + for (int _i123 = 0; _i123 < _set122.size; ++_i123) + { + String _elem124; // required + _elem124 = iprot.readString(); + struct.authorizablesSet.add(_elem124); + } + iprot.readSetEnd(); + } + struct.setAuthorizablesSetIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // GROUPS + if (schemeField.type == org.apache.thrift.protocol.TType.SET) { + { + org.apache.thrift.protocol.TSet _set125 = iprot.readSetBegin(); + struct.groups = new HashSet(2*_set125.size); + for (int _i126 = 0; _i126 < _set125.size; 
++_i126) + { + String _elem127; // required + _elem127 = iprot.readString(); + struct.groups.add(_elem127); + } + iprot.readSetEnd(); + } + struct.setGroupsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // ROLE_SET + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.roleSet = new TSentryActiveRoleSet(); + struct.roleSet.read(iprot); + struct.setRoleSetIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TListSentryPrivilegesByAuthRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(PROTOCOL_VERSION_FIELD_DESC); + oprot.writeI32(struct.protocol_version); + oprot.writeFieldEnd(); + if (struct.requestorUserName != null) { + oprot.writeFieldBegin(REQUESTOR_USER_NAME_FIELD_DESC); + oprot.writeString(struct.requestorUserName); + oprot.writeFieldEnd(); + } + if (struct.component != null) { + oprot.writeFieldBegin(COMPONENT_FIELD_DESC); + oprot.writeString(struct.component); + oprot.writeFieldEnd(); + } + if (struct.serviceName != null) { + oprot.writeFieldBegin(SERVICE_NAME_FIELD_DESC); + oprot.writeString(struct.serviceName); + oprot.writeFieldEnd(); + } + if (struct.authorizablesSet != null) { + oprot.writeFieldBegin(AUTHORIZABLES_SET_FIELD_DESC); + { + oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.authorizablesSet.size())); + for (String _iter128 : struct.authorizablesSet) + { + oprot.writeString(_iter128); + } + oprot.writeSetEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.groups != null) { + if (struct.isSetGroups()) { + oprot.writeFieldBegin(GROUPS_FIELD_DESC); + { + oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.groups.size())); + for (String _iter129 : struct.groups) + { + oprot.writeString(_iter129); + } + oprot.writeSetEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.roleSet != null) { + if (struct.isSetRoleSet()) { + oprot.writeFieldBegin(ROLE_SET_FIELD_DESC); + struct.roleSet.write(oprot); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TListSentryPrivilegesByAuthRequestTupleSchemeFactory implements SchemeFactory { + public TListSentryPrivilegesByAuthRequestTupleScheme getScheme() { + return new TListSentryPrivilegesByAuthRequestTupleScheme(); + } + } + + private static class TListSentryPrivilegesByAuthRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TListSentryPrivilegesByAuthRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI32(struct.protocol_version); + oprot.writeString(struct.requestorUserName); + oprot.writeString(struct.component); + oprot.writeString(struct.serviceName); + { + oprot.writeI32(struct.authorizablesSet.size()); + for (String _iter130 : struct.authorizablesSet) + { + oprot.writeString(_iter130); + } + } + BitSet optionals = new BitSet(); + if (struct.isSetGroups()) { + optionals.set(0); + } + if (struct.isSetRoleSet()) { + optionals.set(1); + } + 
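/*
 * [editor's note] The tuple scheme writes the required fields positionally and
 * then records which optional fields follow as a bitset: bit 0 = groups,
 * bit 1 = roleSet. The matching read() below consumes the same two-bit bitset
 * before conditionally reading each optional field, so writer and reader must
 * agree on this bit ordering.
 */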
oprot.writeBitSet(optionals, 2); + if (struct.isSetGroups()) { + { + oprot.writeI32(struct.groups.size()); + for (String _iter131 : struct.groups) + { + oprot.writeString(_iter131); + } + } + } + if (struct.isSetRoleSet()) { + struct.roleSet.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TListSentryPrivilegesByAuthRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.protocol_version = iprot.readI32(); + struct.setProtocol_versionIsSet(true); + struct.requestorUserName = iprot.readString(); + struct.setRequestorUserNameIsSet(true); + struct.component = iprot.readString(); + struct.setComponentIsSet(true); + struct.serviceName = iprot.readString(); + struct.setServiceNameIsSet(true); + { + org.apache.thrift.protocol.TSet _set132 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.authorizablesSet = new HashSet(2*_set132.size); + for (int _i133 = 0; _i133 < _set132.size; ++_i133) + { + String _elem134; // required + _elem134 = iprot.readString(); + struct.authorizablesSet.add(_elem134); + } + } + struct.setAuthorizablesSetIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TSet _set135 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.groups = new HashSet(2*_set135.size); + for (int _i136 = 0; _i136 < _set135.size; ++_i136) + { + String _elem137; // required + _elem137 = iprot.readString(); + struct.groups.add(_elem137); + } + } + struct.setGroupsIsSet(true); + } + if (incoming.get(1)) { + struct.roleSet = new TSentryActiveRoleSet(); + struct.roleSet.read(iprot); + struct.setRoleSetIsSet(true); + } + } + } + +} + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesByAuthResponse.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesByAuthResponse.java new file mode 100644 index 000000000..5309da1a9 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesByAuthResponse.java @@ -0,0 +1,565 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.sentry.provider.db.generic.service.thrift; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TListSentryPrivilegesByAuthResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TListSentryPrivilegesByAuthResponse");
+
+  private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField PRIVILEGES_MAP_BY_AUTH_FIELD_DESC = new org.apache.thrift.protocol.TField("privilegesMapByAuth", org.apache.thrift.protocol.TType.MAP, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TListSentryPrivilegesByAuthResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TListSentryPrivilegesByAuthResponseTupleSchemeFactory());
+  }
+
+  private org.apache.sentry.service.thrift.TSentryResponseStatus status; // required
+  private Map<String,TSentryPrivilegeMap> privilegesMapByAuth; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    STATUS((short)1, "status"),
+    PRIVILEGES_MAP_BY_AUTH((short)2, "privilegesMapByAuth");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // STATUS
+          return STATUS;
+        case 2: // PRIVILEGES_MAP_BY_AUTH
+          return PRIVILEGES_MAP_BY_AUTH;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private _Fields optionals[] = {_Fields.PRIVILEGES_MAP_BY_AUTH}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.sentry.service.thrift.TSentryResponseStatus.class))); + tmpMap.put(_Fields.PRIVILEGES_MAP_BY_AUTH, new org.apache.thrift.meta_data.FieldMetaData("privilegesMapByAuth", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryPrivilegeMap.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TListSentryPrivilegesByAuthResponse.class, metaDataMap); + } + + public TListSentryPrivilegesByAuthResponse() { + } + + public TListSentryPrivilegesByAuthResponse( + org.apache.sentry.service.thrift.TSentryResponseStatus status) + { + this(); + this.status = status; + } + + /** + * Performs a deep copy on other. 
+ */ + public TListSentryPrivilegesByAuthResponse(TListSentryPrivilegesByAuthResponse other) { + if (other.isSetStatus()) { + this.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(other.status); + } + if (other.isSetPrivilegesMapByAuth()) { + Map __this__privilegesMapByAuth = new HashMap(); + for (Map.Entry other_element : other.privilegesMapByAuth.entrySet()) { + + String other_element_key = other_element.getKey(); + TSentryPrivilegeMap other_element_value = other_element.getValue(); + + String __this__privilegesMapByAuth_copy_key = other_element_key; + + TSentryPrivilegeMap __this__privilegesMapByAuth_copy_value = new TSentryPrivilegeMap(other_element_value); + + __this__privilegesMapByAuth.put(__this__privilegesMapByAuth_copy_key, __this__privilegesMapByAuth_copy_value); + } + this.privilegesMapByAuth = __this__privilegesMapByAuth; + } + } + + public TListSentryPrivilegesByAuthResponse deepCopy() { + return new TListSentryPrivilegesByAuthResponse(this); + } + + @Override + public void clear() { + this.status = null; + this.privilegesMapByAuth = null; + } + + public org.apache.sentry.service.thrift.TSentryResponseStatus getStatus() { + return this.status; + } + + public void setStatus(org.apache.sentry.service.thrift.TSentryResponseStatus status) { + this.status = status; + } + + public void unsetStatus() { + this.status = null; + } + + /** Returns true if field status is set (has been assigned a value) and false otherwise */ + public boolean isSetStatus() { + return this.status != null; + } + + public void setStatusIsSet(boolean value) { + if (!value) { + this.status = null; + } + } + + public int getPrivilegesMapByAuthSize() { + return (this.privilegesMapByAuth == null) ? 0 : this.privilegesMapByAuth.size(); + } + + public void putToPrivilegesMapByAuth(String key, TSentryPrivilegeMap val) { + if (this.privilegesMapByAuth == null) { + this.privilegesMapByAuth = new HashMap(); + } + this.privilegesMapByAuth.put(key, val); + } + + public Map getPrivilegesMapByAuth() { + return this.privilegesMapByAuth; + } + + public void setPrivilegesMapByAuth(Map privilegesMapByAuth) { + this.privilegesMapByAuth = privilegesMapByAuth; + } + + public void unsetPrivilegesMapByAuth() { + this.privilegesMapByAuth = null; + } + + /** Returns true if field privilegesMapByAuth is set (has been assigned a value) and false otherwise */ + public boolean isSetPrivilegesMapByAuth() { + return this.privilegesMapByAuth != null; + } + + public void setPrivilegesMapByAuthIsSet(boolean value) { + if (!value) { + this.privilegesMapByAuth = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case STATUS: + if (value == null) { + unsetStatus(); + } else { + setStatus((org.apache.sentry.service.thrift.TSentryResponseStatus)value); + } + break; + + case PRIVILEGES_MAP_BY_AUTH: + if (value == null) { + unsetPrivilegesMapByAuth(); + } else { + setPrivilegesMapByAuth((Map)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case STATUS: + return getStatus(); + + case PRIVILEGES_MAP_BY_AUTH: + return getPrivilegesMapByAuth(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case STATUS: + return isSetStatus(); + case PRIVILEGES_MAP_BY_AUTH: + return isSetPrivilegesMapByAuth(); + } + 
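/*
 * [editor's note] The switch above covers every _Fields constant, so the throw
 * that follows is unreachable unless the struct gains a field without this
 * method being regenerated. "Is set" here means non-null: both fields of the
 * response are object references, so no isset bitfield is needed (unlike the
 * request's primitive protocol_version).
 */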
throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TListSentryPrivilegesByAuthResponse) + return this.equals((TListSentryPrivilegesByAuthResponse)that); + return false; + } + + public boolean equals(TListSentryPrivilegesByAuthResponse that) { + if (that == null) + return false; + + boolean this_present_status = true && this.isSetStatus(); + boolean that_present_status = true && that.isSetStatus(); + if (this_present_status || that_present_status) { + if (!(this_present_status && that_present_status)) + return false; + if (!this.status.equals(that.status)) + return false; + } + + boolean this_present_privilegesMapByAuth = true && this.isSetPrivilegesMapByAuth(); + boolean that_present_privilegesMapByAuth = true && that.isSetPrivilegesMapByAuth(); + if (this_present_privilegesMapByAuth || that_present_privilegesMapByAuth) { + if (!(this_present_privilegesMapByAuth && that_present_privilegesMapByAuth)) + return false; + if (!this.privilegesMapByAuth.equals(that.privilegesMapByAuth)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_status = true && (isSetStatus()); + builder.append(present_status); + if (present_status) + builder.append(status); + + boolean present_privilegesMapByAuth = true && (isSetPrivilegesMapByAuth()); + builder.append(present_privilegesMapByAuth); + if (present_privilegesMapByAuth) + builder.append(privilegesMapByAuth); + + return builder.toHashCode(); + } + + public int compareTo(TListSentryPrivilegesByAuthResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TListSentryPrivilegesByAuthResponse typedOther = (TListSentryPrivilegesByAuthResponse)other; + + lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStatus()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPrivilegesMapByAuth()).compareTo(typedOther.isSetPrivilegesMapByAuth()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPrivilegesMapByAuth()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privilegesMapByAuth, typedOther.privilegesMapByAuth); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TListSentryPrivilegesByAuthResponse("); + boolean first = true; + + sb.append("status:"); + if (this.status == null) { + sb.append("null"); + } else { + sb.append(this.status); + } + first = false; + if (isSetPrivilegesMapByAuth()) { + if (!first) sb.append(", "); + sb.append("privilegesMapByAuth:"); + if (this.privilegesMapByAuth == null) { + sb.append("null"); + } else { + 
sb.append(this.privilegesMapByAuth); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetStatus()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + if (status != null) { + status.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TListSentryPrivilegesByAuthResponseStandardSchemeFactory implements SchemeFactory { + public TListSentryPrivilegesByAuthResponseStandardScheme getScheme() { + return new TListSentryPrivilegesByAuthResponseStandardScheme(); + } + } + + private static class TListSentryPrivilegesByAuthResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TListSentryPrivilegesByAuthResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // STATUS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // PRIVILEGES_MAP_BY_AUTH + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map138 = iprot.readMapBegin(); + struct.privilegesMapByAuth = new HashMap(2*_map138.size); + for (int _i139 = 0; _i139 < _map138.size; ++_i139) + { + String _key140; // required + TSentryPrivilegeMap _val141; // required + _key140 = iprot.readString(); + _val141 = new TSentryPrivilegeMap(); + _val141.read(iprot); + struct.privilegesMapByAuth.put(_key140, _val141); + } + iprot.readMapEnd(); + } + struct.setPrivilegesMapByAuthIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TListSentryPrivilegesByAuthResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.status != null) { + oprot.writeFieldBegin(STATUS_FIELD_DESC); + struct.status.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.privilegesMapByAuth != null) { + if (struct.isSetPrivilegesMapByAuth()) { + oprot.writeFieldBegin(PRIVILEGES_MAP_BY_AUTH_FIELD_DESC); + { + oprot.writeMapBegin(new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.privilegesMapByAuth.size())); + for (Map.Entry _iter142 : struct.privilegesMapByAuth.entrySet()) + { + oprot.writeString(_iter142.getKey()); + _iter142.getValue().write(oprot); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TListSentryPrivilegesByAuthResponseTupleSchemeFactory implements SchemeFactory { + public TListSentryPrivilegesByAuthResponseTupleScheme getScheme() { + return new TListSentryPrivilegesByAuthResponseTupleScheme(); + } + } + + private static class TListSentryPrivilegesByAuthResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TListSentryPrivilegesByAuthResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.status.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetPrivilegesMapByAuth()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetPrivilegesMapByAuth()) { + { + oprot.writeI32(struct.privilegesMapByAuth.size()); + for (Map.Entry _iter143 : struct.privilegesMapByAuth.entrySet()) + { + oprot.writeString(_iter143.getKey()); + _iter143.getValue().write(oprot); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TListSentryPrivilegesByAuthResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TMap _map144 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.privilegesMapByAuth = new HashMap(2*_map144.size); + for (int _i145 = 0; _i145 < _map144.size; ++_i145) + { + String _key146; // required + TSentryPrivilegeMap _val147; // required + _key146 = iprot.readString(); + _val147 = new TSentryPrivilegeMap(); + _val147.read(iprot); + struct.privilegesMapByAuth.put(_key146, _val147); + } + } + struct.setPrivilegesMapByAuthIsSet(true); + } + } + } + +} + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesForProviderRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesForProviderRequest.java index d1dd6a11f..5e443b4a4 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesForProviderRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesForProviderRequest.java @@ -153,7 +153,7 @@ public String getFieldName() { } public TListSentryPrivilegesForProviderRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -210,7 +210,7 @@ public TListSentryPrivilegesForProviderRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.component = null; this.serviceName = null; diff --git 
a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesRequest.java index 505c54822..d6afe5a5a 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryPrivilegesRequest.java @@ -152,7 +152,7 @@ public String getFieldName() { } public TListSentryPrivilegesRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -205,7 +205,7 @@ public TListSentryPrivilegesRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryRolesRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryRolesRequest.java index 078cb6ba0..08a4e3612 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryRolesRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TListSentryRolesRequest.java @@ -137,7 +137,7 @@ public String getFieldName() { } public TListSentryRolesRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -176,7 +176,7 @@ public TListSentryRolesRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.groupName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TRenamePrivilegesRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TRenamePrivilegesRequest.java index 22d9b4c6a..6b2ec0aa3 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TRenamePrivilegesRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TRenamePrivilegesRequest.java @@ -152,7 +152,7 @@ public String getFieldName() { } public TRenamePrivilegesRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -211,7 +211,7 @@ public TRenamePrivilegesRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.component = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TSentryPrivilegeMap.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TSentryPrivilegeMap.java new file mode 100644 index 000000000..a2945a258 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/generic/service/thrift/TSentryPrivilegeMap.java @@ -0,0 +1,486 @@ +/** + * 
Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.sentry.provider.db.generic.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TSentryPrivilegeMap implements org.apache.thrift.TBase<TSentryPrivilegeMap, TSentryPrivilegeMap._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSentryPrivilegeMap");
+
+  private static final org.apache.thrift.protocol.TField PRIVILEGE_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("privilegeMap", org.apache.thrift.protocol.TType.MAP, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TSentryPrivilegeMapStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TSentryPrivilegeMapTupleSchemeFactory());
+  }
+
+  private Map<String,Set<TSentryPrivilege>> privilegeMap; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PRIVILEGE_MAP((short)1, "privilegeMap");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PRIVILEGE_MAP
+          return PRIVILEGE_MAP;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.PRIVILEGE_MAP, new org.apache.thrift.meta_data.FieldMetaData("privilegeMap", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryPrivilege.class))))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TSentryPrivilegeMap.class, metaDataMap); + } + + public TSentryPrivilegeMap() { + } + + public TSentryPrivilegeMap( + Map> privilegeMap) + { + this(); + this.privilegeMap = privilegeMap; + } + + /** + * Performs a deep copy on other. + */ + public TSentryPrivilegeMap(TSentryPrivilegeMap other) { + if (other.isSetPrivilegeMap()) { + Map> __this__privilegeMap = new HashMap>(); + for (Map.Entry> other_element : other.privilegeMap.entrySet()) { + + String other_element_key = other_element.getKey(); + Set other_element_value = other_element.getValue(); + + String __this__privilegeMap_copy_key = other_element_key; + + Set __this__privilegeMap_copy_value = new HashSet(); + for (TSentryPrivilege other_element_value_element : other_element_value) { + __this__privilegeMap_copy_value.add(new TSentryPrivilege(other_element_value_element)); + } + + __this__privilegeMap.put(__this__privilegeMap_copy_key, __this__privilegeMap_copy_value); + } + this.privilegeMap = __this__privilegeMap; + } + } + + public TSentryPrivilegeMap deepCopy() { + return new TSentryPrivilegeMap(this); + } + + @Override + public void clear() { + this.privilegeMap = null; + } + + public int getPrivilegeMapSize() { + return (this.privilegeMap == null) ? 
0 : this.privilegeMap.size(); + } + + public void putToPrivilegeMap(String key, Set val) { + if (this.privilegeMap == null) { + this.privilegeMap = new HashMap>(); + } + this.privilegeMap.put(key, val); + } + + public Map> getPrivilegeMap() { + return this.privilegeMap; + } + + public void setPrivilegeMap(Map> privilegeMap) { + this.privilegeMap = privilegeMap; + } + + public void unsetPrivilegeMap() { + this.privilegeMap = null; + } + + /** Returns true if field privilegeMap is set (has been assigned a value) and false otherwise */ + public boolean isSetPrivilegeMap() { + return this.privilegeMap != null; + } + + public void setPrivilegeMapIsSet(boolean value) { + if (!value) { + this.privilegeMap = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case PRIVILEGE_MAP: + if (value == null) { + unsetPrivilegeMap(); + } else { + setPrivilegeMap((Map>)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case PRIVILEGE_MAP: + return getPrivilegeMap(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case PRIVILEGE_MAP: + return isSetPrivilegeMap(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TSentryPrivilegeMap) + return this.equals((TSentryPrivilegeMap)that); + return false; + } + + public boolean equals(TSentryPrivilegeMap that) { + if (that == null) + return false; + + boolean this_present_privilegeMap = true && this.isSetPrivilegeMap(); + boolean that_present_privilegeMap = true && that.isSetPrivilegeMap(); + if (this_present_privilegeMap || that_present_privilegeMap) { + if (!(this_present_privilegeMap && that_present_privilegeMap)) + return false; + if (!this.privilegeMap.equals(that.privilegeMap)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_privilegeMap = true && (isSetPrivilegeMap()); + builder.append(present_privilegeMap); + if (present_privilegeMap) + builder.append(privilegeMap); + + return builder.toHashCode(); + } + + public int compareTo(TSentryPrivilegeMap other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TSentryPrivilegeMap typedOther = (TSentryPrivilegeMap)other; + + lastComparison = Boolean.valueOf(isSetPrivilegeMap()).compareTo(typedOther.isSetPrivilegeMap()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPrivilegeMap()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privilegeMap, typedOther.privilegeMap); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("TSentryPrivilegeMap("); + boolean first = true; + + sb.append("privilegeMap:"); + if (this.privilegeMap == null) { + sb.append("null"); + } else { + sb.append(this.privilegeMap); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetPrivilegeMap()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'privilegeMap' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TSentryPrivilegeMapStandardSchemeFactory implements SchemeFactory { + public TSentryPrivilegeMapStandardScheme getScheme() { + return new TSentryPrivilegeMapStandardScheme(); + } + } + + private static class TSentryPrivilegeMapStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TSentryPrivilegeMap struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // PRIVILEGE_MAP + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map104 = iprot.readMapBegin(); + struct.privilegeMap = new HashMap>(2*_map104.size); + for (int _i105 = 0; _i105 < _map104.size; ++_i105) + { + String _key106; // required + Set _val107; // required + _key106 = iprot.readString(); + { + org.apache.thrift.protocol.TSet _set108 = iprot.readSetBegin(); + _val107 = new HashSet(2*_set108.size); + for (int _i109 = 0; _i109 < _set108.size; ++_i109) + { + TSentryPrivilege _elem110; // required + _elem110 = new TSentryPrivilege(); + _elem110.read(iprot); + _val107.add(_elem110); + } + iprot.readSetEnd(); + } + struct.privilegeMap.put(_key106, _val107); + } + iprot.readMapEnd(); + } + struct.setPrivilegeMapIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TSentryPrivilegeMap struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.privilegeMap != null) { + oprot.writeFieldBegin(PRIVILEGE_MAP_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.SET, struct.privilegeMap.size())); + for (Map.Entry> _iter111 : struct.privilegeMap.entrySet()) + { + oprot.writeString(_iter111.getKey()); + { + oprot.writeSetBegin(new 
org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRUCT, _iter111.getValue().size())); + for (TSentryPrivilege _iter112 : _iter111.getValue()) + { + _iter112.write(oprot); + } + oprot.writeSetEnd(); + } + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TSentryPrivilegeMapTupleSchemeFactory implements SchemeFactory { + public TSentryPrivilegeMapTupleScheme getScheme() { + return new TSentryPrivilegeMapTupleScheme(); + } + } + + private static class TSentryPrivilegeMapTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TSentryPrivilegeMap struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.privilegeMap.size()); + for (Map.Entry> _iter113 : struct.privilegeMap.entrySet()) + { + oprot.writeString(_iter113.getKey()); + { + oprot.writeI32(_iter113.getValue().size()); + for (TSentryPrivilege _iter114 : _iter113.getValue()) + { + _iter114.write(oprot); + } + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TSentryPrivilegeMap struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TMap _map115 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.SET, iprot.readI32()); + struct.privilegeMap = new HashMap>(2*_map115.size); + for (int _i116 = 0; _i116 < _map115.size; ++_i116) + { + String _key117; // required + Set _val118; // required + _key117 = iprot.readString(); + { + org.apache.thrift.protocol.TSet _set119 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val118 = new HashSet(2*_set119.size); + for (int _i120 = 0; _i120 < _set119.size; ++_i120) + { + TSentryPrivilege _elem121; // required + _elem121 = new TSentryPrivilege(); + _elem121.read(iprot); + _val118.add(_elem121); + } + } + struct.privilegeMap.put(_key117, _val118); + } + } + struct.setPrivilegeMapIsSet(true); + } + } + +} + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/SentryPolicyService.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/SentryPolicyService.java index c47f64a98..0c2444953 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/SentryPolicyService.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/SentryPolicyService.java @@ -61,6 +61,10 @@ public interface Iface { public TSentryConfigValueResponse get_sentry_config_value(TSentryConfigValueRequest request) throws org.apache.thrift.TException; + public TSentryExportMappingDataResponse export_sentry_mapping_data(TSentryExportMappingDataRequest request) throws org.apache.thrift.TException; + + public TSentryImportMappingDataResponse import_sentry_mapping_data(TSentryImportMappingDataRequest request) throws org.apache.thrift.TException; + } public interface AsyncIface { @@ -91,6 +95,10 @@ public interface AsyncIface { public void get_sentry_config_value(TSentryConfigValueRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void 
export_sentry_mapping_data(TSentryExportMappingDataRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void import_sentry_mapping_data(TSentryImportMappingDataRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } public static class Client extends org.apache.thrift.TServiceClient implements Iface { @@ -412,6 +420,52 @@ public TSentryConfigValueResponse recv_get_sentry_config_value() throws org.apac throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_sentry_config_value failed: unknown result"); } + public TSentryExportMappingDataResponse export_sentry_mapping_data(TSentryExportMappingDataRequest request) throws org.apache.thrift.TException + { + send_export_sentry_mapping_data(request); + return recv_export_sentry_mapping_data(); + } + + public void send_export_sentry_mapping_data(TSentryExportMappingDataRequest request) throws org.apache.thrift.TException + { + export_sentry_mapping_data_args args = new export_sentry_mapping_data_args(); + args.setRequest(request); + sendBase("export_sentry_mapping_data", args); + } + + public TSentryExportMappingDataResponse recv_export_sentry_mapping_data() throws org.apache.thrift.TException + { + export_sentry_mapping_data_result result = new export_sentry_mapping_data_result(); + receiveBase(result, "export_sentry_mapping_data"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "export_sentry_mapping_data failed: unknown result"); + } + + public TSentryImportMappingDataResponse import_sentry_mapping_data(TSentryImportMappingDataRequest request) throws org.apache.thrift.TException + { + send_import_sentry_mapping_data(request); + return recv_import_sentry_mapping_data(); + } + + public void send_import_sentry_mapping_data(TSentryImportMappingDataRequest request) throws org.apache.thrift.TException + { + import_sentry_mapping_data_args args = new import_sentry_mapping_data_args(); + args.setRequest(request); + sendBase("import_sentry_mapping_data", args); + } + + public TSentryImportMappingDataResponse recv_import_sentry_mapping_data() throws org.apache.thrift.TException + { + import_sentry_mapping_data_result result = new import_sentry_mapping_data_result(); + receiveBase(result, "import_sentry_mapping_data"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "import_sentry_mapping_data failed: unknown result"); + } + } public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -846,6 +900,70 @@ public TSentryConfigValueResponse getResult() throws org.apache.thrift.TExceptio } } + public void export_sentry_mapping_data(TSentryExportMappingDataRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + export_sentry_mapping_data_call method_call = new export_sentry_mapping_data_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class export_sentry_mapping_data_call extends org.apache.thrift.async.TAsyncMethodCall { + 
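/*
 * [editor's note] Each async method gets a generated TAsyncMethodCall
 * subclass: write_args() frames the outgoing request message, and getResult()
 * replays the buffered response through a synchronous Client so the recv_
 * decoding logic is reused rather than duplicated. The
 * import_sentry_mapping_data_call class below is the mirror image for the
 * import RPC.
 */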
private TSentryExportMappingDataRequest request; + public export_sentry_mapping_data_call(TSentryExportMappingDataRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("export_sentry_mapping_data", org.apache.thrift.protocol.TMessageType.CALL, 0)); + export_sentry_mapping_data_args args = new export_sentry_mapping_data_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public TSentryExportMappingDataResponse getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_export_sentry_mapping_data(); + } + } + + public void import_sentry_mapping_data(TSentryImportMappingDataRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + import_sentry_mapping_data_call method_call = new import_sentry_mapping_data_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class import_sentry_mapping_data_call extends org.apache.thrift.async.TAsyncMethodCall { + private TSentryImportMappingDataRequest request; + public import_sentry_mapping_data_call(TSentryImportMappingDataRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("import_sentry_mapping_data", org.apache.thrift.protocol.TMessageType.CALL, 0)); + import_sentry_mapping_data_args args = new import_sentry_mapping_data_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public TSentryImportMappingDataResponse getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_import_sentry_mapping_data(); + } + } + } public static class Processor extends 
org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { @@ -872,6 +990,8 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public export_sentry_mapping_data() { + super("export_sentry_mapping_data"); + } + + public export_sentry_mapping_data_args getEmptyArgsInstance() { + return new export_sentry_mapping_data_args(); + } + + protected boolean isOneway() { + return false; + } + + public export_sentry_mapping_data_result getResult(I iface, export_sentry_mapping_data_args args) throws org.apache.thrift.TException { + export_sentry_mapping_data_result result = new export_sentry_mapping_data_result(); + result.success = iface.export_sentry_mapping_data(args.request); + return result; + } + } + + public static class import_sentry_mapping_data extends org.apache.thrift.ProcessFunction { + public import_sentry_mapping_data() { + super("import_sentry_mapping_data"); + } + + public import_sentry_mapping_data_args getEmptyArgsInstance() { + return new import_sentry_mapping_data_args(); + } + + protected boolean isOneway() { + return false; + } + + public import_sentry_mapping_data_result getResult(I iface, import_sentry_mapping_data_args args) throws org.apache.thrift.TException { + import_sentry_mapping_data_result result = new import_sentry_mapping_data_result(); + result.success = iface.import_sentry_mapping_data(args.request); + return result; + } + } + } public static class create_sentry_role_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { @@ -10575,4 +10735,1456 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_sentry_config_va } + public static class export_sentry_mapping_data_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("export_sentry_mapping_data_args"); + + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new export_sentry_mapping_data_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new export_sentry_mapping_data_argsTupleSchemeFactory()); + } + + private TSentryExportMappingDataRequest request; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQUEST((short)1, "request"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQUEST + return REQUEST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
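+    * (Lookup is keyed by the Thrift field name as returned by getFieldName(),
+    * e.g. "request" for this struct.)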
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryExportMappingDataRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(export_sentry_mapping_data_args.class, metaDataMap); + } + + public export_sentry_mapping_data_args() { + } + + public export_sentry_mapping_data_args( + TSentryExportMappingDataRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. + */ + public export_sentry_mapping_data_args(export_sentry_mapping_data_args other) { + if (other.isSetRequest()) { + this.request = new TSentryExportMappingDataRequest(other.request); + } + } + + public export_sentry_mapping_data_args deepCopy() { + return new export_sentry_mapping_data_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public TSentryExportMappingDataRequest getRequest() { + return this.request; + } + + public void setRequest(TSentryExportMappingDataRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((TSentryExportMappingDataRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof export_sentry_mapping_data_args) + return this.equals((export_sentry_mapping_data_args)that); + return false; + } + + public boolean equals(export_sentry_mapping_data_args that) { + if (that == null) + return false; + + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + 
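+     // Generated member-wise equality: the two args objects are equal only
+     // when the request field is set (or unset) on both sides and, when set,
+     // the nested TSentryExportMappingDataRequest values compare equal.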
+ return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_request = true && (isSetRequest()); + builder.append(present_request); + if (present_request) + builder.append(request); + + return builder.toHashCode(); + } + + public int compareTo(export_sentry_mapping_data_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + export_sentry_mapping_data_args typedOther = (export_sentry_mapping_data_args)other; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(typedOther.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, typedOther.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("export_sentry_mapping_data_args("); + boolean first = true; + + sb.append("request:"); + if (this.request == null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class export_sentry_mapping_data_argsStandardSchemeFactory implements SchemeFactory { + public export_sentry_mapping_data_argsStandardScheme getScheme() { + return new export_sentry_mapping_data_argsStandardScheme(); + } + } + + private static class export_sentry_mapping_data_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, export_sentry_mapping_data_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new TSentryExportMappingDataRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, export_sentry_mapping_data_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class export_sentry_mapping_data_argsTupleSchemeFactory implements SchemeFactory { + public export_sentry_mapping_data_argsTupleScheme getScheme() { + return new export_sentry_mapping_data_argsTupleScheme(); + } + } + + private static class export_sentry_mapping_data_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, export_sentry_mapping_data_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, export_sentry_mapping_data_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.request = new TSentryExportMappingDataRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } + + } + + public static class export_sentry_mapping_data_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("export_sentry_mapping_data_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new export_sentry_mapping_data_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new export_sentry_mapping_data_resultTupleSchemeFactory()); + } + + private TSentryExportMappingDataResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryExportMappingDataResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(export_sentry_mapping_data_result.class, metaDataMap); + } + + public export_sentry_mapping_data_result() { + } + + public export_sentry_mapping_data_result( + TSentryExportMappingDataResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public export_sentry_mapping_data_result(export_sentry_mapping_data_result other) { + if (other.isSetSuccess()) { + this.success = new TSentryExportMappingDataResponse(other.success); + } + } + + public export_sentry_mapping_data_result deepCopy() { + return new export_sentry_mapping_data_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public TSentryExportMappingDataResponse getSuccess() { + return this.success; + } + + public void setSuccess(TSentryExportMappingDataResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((TSentryExportMappingDataResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof export_sentry_mapping_data_result) + return this.equals((export_sentry_mapping_data_result)that); + return false; + } + + public boolean equals(export_sentry_mapping_data_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if 
(!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + return builder.toHashCode(); + } + + public int compareTo(export_sentry_mapping_data_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + export_sentry_mapping_data_result typedOther = (export_sentry_mapping_data_result)other; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("export_sentry_mapping_data_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class export_sentry_mapping_data_resultStandardSchemeFactory implements SchemeFactory { + public export_sentry_mapping_data_resultStandardScheme getScheme() { + return new export_sentry_mapping_data_resultStandardScheme(); + } + } + + private static class export_sentry_mapping_data_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, export_sentry_mapping_data_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new TSentryExportMappingDataResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + 
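+           // A field that arrives with an unexpected wire type is skipped
+           // rather than failing the whole read; together with the default
+           // branch below, this is what lets an older peer tolerate fields
+           // added by a newer protocol version (cf. the protocol_version
+           // bump to 2 later in this patch).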
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, export_sentry_mapping_data_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class export_sentry_mapping_data_resultTupleSchemeFactory implements SchemeFactory { + public export_sentry_mapping_data_resultTupleScheme getScheme() { + return new export_sentry_mapping_data_resultTupleScheme(); + } + } + + private static class export_sentry_mapping_data_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, export_sentry_mapping_data_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, export_sentry_mapping_data_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new TSentryExportMappingDataResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + public static class import_sentry_mapping_data_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("import_sentry_mapping_data_args"); + + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new import_sentry_mapping_data_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new import_sentry_mapping_data_argsTupleSchemeFactory()); + } + + private TSentryImportMappingDataRequest request; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQUEST((short)1, "request"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQUEST + return REQUEST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryImportMappingDataRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(import_sentry_mapping_data_args.class, metaDataMap); + } + + public import_sentry_mapping_data_args() { + } + + public import_sentry_mapping_data_args( + TSentryImportMappingDataRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. + */ + public import_sentry_mapping_data_args(import_sentry_mapping_data_args other) { + if (other.isSetRequest()) { + this.request = new TSentryImportMappingDataRequest(other.request); + } + } + + public import_sentry_mapping_data_args deepCopy() { + return new import_sentry_mapping_data_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public TSentryImportMappingDataRequest getRequest() { + return this.request; + } + + public void setRequest(TSentryImportMappingDataRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((TSentryImportMappingDataRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof import_sentry_mapping_data_args) + return this.equals((import_sentry_mapping_data_args)that); + return false; + } + + public boolean equals(import_sentry_mapping_data_args that) { + if (that == null) + return false; + + 
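+     // These args carry the payload that the blocking client serializes for
+     // the import RPC. A minimal end-to-end sketch, assuming the enclosing
+     // generated service is SentryPolicyService and using placeholder
+     // host/port values:
+     //   TTransport transport = new TSocket("sentry-host", 8038);
+     //   transport.open();
+     //   SentryPolicyService.Client client =
+     //       new SentryPolicyService.Client(new TBinaryProtocol(transport));
+     //   TSentryImportMappingDataResponse resp =
+     //       client.import_sentry_mapping_data(importRequest);
+     //   transport.close();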
boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_request = true && (isSetRequest()); + builder.append(present_request); + if (present_request) + builder.append(request); + + return builder.toHashCode(); + } + + public int compareTo(import_sentry_mapping_data_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + import_sentry_mapping_data_args typedOther = (import_sentry_mapping_data_args)other; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(typedOther.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, typedOther.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("import_sentry_mapping_data_args("); + boolean first = true; + + sb.append("request:"); + if (this.request == null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class import_sentry_mapping_data_argsStandardSchemeFactory implements SchemeFactory { + public import_sentry_mapping_data_argsStandardScheme getScheme() { + return new import_sentry_mapping_data_argsStandardScheme(); + } + } + + private static class import_sentry_mapping_data_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, import_sentry_mapping_data_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: 
// REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new TSentryImportMappingDataRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, import_sentry_mapping_data_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class import_sentry_mapping_data_argsTupleSchemeFactory implements SchemeFactory { + public import_sentry_mapping_data_argsTupleScheme getScheme() { + return new import_sentry_mapping_data_argsTupleScheme(); + } + } + + private static class import_sentry_mapping_data_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, import_sentry_mapping_data_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, import_sentry_mapping_data_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.request = new TSentryImportMappingDataRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } + + } + + public static class import_sentry_mapping_data_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("import_sentry_mapping_data_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new import_sentry_mapping_data_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new import_sentry_mapping_data_resultTupleSchemeFactory()); + } + + private TSentryImportMappingDataResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryImportMappingDataResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(import_sentry_mapping_data_result.class, metaDataMap); + } + + public import_sentry_mapping_data_result() { + } + + public import_sentry_mapping_data_result( + TSentryImportMappingDataResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public import_sentry_mapping_data_result(import_sentry_mapping_data_result other) { + if (other.isSetSuccess()) { + this.success = new TSentryImportMappingDataResponse(other.success); + } + } + + public import_sentry_mapping_data_result deepCopy() { + return new import_sentry_mapping_data_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public TSentryImportMappingDataResponse getSuccess() { + return this.success; + } + + public void setSuccess(TSentryImportMappingDataResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((TSentryImportMappingDataResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof import_sentry_mapping_data_result) + return this.equals((import_sentry_mapping_data_result)that); + return false; + } + + public boolean equals(import_sentry_mapping_data_result that) { + if (that == 
null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + return builder.toHashCode(); + } + + public int compareTo(import_sentry_mapping_data_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + import_sentry_mapping_data_result typedOther = (import_sentry_mapping_data_result)other; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("import_sentry_mapping_data_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class import_sentry_mapping_data_resultStandardSchemeFactory implements SchemeFactory { + public import_sentry_mapping_data_resultStandardScheme getScheme() { + return new import_sentry_mapping_data_resultStandardScheme(); + } + } + + private static class import_sentry_mapping_data_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, import_sentry_mapping_data_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + 
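+         // TType.STOP terminates the struct's field list; by this point the
+         // success struct (field id 0) has either been consumed or skipped.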
break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new TSentryImportMappingDataResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, import_sentry_mapping_data_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class import_sentry_mapping_data_resultTupleSchemeFactory implements SchemeFactory { + public import_sentry_mapping_data_resultTupleScheme getScheme() { + return new import_sentry_mapping_data_resultTupleScheme(); + } + } + + private static class import_sentry_mapping_data_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, import_sentry_mapping_data_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, import_sentry_mapping_data_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new TSentryImportMappingDataResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + } diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleAddGroupsRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleAddGroupsRequest.java index 21efbd051..7ac2069f2 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleAddGroupsRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleAddGroupsRequest.java @@ -137,7 +137,7 @@ public String getFieldName() { } public TAlterSentryRoleAddGroupsRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -182,7 +182,7 @@ public TAlterSentryRoleAddGroupsRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleDeleteGroupsRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleDeleteGroupsRequest.java index 58e987083..da4d76c2e 100644 --- 
a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleDeleteGroupsRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleDeleteGroupsRequest.java @@ -137,7 +137,7 @@ public String getFieldName() { } public TAlterSentryRoleDeleteGroupsRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -182,7 +182,7 @@ public TAlterSentryRoleDeleteGroupsRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleGrantPrivilegeRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleGrantPrivilegeRequest.java index 6b051a178..aafa91e88 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleGrantPrivilegeRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleGrantPrivilegeRequest.java @@ -145,7 +145,7 @@ public String getFieldName() { } public TAlterSentryRoleGrantPrivilegeRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -191,7 +191,7 @@ public TAlterSentryRoleGrantPrivilegeRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleRevokePrivilegeRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleRevokePrivilegeRequest.java index 71cc12e24..034a061eb 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleRevokePrivilegeRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TAlterSentryRoleRevokePrivilegeRequest.java @@ -145,7 +145,7 @@ public String getFieldName() { } public TAlterSentryRoleRevokePrivilegeRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -191,7 +191,7 @@ public TAlterSentryRoleRevokePrivilegeRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TCreateSentryRoleRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TCreateSentryRoleRequest.java index fc7c5dd70..5bf7cb331 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TCreateSentryRoleRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TCreateSentryRoleRequest.java @@ -129,7 +129,7 @@ public String getFieldName() { } public TCreateSentryRoleRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ 
-165,7 +165,7 @@ public TCreateSentryRoleRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TDropPrivilegesRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TDropPrivilegesRequest.java index 3df92355e..8f5a2b32f 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TDropPrivilegesRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TDropPrivilegesRequest.java @@ -129,7 +129,7 @@ public String getFieldName() { } public TDropPrivilegesRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -165,7 +165,7 @@ public TDropPrivilegesRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.authorizable = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TDropSentryRoleRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TDropSentryRoleRequest.java index e2971ec64..753f86c05 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TDropSentryRoleRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TDropSentryRoleRequest.java @@ -129,7 +129,7 @@ public String getFieldName() { } public TDropSentryRoleRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -165,7 +165,7 @@ public TDropSentryRoleRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesByAuthRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesByAuthRequest.java index 1a5d3cfa8..0f3c6d825 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesByAuthRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesByAuthRequest.java @@ -146,7 +146,7 @@ public String getFieldName() { } public TListSentryPrivilegesByAuthRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -196,7 +196,7 @@ public TListSentryPrivilegesByAuthRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.authorizableSet = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesForProviderRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesForProviderRequest.java index 6ff6b482c..51fa953b2 100644 --- 
a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesForProviderRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesForProviderRequest.java @@ -138,7 +138,7 @@ public String getFieldName() { } public TListSentryPrivilegesForProviderRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -181,7 +181,7 @@ public TListSentryPrivilegesForProviderRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.groups = null; this.roleSet = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesRequest.java index 393ff91b7..0b9301bca 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryPrivilegesRequest.java @@ -137,7 +137,7 @@ public String getFieldName() { } public TListSentryPrivilegesRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -176,7 +176,7 @@ public TListSentryPrivilegesRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.roleName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryRolesRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryRolesRequest.java index 4eec1ed33..bdab2b73a 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryRolesRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TListSentryRolesRequest.java @@ -130,7 +130,7 @@ public String getFieldName() { } public TListSentryRolesRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -164,7 +164,7 @@ public TListSentryRolesRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.requestorUserName = null; this.groupName = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TRenamePrivilegesRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TRenamePrivilegesRequest.java index a2bc80583..989a6c6d9 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TRenamePrivilegesRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TRenamePrivilegesRequest.java @@ -136,7 +136,7 @@ public String getFieldName() { } public TRenamePrivilegesRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -177,7 +177,7 @@ public TRenamePrivilegesRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + 
this.protocol_version = 2; this.requestorUserName = null; this.oldAuthorizable = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryConfigValueRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryConfigValueRequest.java index c14393fb7..995cbe291 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryConfigValueRequest.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryConfigValueRequest.java @@ -130,7 +130,7 @@ public String getFieldName() { } public TSentryConfigValueRequest() { - this.protocol_version = 1; + this.protocol_version = 2; } @@ -164,7 +164,7 @@ public TSentryConfigValueRequest deepCopy() { @Override public void clear() { - this.protocol_version = 1; + this.protocol_version = 2; this.propertyName = null; this.defaultValue = null; diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryExportMappingDataRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryExportMappingDataRequest.java new file mode 100644 index 000000000..81452527b --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryExportMappingDataRequest.java @@ -0,0 +1,486 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.sentry.provider.db.service.thrift; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TSentryExportMappingDataRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSentryExportMappingDataRequest"); + + private static final org.apache.thrift.protocol.TField PROTOCOL_VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("protocol_version", org.apache.thrift.protocol.TType.I32, (short)1); + private static final org.apache.thrift.protocol.TField REQUESTOR_USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("requestorUserName", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TSentryExportMappingDataRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TSentryExportMappingDataRequestTupleSchemeFactory()); + 
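+   // Both wire formats are registered eagerly: StandardScheme for the usual
+   // binary/compact protocols, TupleScheme for the denser encoding used when
+   // the transport speaks TTupleProtocol.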
} + + private int protocol_version; // required + private String requestorUserName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + PROTOCOL_VERSION((short)1, "protocol_version"), + REQUESTOR_USER_NAME((short)2, "requestorUserName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // PROTOCOL_VERSION + return PROTOCOL_VERSION; + case 2: // REQUESTOR_USER_NAME + return REQUESTOR_USER_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __PROTOCOL_VERSION_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.PROTOCOL_VERSION, new org.apache.thrift.meta_data.FieldMetaData("protocol_version", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.REQUESTOR_USER_NAME, new org.apache.thrift.meta_data.FieldMetaData("requestorUserName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TSentryExportMappingDataRequest.class, metaDataMap); + } + + public TSentryExportMappingDataRequest() { + this.protocol_version = 1; + + } + + public TSentryExportMappingDataRequest( + int protocol_version, + String requestorUserName) + { + this(); + this.protocol_version = protocol_version; + setProtocol_versionIsSet(true); + this.requestorUserName = requestorUserName; + } + + /** + * Performs a deep copy on other. 
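Beyond the plain getters and setters, every generated struct carries a _Fields enum plus a metaDataMap, so fields can be addressed generically by name or thrift id. A short sketch using only methods defined above (the requestor name "hive" is purely illustrative):

    TSentryExportMappingDataRequest request = new TSentryExportMappingDataRequest();
    TSentryExportMappingDataRequest._Fields field =
        TSentryExportMappingDataRequest._Fields.findByName("requestorUserName");
    request.setFieldValue(field, "hive");          // dispatches to setRequestorUserName(...)
    Object value = request.getFieldValue(field);   // "hive"
    boolean present = request.isSet(field);        // true once assigned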
+ */ + public TSentryExportMappingDataRequest(TSentryExportMappingDataRequest other) { + __isset_bitfield = other.__isset_bitfield; + this.protocol_version = other.protocol_version; + if (other.isSetRequestorUserName()) { + this.requestorUserName = other.requestorUserName; + } + } + + public TSentryExportMappingDataRequest deepCopy() { + return new TSentryExportMappingDataRequest(this); + } + + @Override + public void clear() { + this.protocol_version = 1; + + this.requestorUserName = null; + } + + public int getProtocol_version() { + return this.protocol_version; + } + + public void setProtocol_version(int protocol_version) { + this.protocol_version = protocol_version; + setProtocol_versionIsSet(true); + } + + public void unsetProtocol_version() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID); + } + + /** Returns true if field protocol_version is set (has been assigned a value) and false otherwise */ + public boolean isSetProtocol_version() { + return EncodingUtils.testBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID); + } + + public void setProtocol_versionIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID, value); + } + + public String getRequestorUserName() { + return this.requestorUserName; + } + + public void setRequestorUserName(String requestorUserName) { + this.requestorUserName = requestorUserName; + } + + public void unsetRequestorUserName() { + this.requestorUserName = null; + } + + /** Returns true if field requestorUserName is set (has been assigned a value) and false otherwise */ + public boolean isSetRequestorUserName() { + return this.requestorUserName != null; + } + + public void setRequestorUserNameIsSet(boolean value) { + if (!value) { + this.requestorUserName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case PROTOCOL_VERSION: + if (value == null) { + unsetProtocol_version(); + } else { + setProtocol_version((Integer)value); + } + break; + + case REQUESTOR_USER_NAME: + if (value == null) { + unsetRequestorUserName(); + } else { + setRequestorUserName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case PROTOCOL_VERSION: + return Integer.valueOf(getProtocol_version()); + + case REQUESTOR_USER_NAME: + return getRequestorUserName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case PROTOCOL_VERSION: + return isSetProtocol_version(); + case REQUESTOR_USER_NAME: + return isSetRequestorUserName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TSentryExportMappingDataRequest) + return this.equals((TSentryExportMappingDataRequest)that); + return false; + } + + public boolean equals(TSentryExportMappingDataRequest that) { + if (that == null) + return false; + + boolean this_present_protocol_version = true; + boolean that_present_protocol_version = true; + if (this_present_protocol_version || that_present_protocol_version) { + if (!(this_present_protocol_version && that_present_protocol_version)) + return false; + if (this.protocol_version != that.protocol_version) + return false; + } + + boolean 
this_present_requestorUserName = true && this.isSetRequestorUserName(); + boolean that_present_requestorUserName = true && that.isSetRequestorUserName(); + if (this_present_requestorUserName || that_present_requestorUserName) { + if (!(this_present_requestorUserName && that_present_requestorUserName)) + return false; + if (!this.requestorUserName.equals(that.requestorUserName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_protocol_version = true; + builder.append(present_protocol_version); + if (present_protocol_version) + builder.append(protocol_version); + + boolean present_requestorUserName = true && (isSetRequestorUserName()); + builder.append(present_requestorUserName); + if (present_requestorUserName) + builder.append(requestorUserName); + + return builder.toHashCode(); + } + + public int compareTo(TSentryExportMappingDataRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TSentryExportMappingDataRequest typedOther = (TSentryExportMappingDataRequest)other; + + lastComparison = Boolean.valueOf(isSetProtocol_version()).compareTo(typedOther.isSetProtocol_version()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetProtocol_version()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.protocol_version, typedOther.protocol_version); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRequestorUserName()).compareTo(typedOther.isSetRequestorUserName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequestorUserName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requestorUserName, typedOther.requestorUserName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TSentryExportMappingDataRequest("); + boolean first = true; + + sb.append("protocol_version:"); + sb.append(this.protocol_version); + first = false; + if (!first) sb.append(", "); + sb.append("requestorUserName:"); + if (this.requestorUserName == null) { + sb.append("null"); + } else { + sb.append(this.requestorUserName); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetProtocol_version()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'protocol_version' is unset! Struct:" + toString()); + } + + if (!isSetRequestorUserName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'requestorUserName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TSentryExportMappingDataRequestStandardSchemeFactory implements SchemeFactory { + public TSentryExportMappingDataRequestStandardScheme getScheme() { + return new TSentryExportMappingDataRequestStandardScheme(); + } + } + + private static class TSentryExportMappingDataRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TSentryExportMappingDataRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // PROTOCOL_VERSION + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.protocol_version = iprot.readI32(); + struct.setProtocol_versionIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // REQUESTOR_USER_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.requestorUserName = iprot.readString(); + struct.setRequestorUserNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TSentryExportMappingDataRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(PROTOCOL_VERSION_FIELD_DESC); + oprot.writeI32(struct.protocol_version); + oprot.writeFieldEnd(); + if (struct.requestorUserName != null) { + oprot.writeFieldBegin(REQUESTOR_USER_NAME_FIELD_DESC); + oprot.writeString(struct.requestorUserName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TSentryExportMappingDataRequestTupleSchemeFactory implements SchemeFactory { + public TSentryExportMappingDataRequestTupleScheme getScheme() { + return new TSentryExportMappingDataRequestTupleScheme(); + } + } + + private static class TSentryExportMappingDataRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TSentryExportMappingDataRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI32(struct.protocol_version); + oprot.writeString(struct.requestorUserName); + } + + @Override + public void 
read(org.apache.thrift.protocol.TProtocol prot, TSentryExportMappingDataRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.protocol_version = iprot.readI32(); + struct.setProtocol_versionIsSet(true); + struct.requestorUserName = iprot.readString(); + struct.setRequestorUserNameIsSet(true); + } + } + +} + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryExportMappingDataResponse.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryExportMappingDataResponse.java new file mode 100644 index 000000000..3809df3e1 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryExportMappingDataResponse.java @@ -0,0 +1,496 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.sentry.provider.db.service.thrift; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TSentryExportMappingDataResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSentryExportMappingDataResponse"); + + private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField MAPPING_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("mappingData", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TSentryExportMappingDataResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TSentryExportMappingDataResponseTupleSchemeFactory()); + } + + private org.apache.sentry.service.thrift.TSentryResponseStatus status; // required + private TSentryMappingData mappingData; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + STATUS((short)1, "status"), + MAPPING_DATA((short)2, "mappingData"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
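Taken together, the request class just completed and the response class that follows form the export half of the new mapping-data API: the client sends its protocol version and requestor name, and receives a status plus a TSentryMappingData payload. A hedged client-side sketch, using the two-argument constructor defined above (the service stub that actually transports the request is not part of this diff):

    TSentryExportMappingDataRequest request =
        new TSentryExportMappingDataRequest(2, "hive");  // version, requestor (illustrative)
    request.validate();  // throws TProtocolException if a required field is unset
    // The populated request is then handed to the Sentry service client; the
    // TSentryExportMappingDataResponse it returns carries status + mappingData.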
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // STATUS + return STATUS; + case 2: // MAPPING_DATA + return MAPPING_DATA; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.sentry.service.thrift.TSentryResponseStatus.class))); + tmpMap.put(_Fields.MAPPING_DATA, new org.apache.thrift.meta_data.FieldMetaData("mappingData", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryMappingData.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TSentryExportMappingDataResponse.class, metaDataMap); + } + + public TSentryExportMappingDataResponse() { + } + + public TSentryExportMappingDataResponse( + org.apache.sentry.service.thrift.TSentryResponseStatus status, + TSentryMappingData mappingData) + { + this(); + this.status = status; + this.mappingData = mappingData; + } + + /** + * Performs a deep copy on other. 
+ */ + public TSentryExportMappingDataResponse(TSentryExportMappingDataResponse other) { + if (other.isSetStatus()) { + this.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(other.status); + } + if (other.isSetMappingData()) { + this.mappingData = new TSentryMappingData(other.mappingData); + } + } + + public TSentryExportMappingDataResponse deepCopy() { + return new TSentryExportMappingDataResponse(this); + } + + @Override + public void clear() { + this.status = null; + this.mappingData = null; + } + + public org.apache.sentry.service.thrift.TSentryResponseStatus getStatus() { + return this.status; + } + + public void setStatus(org.apache.sentry.service.thrift.TSentryResponseStatus status) { + this.status = status; + } + + public void unsetStatus() { + this.status = null; + } + + /** Returns true if field status is set (has been assigned a value) and false otherwise */ + public boolean isSetStatus() { + return this.status != null; + } + + public void setStatusIsSet(boolean value) { + if (!value) { + this.status = null; + } + } + + public TSentryMappingData getMappingData() { + return this.mappingData; + } + + public void setMappingData(TSentryMappingData mappingData) { + this.mappingData = mappingData; + } + + public void unsetMappingData() { + this.mappingData = null; + } + + /** Returns true if field mappingData is set (has been assigned a value) and false otherwise */ + public boolean isSetMappingData() { + return this.mappingData != null; + } + + public void setMappingDataIsSet(boolean value) { + if (!value) { + this.mappingData = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case STATUS: + if (value == null) { + unsetStatus(); + } else { + setStatus((org.apache.sentry.service.thrift.TSentryResponseStatus)value); + } + break; + + case MAPPING_DATA: + if (value == null) { + unsetMappingData(); + } else { + setMappingData((TSentryMappingData)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case STATUS: + return getStatus(); + + case MAPPING_DATA: + return getMappingData(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case STATUS: + return isSetStatus(); + case MAPPING_DATA: + return isSetMappingData(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TSentryExportMappingDataResponse) + return this.equals((TSentryExportMappingDataResponse)that); + return false; + } + + public boolean equals(TSentryExportMappingDataResponse that) { + if (that == null) + return false; + + boolean this_present_status = true && this.isSetStatus(); + boolean that_present_status = true && that.isSetStatus(); + if (this_present_status || that_present_status) { + if (!(this_present_status && that_present_status)) + return false; + if (!this.status.equals(that.status)) + return false; + } + + boolean this_present_mappingData = true && this.isSetMappingData(); + boolean that_present_mappingData = true && that.isSetMappingData(); + if (this_present_mappingData || that_present_mappingData) { + if (!(this_present_mappingData && that_present_mappingData)) + return false; + if (!this.mappingData.equals(that.mappingData)) + return false; + } + + return 
true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_status = true && (isSetStatus()); + builder.append(present_status); + if (present_status) + builder.append(status); + + boolean present_mappingData = true && (isSetMappingData()); + builder.append(present_mappingData); + if (present_mappingData) + builder.append(mappingData); + + return builder.toHashCode(); + } + + public int compareTo(TSentryExportMappingDataResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TSentryExportMappingDataResponse typedOther = (TSentryExportMappingDataResponse)other; + + lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStatus()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMappingData()).compareTo(typedOther.isSetMappingData()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMappingData()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mappingData, typedOther.mappingData); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TSentryExportMappingDataResponse("); + boolean first = true; + + sb.append("status:"); + if (this.status == null) { + sb.append("null"); + } else { + sb.append(this.status); + } + first = false; + if (!first) sb.append(", "); + sb.append("mappingData:"); + if (this.mappingData == null) { + sb.append("null"); + } else { + sb.append(this.mappingData); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetStatus()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); + } + + if (!isSetMappingData()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'mappingData' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + if (status != null) { + status.validate(); + } + if (mappingData != null) { + mappingData.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TSentryExportMappingDataResponseStandardSchemeFactory implements SchemeFactory { + public TSentryExportMappingDataResponseStandardScheme getScheme() { + return new TSentryExportMappingDataResponseStandardScheme(); + } + } + + private static class TSentryExportMappingDataResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TSentryExportMappingDataResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // STATUS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // MAPPING_DATA + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.mappingData = new TSentryMappingData(); + struct.mappingData.read(iprot); + struct.setMappingDataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TSentryExportMappingDataResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.status != null) { + oprot.writeFieldBegin(STATUS_FIELD_DESC); + struct.status.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.mappingData != null) { + oprot.writeFieldBegin(MAPPING_DATA_FIELD_DESC); + struct.mappingData.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TSentryExportMappingDataResponseTupleSchemeFactory implements SchemeFactory { + public TSentryExportMappingDataResponseTupleScheme getScheme() { + return new TSentryExportMappingDataResponseTupleScheme(); + } + } + + private static class TSentryExportMappingDataResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TSentryExportMappingDataResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.status.write(oprot); + struct.mappingData.write(oprot); + } + + @Override + public void 
read(org.apache.thrift.protocol.TProtocol prot, TSentryExportMappingDataResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + struct.mappingData = new TSentryMappingData(); + struct.mappingData.read(iprot); + struct.setMappingDataIsSet(true); + } + } + +} + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryImportMappingDataRequest.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryImportMappingDataRequest.java new file mode 100644 index 000000000..23ad56c33 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryImportMappingDataRequest.java @@ -0,0 +1,689 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.sentry.provider.db.service.thrift; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TSentryImportMappingDataRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSentryImportMappingDataRequest"); + + private static final org.apache.thrift.protocol.TField PROTOCOL_VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("protocol_version", org.apache.thrift.protocol.TType.I32, (short)1); + private static final org.apache.thrift.protocol.TField REQUESTOR_USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("requestorUserName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField OVERWRITE_ROLE_FIELD_DESC = new org.apache.thrift.protocol.TField("overwriteRole", org.apache.thrift.protocol.TType.BOOL, (short)3); + private static final org.apache.thrift.protocol.TField MAPPING_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("mappingData", org.apache.thrift.protocol.TType.STRUCT, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TSentryImportMappingDataRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TSentryImportMappingDataRequestTupleSchemeFactory()); + } + + private int protocol_version; // required + private String requestorUserName; // required + private boolean overwriteRole; // required + private TSentryMappingData mappingData; // required + + /** The set of fields this struct 
contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + PROTOCOL_VERSION((short)1, "protocol_version"), + REQUESTOR_USER_NAME((short)2, "requestorUserName"), + OVERWRITE_ROLE((short)3, "overwriteRole"), + MAPPING_DATA((short)4, "mappingData"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // PROTOCOL_VERSION + return PROTOCOL_VERSION; + case 2: // REQUESTOR_USER_NAME + return REQUESTOR_USER_NAME; + case 3: // OVERWRITE_ROLE + return OVERWRITE_ROLE; + case 4: // MAPPING_DATA + return MAPPING_DATA; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __PROTOCOL_VERSION_ISSET_ID = 0; + private static final int __OVERWRITEROLE_ISSET_ID = 1; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.PROTOCOL_VERSION, new org.apache.thrift.meta_data.FieldMetaData("protocol_version", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.REQUESTOR_USER_NAME, new org.apache.thrift.meta_data.FieldMetaData("requestorUserName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.OVERWRITE_ROLE, new org.apache.thrift.meta_data.FieldMetaData("overwriteRole", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.MAPPING_DATA, new org.apache.thrift.meta_data.FieldMetaData("mappingData", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryMappingData.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TSentryImportMappingDataRequest.class, metaDataMap); + } + + public TSentryImportMappingDataRequest() { + this.protocol_version = 1; + + this.overwriteRole = false; + + } + + public TSentryImportMappingDataRequest( + int protocol_version, + String 
requestorUserName, + boolean overwriteRole, + TSentryMappingData mappingData) + { + this(); + this.protocol_version = protocol_version; + setProtocol_versionIsSet(true); + this.requestorUserName = requestorUserName; + this.overwriteRole = overwriteRole; + setOverwriteRoleIsSet(true); + this.mappingData = mappingData; + } + + /** + * Performs a deep copy on other. + */ + public TSentryImportMappingDataRequest(TSentryImportMappingDataRequest other) { + __isset_bitfield = other.__isset_bitfield; + this.protocol_version = other.protocol_version; + if (other.isSetRequestorUserName()) { + this.requestorUserName = other.requestorUserName; + } + this.overwriteRole = other.overwriteRole; + if (other.isSetMappingData()) { + this.mappingData = new TSentryMappingData(other.mappingData); + } + } + + public TSentryImportMappingDataRequest deepCopy() { + return new TSentryImportMappingDataRequest(this); + } + + @Override + public void clear() { + this.protocol_version = 1; + + this.requestorUserName = null; + this.overwriteRole = false; + + this.mappingData = null; + } + + public int getProtocol_version() { + return this.protocol_version; + } + + public void setProtocol_version(int protocol_version) { + this.protocol_version = protocol_version; + setProtocol_versionIsSet(true); + } + + public void unsetProtocol_version() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID); + } + + /** Returns true if field protocol_version is set (has been assigned a value) and false otherwise */ + public boolean isSetProtocol_version() { + return EncodingUtils.testBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID); + } + + public void setProtocol_versionIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PROTOCOL_VERSION_ISSET_ID, value); + } + + public String getRequestorUserName() { + return this.requestorUserName; + } + + public void setRequestorUserName(String requestorUserName) { + this.requestorUserName = requestorUserName; + } + + public void unsetRequestorUserName() { + this.requestorUserName = null; + } + + /** Returns true if field requestorUserName is set (has been assigned a value) and false otherwise */ + public boolean isSetRequestorUserName() { + return this.requestorUserName != null; + } + + public void setRequestorUserNameIsSet(boolean value) { + if (!value) { + this.requestorUserName = null; + } + } + + public boolean isOverwriteRole() { + return this.overwriteRole; + } + + public void setOverwriteRole(boolean overwriteRole) { + this.overwriteRole = overwriteRole; + setOverwriteRoleIsSet(true); + } + + public void unsetOverwriteRole() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __OVERWRITEROLE_ISSET_ID); + } + + /** Returns true if field overwriteRole is set (has been assigned a value) and false otherwise */ + public boolean isSetOverwriteRole() { + return EncodingUtils.testBit(__isset_bitfield, __OVERWRITEROLE_ISSET_ID); + } + + public void setOverwriteRoleIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __OVERWRITEROLE_ISSET_ID, value); + } + + public TSentryMappingData getMappingData() { + return this.mappingData; + } + + public void setMappingData(TSentryMappingData mappingData) { + this.mappingData = mappingData; + } + + public void unsetMappingData() { + this.mappingData = null; + } + + /** Returns true if field mappingData is set (has been assigned a value) and false otherwise */ + public boolean isSetMappingData() { + return this.mappingData != null; + } + + public 
void setMappingDataIsSet(boolean value) { + if (!value) { + this.mappingData = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case PROTOCOL_VERSION: + if (value == null) { + unsetProtocol_version(); + } else { + setProtocol_version((Integer)value); + } + break; + + case REQUESTOR_USER_NAME: + if (value == null) { + unsetRequestorUserName(); + } else { + setRequestorUserName((String)value); + } + break; + + case OVERWRITE_ROLE: + if (value == null) { + unsetOverwriteRole(); + } else { + setOverwriteRole((Boolean)value); + } + break; + + case MAPPING_DATA: + if (value == null) { + unsetMappingData(); + } else { + setMappingData((TSentryMappingData)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case PROTOCOL_VERSION: + return Integer.valueOf(getProtocol_version()); + + case REQUESTOR_USER_NAME: + return getRequestorUserName(); + + case OVERWRITE_ROLE: + return Boolean.valueOf(isOverwriteRole()); + + case MAPPING_DATA: + return getMappingData(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case PROTOCOL_VERSION: + return isSetProtocol_version(); + case REQUESTOR_USER_NAME: + return isSetRequestorUserName(); + case OVERWRITE_ROLE: + return isSetOverwriteRole(); + case MAPPING_DATA: + return isSetMappingData(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TSentryImportMappingDataRequest) + return this.equals((TSentryImportMappingDataRequest)that); + return false; + } + + public boolean equals(TSentryImportMappingDataRequest that) { + if (that == null) + return false; + + boolean this_present_protocol_version = true; + boolean that_present_protocol_version = true; + if (this_present_protocol_version || that_present_protocol_version) { + if (!(this_present_protocol_version && that_present_protocol_version)) + return false; + if (this.protocol_version != that.protocol_version) + return false; + } + + boolean this_present_requestorUserName = true && this.isSetRequestorUserName(); + boolean that_present_requestorUserName = true && that.isSetRequestorUserName(); + if (this_present_requestorUserName || that_present_requestorUserName) { + if (!(this_present_requestorUserName && that_present_requestorUserName)) + return false; + if (!this.requestorUserName.equals(that.requestorUserName)) + return false; + } + + boolean this_present_overwriteRole = true; + boolean that_present_overwriteRole = true; + if (this_present_overwriteRole || that_present_overwriteRole) { + if (!(this_present_overwriteRole && that_present_overwriteRole)) + return false; + if (this.overwriteRole != that.overwriteRole) + return false; + } + + boolean this_present_mappingData = true && this.isSetMappingData(); + boolean that_present_mappingData = true && that.isSetMappingData(); + if (this_present_mappingData || that_present_mappingData) { + if (!(this_present_mappingData && that_present_mappingData)) + return false; + if (!this.mappingData.equals(that.mappingData)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_protocol_version = true; + builder.append(present_protocol_version); 
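The import request mirrors the export request but adds two more required fields: the TSentryMappingData payload to load and an overwriteRole flag. The flag's name suggests whether imported role mappings replace existing ones rather than being merged; the handler enforcing that is outside this diff. A sketch using the four-argument constructor above (how TSentryMappingData is populated is likewise not shown here, so an empty instance from its assumed generated no-arg constructor stands in):

    TSentryMappingData mappingData = new TSentryMappingData();
    TSentryImportMappingDataRequest request = new TSentryImportMappingDataRequest(
        2,             // protocol_version
        "hive",        // requestorUserName (illustrative)
        true,          // overwriteRole: replace rather than merge, per the field's name
        mappingData);
    request.validate();  // all four fields are REQUIRED, so an unset one fails fast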
+ if (present_protocol_version) + builder.append(protocol_version); + + boolean present_requestorUserName = true && (isSetRequestorUserName()); + builder.append(present_requestorUserName); + if (present_requestorUserName) + builder.append(requestorUserName); + + boolean present_overwriteRole = true; + builder.append(present_overwriteRole); + if (present_overwriteRole) + builder.append(overwriteRole); + + boolean present_mappingData = true && (isSetMappingData()); + builder.append(present_mappingData); + if (present_mappingData) + builder.append(mappingData); + + return builder.toHashCode(); + } + + public int compareTo(TSentryImportMappingDataRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TSentryImportMappingDataRequest typedOther = (TSentryImportMappingDataRequest)other; + + lastComparison = Boolean.valueOf(isSetProtocol_version()).compareTo(typedOther.isSetProtocol_version()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetProtocol_version()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.protocol_version, typedOther.protocol_version); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRequestorUserName()).compareTo(typedOther.isSetRequestorUserName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequestorUserName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requestorUserName, typedOther.requestorUserName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetOverwriteRole()).compareTo(typedOther.isSetOverwriteRole()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOverwriteRole()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.overwriteRole, typedOther.overwriteRole); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMappingData()).compareTo(typedOther.isSetMappingData()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMappingData()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mappingData, typedOther.mappingData); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TSentryImportMappingDataRequest("); + boolean first = true; + + sb.append("protocol_version:"); + sb.append(this.protocol_version); + first = false; + if (!first) sb.append(", "); + sb.append("requestorUserName:"); + if (this.requestorUserName == null) { + sb.append("null"); + } else { + sb.append(this.requestorUserName); + } + first = false; + if (!first) sb.append(", "); + sb.append("overwriteRole:"); + sb.append(this.overwriteRole); + first = false; + if (!first) sb.append(", "); + sb.append("mappingData:"); + if (this.mappingData == null) { + sb.append("null"); + } else { + sb.append(this.mappingData); + } + first = false; + sb.append(")"); + return 
sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetProtocol_version()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'protocol_version' is unset! Struct:" + toString()); + } + + if (!isSetRequestorUserName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'requestorUserName' is unset! Struct:" + toString()); + } + + if (!isSetOverwriteRole()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'overwriteRole' is unset! Struct:" + toString()); + } + + if (!isSetMappingData()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'mappingData' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + if (mappingData != null) { + mappingData.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TSentryImportMappingDataRequestStandardSchemeFactory implements SchemeFactory { + public TSentryImportMappingDataRequestStandardScheme getScheme() { + return new TSentryImportMappingDataRequestStandardScheme(); + } + } + + private static class TSentryImportMappingDataRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TSentryImportMappingDataRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // PROTOCOL_VERSION + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.protocol_version = iprot.readI32(); + struct.setProtocol_versionIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // REQUESTOR_USER_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.requestorUserName = iprot.readString(); + struct.setRequestorUserNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // OVERWRITE_ROLE + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.overwriteRole = iprot.readBool(); + struct.setOverwriteRoleIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // MAPPING_DATA + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.mappingData = new TSentryMappingData(); + struct.mappingData.read(iprot); + struct.setMappingDataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TSentryImportMappingDataRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(PROTOCOL_VERSION_FIELD_DESC); + oprot.writeI32(struct.protocol_version); + oprot.writeFieldEnd(); + if (struct.requestorUserName != null) { + oprot.writeFieldBegin(REQUESTOR_USER_NAME_FIELD_DESC); + oprot.writeString(struct.requestorUserName); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(OVERWRITE_ROLE_FIELD_DESC); + oprot.writeBool(struct.overwriteRole); + oprot.writeFieldEnd(); + if (struct.mappingData != null) { + oprot.writeFieldBegin(MAPPING_DATA_FIELD_DESC); + struct.mappingData.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TSentryImportMappingDataRequestTupleSchemeFactory implements SchemeFactory { + public TSentryImportMappingDataRequestTupleScheme getScheme() { + return new TSentryImportMappingDataRequestTupleScheme(); + } + } + + private static class TSentryImportMappingDataRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TSentryImportMappingDataRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI32(struct.protocol_version); + oprot.writeString(struct.requestorUserName); + oprot.writeBool(struct.overwriteRole); + struct.mappingData.write(oprot); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TSentryImportMappingDataRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.protocol_version = iprot.readI32(); + struct.setProtocol_versionIsSet(true); + struct.requestorUserName = iprot.readString(); + struct.setRequestorUserNameIsSet(true); + struct.overwriteRole = iprot.readBool(); + struct.setOverwriteRoleIsSet(true); + struct.mappingData = new TSentryMappingData(); + struct.mappingData.read(iprot); + struct.setMappingDataIsSet(true); + } + } + +} + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryImportMappingDataResponse.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryImportMappingDataResponse.java new file mode 100644 index 000000000..8276fcf45 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryImportMappingDataResponse.java @@ -0,0 +1,390 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.sentry.provider.db.service.thrift; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; 
+import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TSentryImportMappingDataResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSentryImportMappingDataResponse"); + + private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TSentryImportMappingDataResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TSentryImportMappingDataResponseTupleSchemeFactory()); + } + + private org.apache.sentry.service.thrift.TSentryResponseStatus status; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + STATUS((short)1, "status"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // STATUS + return STATUS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.sentry.service.thrift.TSentryResponseStatus.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TSentryImportMappingDataResponse.class, metaDataMap); + } + + public TSentryImportMappingDataResponse() { + } + + public TSentryImportMappingDataResponse( + org.apache.sentry.service.thrift.TSentryResponseStatus status) + { + this(); + this.status = status; + } + + /** + * Performs a deep copy on other. 
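Like its siblings, this response struct serializes itself through whichever scheme matches the protocol in use; the private writeObject/readObject helpers generated further down pin plain Java serialization to TCompactProtocol over a TIOStreamTransport. A minimal round-trip sketch built from those same pieces (it assumes a fully populated struct, since the standard scheme's write path calls validate()):

    static TSentryImportMappingDataResponse roundTrip(TSentryImportMappingDataResponse original)
        throws org.apache.thrift.TException {
      java.io.ByteArrayOutputStream buffer = new java.io.ByteArrayOutputStream();
      // Serialize with the same protocol/transport pair the generated helpers use.
      original.write(new org.apache.thrift.protocol.TCompactProtocol(
          new org.apache.thrift.transport.TIOStreamTransport(buffer)));
      TSentryImportMappingDataResponse copy = new TSentryImportMappingDataResponse();
      copy.read(new org.apache.thrift.protocol.TCompactProtocol(
          new org.apache.thrift.transport.TIOStreamTransport(
              new java.io.ByteArrayInputStream(buffer.toByteArray()))));
      return copy;  // copy.equals(original) holds: status is the only field compared
    }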
+ */ + public TSentryImportMappingDataResponse(TSentryImportMappingDataResponse other) { + if (other.isSetStatus()) { + this.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(other.status); + } + } + + public TSentryImportMappingDataResponse deepCopy() { + return new TSentryImportMappingDataResponse(this); + } + + @Override + public void clear() { + this.status = null; + } + + public org.apache.sentry.service.thrift.TSentryResponseStatus getStatus() { + return this.status; + } + + public void setStatus(org.apache.sentry.service.thrift.TSentryResponseStatus status) { + this.status = status; + } + + public void unsetStatus() { + this.status = null; + } + + /** Returns true if field status is set (has been assigned a value) and false otherwise */ + public boolean isSetStatus() { + return this.status != null; + } + + public void setStatusIsSet(boolean value) { + if (!value) { + this.status = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case STATUS: + if (value == null) { + unsetStatus(); + } else { + setStatus((org.apache.sentry.service.thrift.TSentryResponseStatus)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case STATUS: + return getStatus(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case STATUS: + return isSetStatus(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TSentryImportMappingDataResponse) + return this.equals((TSentryImportMappingDataResponse)that); + return false; + } + + public boolean equals(TSentryImportMappingDataResponse that) { + if (that == null) + return false; + + boolean this_present_status = true && this.isSetStatus(); + boolean that_present_status = true && that.isSetStatus(); + if (this_present_status || that_present_status) { + if (!(this_present_status && that_present_status)) + return false; + if (!this.status.equals(that.status)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_status = true && (isSetStatus()); + builder.append(present_status); + if (present_status) + builder.append(status); + + return builder.toHashCode(); + } + + public int compareTo(TSentryImportMappingDataResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TSentryImportMappingDataResponse typedOther = (TSentryImportMappingDataResponse)other; + + lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStatus()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TSentryImportMappingDataResponse("); + boolean first = true; + + sb.append("status:"); + if (this.status == null) { + sb.append("null"); + } else { + sb.append(this.status); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetStatus()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + if (status != null) { + status.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TSentryImportMappingDataResponseStandardSchemeFactory implements SchemeFactory { + public TSentryImportMappingDataResponseStandardScheme getScheme() { + return new TSentryImportMappingDataResponseStandardScheme(); + } + } + + private static class TSentryImportMappingDataResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TSentryImportMappingDataResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // STATUS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TSentryImportMappingDataResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.status != null) { + oprot.writeFieldBegin(STATUS_FIELD_DESC); + struct.status.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TSentryImportMappingDataResponseTupleSchemeFactory implements SchemeFactory { + public TSentryImportMappingDataResponseTupleScheme getScheme() { + return new TSentryImportMappingDataResponseTupleScheme(); + } + } + + private static class TSentryImportMappingDataResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TSentryImportMappingDataResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + 
struct.status.write(oprot); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TSentryImportMappingDataResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.status = new org.apache.sentry.service.thrift.TSentryResponseStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + } + } + +} + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryMappingData.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryMappingData.java new file mode 100644 index 000000000..05d1dd422 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/provider/db/service/thrift/TSentryMappingData.java @@ -0,0 +1,695 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.sentry.provider.db.service.thrift; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TSentryMappingData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSentryMappingData"); + + private static final org.apache.thrift.protocol.TField GROUP_ROLES_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("groupRolesMap", org.apache.thrift.protocol.TType.MAP, (short)1); + private static final org.apache.thrift.protocol.TField ROLE_PRIVILEGES_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("rolePrivilegesMap", org.apache.thrift.protocol.TType.MAP, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TSentryMappingDataStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TSentryMappingDataTupleSchemeFactory()); + } + + private Map> groupRolesMap; // optional + private Map> rolePrivilegesMap; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + GROUP_ROLES_MAP((short)1, "groupRolesMap"), + ROLE_PRIVILEGES_MAP((short)2, "rolePrivilegesMap"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // GROUP_ROLES_MAP + return GROUP_ROLES_MAP; + case 2: // ROLE_PRIVILEGES_MAP + return ROLE_PRIVILEGES_MAP; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private _Fields optionals[] = {_Fields.GROUP_ROLES_MAP,_Fields.ROLE_PRIVILEGES_MAP}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.GROUP_ROLES_MAP, new org.apache.thrift.meta_data.FieldMetaData("groupRolesMap", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))))); + tmpMap.put(_Fields.ROLE_PRIVILEGES_MAP, new org.apache.thrift.meta_data.FieldMetaData("rolePrivilegesMap", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSentryPrivilege.class))))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TSentryMappingData.class, metaDataMap); + } + + public TSentryMappingData() { + } + + /** + * Performs a deep copy on other. 
+ */ + public TSentryMappingData(TSentryMappingData other) { + if (other.isSetGroupRolesMap()) { + Map> __this__groupRolesMap = new HashMap>(); + for (Map.Entry> other_element : other.groupRolesMap.entrySet()) { + + String other_element_key = other_element.getKey(); + Set other_element_value = other_element.getValue(); + + String __this__groupRolesMap_copy_key = other_element_key; + + Set __this__groupRolesMap_copy_value = new HashSet(); + for (String other_element_value_element : other_element_value) { + __this__groupRolesMap_copy_value.add(other_element_value_element); + } + + __this__groupRolesMap.put(__this__groupRolesMap_copy_key, __this__groupRolesMap_copy_value); + } + this.groupRolesMap = __this__groupRolesMap; + } + if (other.isSetRolePrivilegesMap()) { + Map> __this__rolePrivilegesMap = new HashMap>(); + for (Map.Entry> other_element : other.rolePrivilegesMap.entrySet()) { + + String other_element_key = other_element.getKey(); + Set other_element_value = other_element.getValue(); + + String __this__rolePrivilegesMap_copy_key = other_element_key; + + Set __this__rolePrivilegesMap_copy_value = new HashSet(); + for (TSentryPrivilege other_element_value_element : other_element_value) { + __this__rolePrivilegesMap_copy_value.add(new TSentryPrivilege(other_element_value_element)); + } + + __this__rolePrivilegesMap.put(__this__rolePrivilegesMap_copy_key, __this__rolePrivilegesMap_copy_value); + } + this.rolePrivilegesMap = __this__rolePrivilegesMap; + } + } + + public TSentryMappingData deepCopy() { + return new TSentryMappingData(this); + } + + @Override + public void clear() { + this.groupRolesMap = null; + this.rolePrivilegesMap = null; + } + + public int getGroupRolesMapSize() { + return (this.groupRolesMap == null) ? 0 : this.groupRolesMap.size(); + } + + public void putToGroupRolesMap(String key, Set val) { + if (this.groupRolesMap == null) { + this.groupRolesMap = new HashMap>(); + } + this.groupRolesMap.put(key, val); + } + + public Map> getGroupRolesMap() { + return this.groupRolesMap; + } + + public void setGroupRolesMap(Map> groupRolesMap) { + this.groupRolesMap = groupRolesMap; + } + + public void unsetGroupRolesMap() { + this.groupRolesMap = null; + } + + /** Returns true if field groupRolesMap is set (has been assigned a value) and false otherwise */ + public boolean isSetGroupRolesMap() { + return this.groupRolesMap != null; + } + + public void setGroupRolesMapIsSet(boolean value) { + if (!value) { + this.groupRolesMap = null; + } + } + + public int getRolePrivilegesMapSize() { + return (this.rolePrivilegesMap == null) ? 
0 : this.rolePrivilegesMap.size(); + } + + public void putToRolePrivilegesMap(String key, Set val) { + if (this.rolePrivilegesMap == null) { + this.rolePrivilegesMap = new HashMap>(); + } + this.rolePrivilegesMap.put(key, val); + } + + public Map> getRolePrivilegesMap() { + return this.rolePrivilegesMap; + } + + public void setRolePrivilegesMap(Map> rolePrivilegesMap) { + this.rolePrivilegesMap = rolePrivilegesMap; + } + + public void unsetRolePrivilegesMap() { + this.rolePrivilegesMap = null; + } + + /** Returns true if field rolePrivilegesMap is set (has been assigned a value) and false otherwise */ + public boolean isSetRolePrivilegesMap() { + return this.rolePrivilegesMap != null; + } + + public void setRolePrivilegesMapIsSet(boolean value) { + if (!value) { + this.rolePrivilegesMap = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case GROUP_ROLES_MAP: + if (value == null) { + unsetGroupRolesMap(); + } else { + setGroupRolesMap((Map>)value); + } + break; + + case ROLE_PRIVILEGES_MAP: + if (value == null) { + unsetRolePrivilegesMap(); + } else { + setRolePrivilegesMap((Map>)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case GROUP_ROLES_MAP: + return getGroupRolesMap(); + + case ROLE_PRIVILEGES_MAP: + return getRolePrivilegesMap(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case GROUP_ROLES_MAP: + return isSetGroupRolesMap(); + case ROLE_PRIVILEGES_MAP: + return isSetRolePrivilegesMap(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TSentryMappingData) + return this.equals((TSentryMappingData)that); + return false; + } + + public boolean equals(TSentryMappingData that) { + if (that == null) + return false; + + boolean this_present_groupRolesMap = true && this.isSetGroupRolesMap(); + boolean that_present_groupRolesMap = true && that.isSetGroupRolesMap(); + if (this_present_groupRolesMap || that_present_groupRolesMap) { + if (!(this_present_groupRolesMap && that_present_groupRolesMap)) + return false; + if (!this.groupRolesMap.equals(that.groupRolesMap)) + return false; + } + + boolean this_present_rolePrivilegesMap = true && this.isSetRolePrivilegesMap(); + boolean that_present_rolePrivilegesMap = true && that.isSetRolePrivilegesMap(); + if (this_present_rolePrivilegesMap || that_present_rolePrivilegesMap) { + if (!(this_present_rolePrivilegesMap && that_present_rolePrivilegesMap)) + return false; + if (!this.rolePrivilegesMap.equals(that.rolePrivilegesMap)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_groupRolesMap = true && (isSetGroupRolesMap()); + builder.append(present_groupRolesMap); + if (present_groupRolesMap) + builder.append(groupRolesMap); + + boolean present_rolePrivilegesMap = true && (isSetRolePrivilegesMap()); + builder.append(present_rolePrivilegesMap); + if (present_rolePrivilegesMap) + builder.append(rolePrivilegesMap); + + return builder.toHashCode(); + } + + public int compareTo(TSentryMappingData other) { + if (!getClass().equals(other.getClass())) { + return 
getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TSentryMappingData typedOther = (TSentryMappingData)other; + + lastComparison = Boolean.valueOf(isSetGroupRolesMap()).compareTo(typedOther.isSetGroupRolesMap()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetGroupRolesMap()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.groupRolesMap, typedOther.groupRolesMap); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRolePrivilegesMap()).compareTo(typedOther.isSetRolePrivilegesMap()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRolePrivilegesMap()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rolePrivilegesMap, typedOther.rolePrivilegesMap); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TSentryMappingData("); + boolean first = true; + + if (isSetGroupRolesMap()) { + sb.append("groupRolesMap:"); + if (this.groupRolesMap == null) { + sb.append("null"); + } else { + sb.append(this.groupRolesMap); + } + first = false; + } + if (isSetRolePrivilegesMap()) { + if (!first) sb.append(", "); + sb.append("rolePrivilegesMap:"); + if (this.rolePrivilegesMap == null) { + sb.append("null"); + } else { + sb.append(this.rolePrivilegesMap); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TSentryMappingDataStandardSchemeFactory implements SchemeFactory { + public TSentryMappingDataStandardScheme getScheme() { + return new TSentryMappingDataStandardScheme(); + } + } + + private static class TSentryMappingDataStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TSentryMappingData struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // GROUP_ROLES_MAP + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map132 = iprot.readMapBegin(); + struct.groupRolesMap = 
new HashMap>(2*_map132.size); + for (int _i133 = 0; _i133 < _map132.size; ++_i133) + { + String _key134; // required + Set _val135; // required + _key134 = iprot.readString(); + { + org.apache.thrift.protocol.TSet _set136 = iprot.readSetBegin(); + _val135 = new HashSet(2*_set136.size); + for (int _i137 = 0; _i137 < _set136.size; ++_i137) + { + String _elem138; // required + _elem138 = iprot.readString(); + _val135.add(_elem138); + } + iprot.readSetEnd(); + } + struct.groupRolesMap.put(_key134, _val135); + } + iprot.readMapEnd(); + } + struct.setGroupRolesMapIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // ROLE_PRIVILEGES_MAP + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map139 = iprot.readMapBegin(); + struct.rolePrivilegesMap = new HashMap>(2*_map139.size); + for (int _i140 = 0; _i140 < _map139.size; ++_i140) + { + String _key141; // required + Set _val142; // required + _key141 = iprot.readString(); + { + org.apache.thrift.protocol.TSet _set143 = iprot.readSetBegin(); + _val142 = new HashSet(2*_set143.size); + for (int _i144 = 0; _i144 < _set143.size; ++_i144) + { + TSentryPrivilege _elem145; // required + _elem145 = new TSentryPrivilege(); + _elem145.read(iprot); + _val142.add(_elem145); + } + iprot.readSetEnd(); + } + struct.rolePrivilegesMap.put(_key141, _val142); + } + iprot.readMapEnd(); + } + struct.setRolePrivilegesMapIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TSentryMappingData struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.groupRolesMap != null) { + if (struct.isSetGroupRolesMap()) { + oprot.writeFieldBegin(GROUP_ROLES_MAP_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.SET, struct.groupRolesMap.size())); + for (Map.Entry> _iter146 : struct.groupRolesMap.entrySet()) + { + oprot.writeString(_iter146.getKey()); + { + oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, _iter146.getValue().size())); + for (String _iter147 : _iter146.getValue()) + { + oprot.writeString(_iter147); + } + oprot.writeSetEnd(); + } + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.rolePrivilegesMap != null) { + if (struct.isSetRolePrivilegesMap()) { + oprot.writeFieldBegin(ROLE_PRIVILEGES_MAP_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.SET, struct.rolePrivilegesMap.size())); + for (Map.Entry> _iter148 : struct.rolePrivilegesMap.entrySet()) + { + oprot.writeString(_iter148.getKey()); + { + oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRUCT, _iter148.getValue().size())); + for (TSentryPrivilege _iter149 : _iter148.getValue()) + { + _iter149.write(oprot); + } + oprot.writeSetEnd(); + } + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TSentryMappingDataTupleSchemeFactory implements 
SchemeFactory { + public TSentryMappingDataTupleScheme getScheme() { + return new TSentryMappingDataTupleScheme(); + } + } + + private static class TSentryMappingDataTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TSentryMappingData struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetGroupRolesMap()) { + optionals.set(0); + } + if (struct.isSetRolePrivilegesMap()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetGroupRolesMap()) { + { + oprot.writeI32(struct.groupRolesMap.size()); + for (Map.Entry> _iter150 : struct.groupRolesMap.entrySet()) + { + oprot.writeString(_iter150.getKey()); + { + oprot.writeI32(_iter150.getValue().size()); + for (String _iter151 : _iter150.getValue()) + { + oprot.writeString(_iter151); + } + } + } + } + } + if (struct.isSetRolePrivilegesMap()) { + { + oprot.writeI32(struct.rolePrivilegesMap.size()); + for (Map.Entry> _iter152 : struct.rolePrivilegesMap.entrySet()) + { + oprot.writeString(_iter152.getKey()); + { + oprot.writeI32(_iter152.getValue().size()); + for (TSentryPrivilege _iter153 : _iter152.getValue()) + { + _iter153.write(oprot); + } + } + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TSentryMappingData struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TMap _map154 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.SET, iprot.readI32()); + struct.groupRolesMap = new HashMap>(2*_map154.size); + for (int _i155 = 0; _i155 < _map154.size; ++_i155) + { + String _key156; // required + Set _val157; // required + _key156 = iprot.readString(); + { + org.apache.thrift.protocol.TSet _set158 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + _val157 = new HashSet(2*_set158.size); + for (int _i159 = 0; _i159 < _set158.size; ++_i159) + { + String _elem160; // required + _elem160 = iprot.readString(); + _val157.add(_elem160); + } + } + struct.groupRolesMap.put(_key156, _val157); + } + } + struct.setGroupRolesMapIsSet(true); + } + if (incoming.get(1)) { + { + org.apache.thrift.protocol.TMap _map161 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.SET, iprot.readI32()); + struct.rolePrivilegesMap = new HashMap>(2*_map161.size); + for (int _i162 = 0; _i162 < _map161.size; ++_i162) + { + String _key163; // required + Set _val164; // required + _key163 = iprot.readString(); + { + org.apache.thrift.protocol.TSet _set165 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val164 = new HashSet(2*_set165.size); + for (int _i166 = 0; _i166 < _set165.size; ++_i166) + { + TSentryPrivilege _elem167; // required + _elem167 = new TSentryPrivilege(); + _elem167.read(iprot); + _val164.add(_elem167); + } + } + struct.rolePrivilegesMap.put(_key163, _val164); + } + } + struct.setRolePrivilegesMapIsSet(true); + } + } + } + +} + diff --git a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/service/thrift/sentry_common_serviceConstants.java b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/service/thrift/sentry_common_serviceConstants.java 
index 6c3d17159..ff2ddb77b 100644 --- a/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/service/thrift/sentry_common_serviceConstants.java +++ b/sentry-provider/sentry-provider-db/src/gen/thrift/gen-javabean/org/apache/sentry/service/thrift/sentry_common_serviceConstants.java @@ -35,7 +35,7 @@ public class sentry_common_serviceConstants { public static final int TSENTRY_SERVICE_V1 = 1; - public static final int TSENTRY_SERVICE_V2 = 1; + public static final int TSENTRY_SERVICE_V2 = 2; public static final int TSENTRY_STATUS_OK = 0; @@ -49,4 +49,6 @@ public class sentry_common_serviceConstants { public static final int TSENTRY_STATUS_ACCESS_DENIED = 5; + public static final int TSENTRY_STATUS_THRIFT_VERSION_MISMATCH = 6; + } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryPolicyStorePlugin.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryPolicyStorePlugin.java index 998a48bf2..fe1ea1f1f 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryPolicyStorePlugin.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryPolicyStorePlugin.java @@ -32,7 +32,7 @@ public interface SentryPolicyStorePlugin { @SuppressWarnings("serial") - public static class SentryPluginException extends SentryUserException { + class SentryPluginException extends SentryUserException { public SentryPluginException(String msg) { super(msg); } @@ -41,20 +41,20 @@ public SentryPluginException(String msg, Throwable t) { } } - public void initialize(Configuration conf, SentryStore sentryStore) throws SentryPluginException; + void initialize(Configuration conf, SentryStore sentryStore) throws SentryPluginException; - public void onAlterSentryRoleAddGroups(TAlterSentryRoleAddGroupsRequest tRequest) throws SentryPluginException; + void onAlterSentryRoleAddGroups(TAlterSentryRoleAddGroupsRequest tRequest) throws SentryPluginException; - public void onAlterSentryRoleDeleteGroups(TAlterSentryRoleDeleteGroupsRequest tRequest) throws SentryPluginException; + void onAlterSentryRoleDeleteGroups(TAlterSentryRoleDeleteGroupsRequest tRequest) throws SentryPluginException; - public void onAlterSentryRoleGrantPrivilege(TAlterSentryRoleGrantPrivilegeRequest tRequest) throws SentryPluginException; + void onAlterSentryRoleGrantPrivilege(TAlterSentryRoleGrantPrivilegeRequest tRequest) throws SentryPluginException; - public void onAlterSentryRoleRevokePrivilege(TAlterSentryRoleRevokePrivilegeRequest tRequest) throws SentryPluginException; + void onAlterSentryRoleRevokePrivilege(TAlterSentryRoleRevokePrivilegeRequest tRequest) throws SentryPluginException; - public void onDropSentryRole(TDropSentryRoleRequest tRequest) throws SentryPluginException; + void onDropSentryRole(TDropSentryRoleRequest tRequest) throws SentryPluginException; - public void onRenameSentryPrivilege(TRenamePrivilegesRequest request) throws SentryPluginException; + void onRenameSentryPrivilege(TRenamePrivilegesRequest request) throws SentryPluginException; - public void onDropSentryPrivilege(TDropPrivilegesRequest request) throws SentryPluginException; + void onDropSentryPrivilege(TDropPrivilegesRequest request) throws SentryPluginException; } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryThriftAPIMismatchException.java 
b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryThriftAPIMismatchException.java new file mode 100644 index 000000000..104616004 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryThriftAPIMismatchException.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db; + +import org.apache.sentry.SentryUserException; + +public class SentryThriftAPIMismatchException extends SentryUserException { + private static final long serialVersionUID = 7535410604425511738L; + public SentryThriftAPIMismatchException(String msg) { + super(msg); + } + public SentryThriftAPIMismatchException(String msg, String reason) { + super(msg, reason); + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java index ea8eb795f..b99609506 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java @@ -16,11 +16,9 @@ */ package org.apache.sentry.provider.db; -import java.io.IOException; import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.sentry.SentryUserException; import org.apache.sentry.core.common.ActiveRoleSet; import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.core.common.SentryConfigurationException; @@ -28,10 +26,10 @@ import org.apache.sentry.provider.common.ProviderBackendContext; import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; import org.apache.sentry.service.thrift.SentryServiceClientFactory; +import org.apache.sentry.service.thrift.ServiceConstants; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSet; public class SimpleDBProviderBackend implements ProviderBackend { @@ -39,37 +37,26 @@ public class SimpleDBProviderBackend implements ProviderBackend { private static final Logger LOGGER = LoggerFactory .getLogger(SimpleDBProviderBackend.class); - private SentryPolicyServiceClient policyServiceClient; + private Configuration conf; + private int retryCount; + private int retryIntervalSec; - private volatile boolean initialized; - private Configuration conf; - - public SimpleDBProviderBackend(Configuration conf, String resourcePath) throws Exception { + public SimpleDBProviderBackend(Configuration conf, String resourcePath) throws Exception { //NOPMD // DB Provider doesn't use 
policy file path this(conf); } public SimpleDBProviderBackend(Configuration conf) throws Exception { - this(SentryServiceClientFactory.create(conf)); - this.initialized = false; this.conf = conf; + this.retryCount = conf.getInt(ServiceConstants.ClientConfig.RETRY_COUNT_CONF, ServiceConstants.ClientConfig.RETRY_COUNT_DEFAULT); + this.retryIntervalSec = conf.getInt(ServiceConstants.ClientConfig.RETRY_INTERVAL_SEC_CONF, ServiceConstants.ClientConfig.RETRY_INTERVAL_SEC_DEFAULT); } - - @VisibleForTesting - public SimpleDBProviderBackend(SentryPolicyServiceClient policyServiceClient) throws IOException { - this.initialized = false; - this.policyServiceClient = policyServiceClient; - } - /** * {@inheritDoc} */ @Override public void initialize(ProviderBackendContext context) { - if (initialized) { - throw new IllegalStateException("Backend has already been initialized, cannot be initialized twice"); - } - this.initialized = true; + //Noop } /** @@ -77,29 +64,35 @@ public void initialize(ProviderBackendContext context) { */ @Override public ImmutableSet<String> getPrivileges(Set<String> groups, ActiveRoleSet roleSet, Authorizable... authorizableHierarchy) { - return getPrivileges(1, groups, roleSet, authorizableHierarchy); - } - - private ImmutableSet<String> getPrivileges(int retryCount, Set<String> groups, ActiveRoleSet roleSet, Authorizable... authorizableHierarchy) { - if (!initialized) { - throw new IllegalStateException("Backend has not been properly initialized"); - } - try { - return ImmutableSet.copyOf(getSentryClient().listPrivilegesForProvider(groups, roleSet, authorizableHierarchy)); - } catch (Exception e) { - policyServiceClient = null; - if (retryCount > 0) { - return getPrivileges(retryCount - 1, groups, roleSet, authorizableHierarchy); - } else { - String msg = "Unable to obtain privileges from server: " + e.getMessage(); - LOGGER.error(msg, e); - try { + int retries = Math.max(retryCount + 1, 1); // retryCount + 1 overflows when retryCount is configured as Integer.MAX_VALUE; the max() then yields a single attempt + while (retries > 0) { + retries--; + SentryPolicyServiceClient policyServiceClient = null; + try { + policyServiceClient = SentryServiceClientFactory.create(conf); + return ImmutableSet.copyOf(policyServiceClient.listPrivilegesForProvider(groups, roleSet, authorizableHierarchy)); + } catch (Exception e) { + //TODO: differentiate transient errors from permanent errors + String msg = "Unable to obtain privileges from server: " + e.getMessage() + "."; + if (retries > 0) { + LOGGER.warn(msg + " Will retry " + retries + " more time(s)"); + } else { + LOGGER.error(msg, e); + } + if (retries > 0) { + try { + Thread.sleep(retryIntervalSec * 1000); + } catch (InterruptedException e1) { + LOGGER.info("Sleep interrupted.", e1); + } + } + } finally { + if (policyServiceClient != null) { policyServiceClient.close(); - } catch (Exception ex2) { - // Ignore } } } + return ImmutableSet.of(); } @@ -113,32 +106,15 @@ public ImmutableSet<String> getRoles(Set<String> groups, ActiveRoleSet roleSet) @Override public void close() { - if (policyServiceClient != null) { - policyServiceClient.close(); - } + //Noop } - private SentryPolicyServiceClient getSentryClient() { - if (policyServiceClient == null) { - try { - policyServiceClient = SentryServiceClientFactory.create(conf); - } catch (Exception e) { - LOGGER.error("Error connecting to Sentry ['{}'] !!", - e.getMessage()); - policyServiceClient = null; - return null; - } - } - return policyServiceClient; - } /** * SimpleDBProviderBackend does not implement validatePolicy() */ @Override public void validatePolicy(boolean strictValidation) throws SentryConfigurationException { - if (!initialized) { - throw new IllegalStateException("Backend has not been properly initialized"); - } - // db provider does not implement validation + //Noop } } +
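The rewritten getPrivileges above trades the cached client and recursive retry for a bounded loop: create a client, attempt the call, close the client in finally, sleep retryIntervalSec between attempts, and fall through to an empty set once retryCount + 1 attempts are exhausted. Returning the empty set fails closed: the caller sees no privileges rather than a propagated error. A standalone sketch of the same shape, with hypothetical names (BoundedRetry, task) rather than Sentry API:

    import java.util.concurrent.Callable;

    public final class BoundedRetry {
      /** Runs task up to retryCount + 1 times, sleeping between attempts; null when all fail. */
      public static <T> T run(Callable<T> task, int retryCount, int retryIntervalSec) {
        int retries = Math.max(retryCount + 1, 1); // same overflow guard as the patch above
        while (retries > 0) {
          retries--;
          try {
            return task.call();
          } catch (Exception e) {
            if (retries > 0) {
              try {
                Thread.sleep(retryIntervalSec * 1000L); // long math avoids int overflow
              } catch (InterruptedException ie) {
                Thread.currentThread().interrupt(); // preserve interrupt status and stop retrying
                return null;
              }
            }
          }
        }
        return null;
      }
    }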
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SearchProviderBackend.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/SentryGenericProviderBackend.java similarity index 59% rename from sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SearchProviderBackend.java rename to sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/SentryGenericProviderBackend.java index ae324bfa2..474d05c74 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SearchProviderBackend.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/SentryGenericProviderBackend.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.sentry.provider.db.generic.service.thrift; +package org.apache.sentry.provider.db.generic; import java.util.Arrays; import java.util.Set; @@ -26,9 +26,11 @@ import org.apache.sentry.core.common.ActiveRoleSet; import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.core.common.SentryConfigurationException; -import org.apache.sentry.core.common.Subject; import org.apache.sentry.provider.common.ProviderBackend; import org.apache.sentry.provider.common.ProviderBackendContext; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryRole; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,44 +38,49 @@ import com.google.common.collect.Sets; /** - * when Solr integration with Database store, this backend will communicate with Sentry service to get - * privileges according to the requested groups - * + * This class is used when a component such as Hive, Solr, or Sqoop wants to integrate with the Sentry service */ -public class SearchProviderBackend implements ProviderBackend { - private static final Logger LOGGER = LoggerFactory.getLogger(SearchProviderBackend.class); +public class SentryGenericProviderBackend implements ProviderBackend { + private static final Logger LOGGER = LoggerFactory.getLogger(SentryGenericProviderBackend.class); private final Configuration conf; - private final Subject subject; private volatile boolean initialized = false; + private String componentType; + private String serviceName; - public SearchProviderBackend(Configuration conf, String resourcePath) throws Exception { + // Every ProviderBackend must expose this (Configuration, String) constructor so an + // auth binding, e.g. SqoopAuthBinding, can create it reflectively + public SentryGenericProviderBackend(Configuration conf, String resource) //NOPMD + throws Exception { this.conf = conf; - /** - * Who create the searchProviderBackend, this subject will been used the requester to communicate - * with Sentry Service - */ - subject = new Subject(UserGroupInformation.getCurrentUser() - .getShortUserName()); } @Override public void initialize(ProviderBackendContext context) { if (initialized) { - throw new IllegalStateException("SearchProviderBackend has already been initialized, cannot be initialized twice"); + throw new IllegalStateException("SentryGenericProviderBackend has already been initialized, cannot be initialized twice"); } this.initialized = true; } + /** + * SENTRY-296 (generating a client that supports connection pooling) has already been developed and reviewed. + * Once it is committed to master, this getClient method should be refactored to use the connection pool. + */ + private SentryGenericServiceClient getClient() throws Exception { + return SentryGenericServiceClientFactory.create(conf); + } + @Override public ImmutableSet<String> getPrivileges(Set<String> groups, ActiveRoleSet roleSet, Authorizable... authorizableHierarchy) { if (!initialized) { - throw new IllegalStateException("SearchProviderBackend has not been properly initialized"); + throw new IllegalStateException("SentryGenericProviderBackend has not been properly initialized"); } - SearchPolicyServiceClient client = null; + SentryGenericServiceClient client = null; try { client = getClient(); - return ImmutableSet.copyOf(client.listPrivilegesForProvider(roleSet, groups, Arrays.asList(authorizableHierarchy))); + return ImmutableSet.copyOf(client.listPrivilegesForProvider(componentType, serviceName, + roleSet, groups, Arrays.asList(authorizableHierarchy))); } catch (SentryUserException e) { String msg = "Unable to obtain privileges from server: " + e.getMessage(); LOGGER.error(msg, e); @@ -91,15 +98,16 @@ public ImmutableSet<String> getPrivileges(Set<String> groups, @Override public ImmutableSet<String> getRoles(Set<String> groups, ActiveRoleSet roleSet) { if (!initialized) { - throw new IllegalStateException("SearchProviderBackend has not been properly initialized"); + throw new IllegalStateException("SentryGenericProviderBackend has not been properly initialized"); } - SearchPolicyServiceClient client = null; + SentryGenericServiceClient client = null; try { Set<TSentryRole> tRoles = Sets.newHashSet(); client = getClient(); //get the roles according to group + String requestor = UserGroupInformation.getCurrentUser().getShortUserName(); for (String group : groups) { - tRoles.addAll(client.listRolesByGroupName(subject.getName(), group)); + tRoles.addAll(client.listRolesByGroupName(requestor, group, getComponentType())); } Set<String> roles = Sets.newHashSet(); for (TSentryRole tRole : tRoles) { @@ -120,22 +128,35 @@ public ImmutableSet<String> getRoles(Set<String> groups, ActiveRoleSet roleSet) return ImmutableSet.of(); } - public SearchPolicyServiceClient getClient() throws Exception { - return new SearchPolicyServiceClient(conf); - } - /** - * SearchProviderBackend does nothing in the validatePolicy() + * SentryGenericProviderBackend does nothing in the validatePolicy() */ @Override public void validatePolicy(boolean strictValidation) throws SentryConfigurationException { if (!initialized) { - throw new IllegalStateException("Backend has not been properly initialized"); + throw new IllegalStateException("SentryGenericProviderBackend has not been properly initialized"); } } @Override public void close() { } -} \ No newline at end of file + + public void setComponentType(String componentType) { + this.componentType = componentType; + } + + public String getComponentType() { + return componentType; + } + + public String getServiceName() { + return serviceName; + } + + public void setServiceName(String serviceName) { + this.serviceName = serviceName; + } + +}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/DelegateSentryStore.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/DelegateSentryStore.java index 6061ef2a6..d51b3baf5 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/DelegateSentryStore.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/DelegateSentryStore.java @@ -17,6 +17,7 @@ */ package org.apache.sentry.provider.db.generic.service.persistent; +import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.LinkedList; @@ -39,6 +40,7 @@ import org.apache.sentry.provider.db.service.model.MSentryRole; import org.apache.sentry.provider.db.service.persistent.CommitContext; import org.apache.sentry.provider.db.service.persistent.SentryStore; +import org.apache.sentry.provider.db.service.thrift.SentryConfigurationException; import org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessor; import org.apache.sentry.provider.db.service.thrift.TSentryGroup; import org.apache.sentry.provider.db.service.thrift.TSentryRole; @@ -65,14 +67,14 @@ public class DelegateSentryStore implements SentryStoreLayer { private PrivilegeOperatePersistence privilegeOperator; public DelegateSentryStore(Configuration conf) throws SentryNoSuchObjectException, - SentryAccessDeniedException { - this.privilegeOperator = new PrivilegeOperatePersistence(); + SentryAccessDeniedException, SentryConfigurationException, IOException { + this.privilegeOperator = new PrivilegeOperatePersistence(conf); // The generic model doesn't turn on the thread that cleans hive privileges conf.set(ServerConfig.SENTRY_STORE_ORPHANED_PRIVILEGE_REMOVAL,"false"); this.conf = conf; //delegated old sentryStore this.delegate = new SentryStore(conf); - adminGroups = ImmutableSet.copyOf(toTrimedLower(Sets.newHashSet(conf.getStrings( + adminGroups = ImmutableSet.copyOf(toTrimmed(Sets.newHashSet(conf.getStrings( ServerConfig.ADMIN_GROUPS, new String[]{})))); } @@ -111,7 +113,7 @@ public CommitContext dropRole(String component, String role, String requestor) throws SentryNoSuchObjectException { boolean rollbackTransaction = true; PersistenceManager pm = null; - role = toTrimedLower(role); + role = toTrimmedLower(role); try { pm = openTransaction(); Query query = pm.newQuery(MSentryRole.class); @@ -120,7 +122,7 @@ public CommitContext dropRole(String component, String role, String requestor) query.setUnique(true); MSentryRole sentryRole = (MSentryRole) query.execute(role); if (sentryRole == null) { - throw new SentryNoSuchObjectException("Role " + role); + throw new SentryNoSuchObjectException("Role: " + role + " doesn't exist"); } else { pm.retrieve(sentryRole); sentryRole.removeGMPrivileges(); @@ -137,6 +139,11 @@ public CommitContext dropRole(String component, String role, String requestor) } } + @Override + public Set<String> getAllRoleNames() { + return delegate.getAllRoleNames(); + } + @Override public CommitContext alterRoleAddGroups(String component, String role, Set<String> groups, String requestor) throws SentryNoSuchObjectException { @@ -154,14 +161,14 @@ public CommitContext alterRoleDeleteGroups(String component, String role, public CommitContext alterRoleGrantPrivilege(String component, String role, PrivilegeObject privilege, String grantorPrincipal) throws SentryUserException { - role =
toTrimedLower(role); + role = toTrimmedLower(role); PersistenceManager pm = null; boolean rollbackTransaction = true; try{ pm = openTransaction(); MSentryRole mRole = getRole(role, pm); if (mRole == null) { - throw new SentryNoSuchObjectException("role:" + role + " isn't exist"); + throw new SentryNoSuchObjectException("Role: " + role + " doesn't exist"); } /** * check with grant option @@ -185,14 +192,14 @@ public CommitContext alterRoleGrantPrivilege(String component, String role, public CommitContext alterRoleRevokePrivilege(String component, String role, PrivilegeObject privilege, String grantorPrincipal) throws SentryUserException { - role = toTrimedLower(role); + role = toTrimmedLower(role); PersistenceManager pm = null; boolean rollbackTransaction = true; try{ pm = openTransaction(); MSentryRole mRole = getRole(role, pm); if (mRole == null) { - throw new SentryNoSuchObjectException("role:" + role + " isn't exist"); + throw new SentryNoSuchObjectException("Role: " + role + " doesn't exist"); } /** * check with grant option @@ -234,7 +241,7 @@ public CommitContext renamePrivilege(String component, String service, try { pm = openTransaction(); - privilegeOperator.renamePrivilege(toTrimedLower(component), toTrimedLower(service), + privilegeOperator.renamePrivilege(toTrimmedLower(component), toTrimmedLower(service), oldAuthorizables, newAuthorizables, requestor, pm); CommitContext commitContext = commitUpdateTransaction(pm); @@ -289,7 +296,7 @@ private void grantOptionCheck(PrivilegeObject requestPrivilege, String grantorPr + " has no grant!"); } //admin group check - if (!Sets.intersection(adminGroups, toTrimedLower(groups)).isEmpty()) { + if (!Sets.intersection(adminGroups, toTrimmed(groups)).isEmpty()) { return; } //privilege grant option check @@ -316,9 +323,11 @@ public Set getRolesByGroups(String component, Set groups) @Override public Set getGroupsByRoles(String component, Set roles) throws SentryUserException { - roles = toTrimedLower(roles); + roles = toTrimmedLower(roles); Set groupNames = Sets.newHashSet(); - if (roles.size() == 0) return groupNames; + if (roles.size() == 0) { + return groupNames; + } PersistenceManager pm = null; try{ @@ -343,7 +352,9 @@ public Set getGroupsByRoles(String component, Set roles) } return groupNames; } finally { - commitTransaction(pm); + if (pm != null) { + commitTransaction(pm); + } } } @@ -352,21 +363,25 @@ public Set getPrivilegesByRole(String component, Set roles) throws SentryUserException { Preconditions.checkNotNull(roles); Set privileges = Sets.newHashSet(); - if (roles.isEmpty()) return privileges; + if (roles.isEmpty()) { + return privileges; + } PersistenceManager pm = null; try { pm = openTransaction(); Set mRoles = Sets.newHashSet(); for (String role : roles) { - MSentryRole mRole = getRole(toTrimedLower(role), pm); + MSentryRole mRole = getRole(toTrimmedLower(role), pm); if (mRole != null) { mRoles.add(mRole); } } privileges.addAll(privilegeOperator.getPrivilegesByRole(mRoles, pm)); } finally { - commitTransaction(pm); + if (pm != null) { + commitTransaction(pm); + } } return privileges; } @@ -378,15 +393,15 @@ public Set getPrivilegesByProvider(String component, Preconditions.checkNotNull(component); Preconditions.checkNotNull(service); - component = toTrimedLower(component); - service = toTrimedLower(service); + component = toTrimmedLower(component); + service = toTrimmedLower(service); Set privileges = Sets.newHashSet(); PersistenceManager pm = null; try { pm = openTransaction(); //CaseInsensitive roleNames - roles = 
toTrimedLower(roles); + roles = toTrimmedLower(roles); if (groups != null) { roles.addAll(delegate.getRoleNamesForGroups(groups)); @@ -406,25 +421,62 @@ public Set getPrivilegesByProvider(String component, //get the privileges privileges.addAll(privilegeOperator.getPrivilegesByProvider(component, service, mRoles, authorizables, pm)); } finally { - commitTransaction(pm); + if (pm != null) { + commitTransaction(pm); + } } return privileges; } @Override + public Set getPrivilegesByAuthorizable(String component, String service, + Set validActiveRoles, List authorizables) + throws SentryUserException { + + Preconditions.checkNotNull(component); + Preconditions.checkNotNull(service); + + component = toTrimmedLower(component); + service = toTrimmedLower(service); + + Set privileges = Sets.newHashSet(); + PersistenceManager pm = null; + try { + pm = openTransaction(); + + if (validActiveRoles == null || validActiveRoles.size() == 0) { + return privileges; + } + + Set mRoles = Sets.newHashSet(); + for (String role : validActiveRoles) { + MSentryRole mRole = getRole(role, pm); + if (mRole != null) { + mRoles.add(mRole); + } + } + //get the privileges + privileges.addAll(privilegeOperator.getPrivilegesByAuthorizable(component, service, mRoles, authorizables, pm)); + } finally { + commitTransaction(pm); + } + return privileges; + } + + @Override public void close() { delegate.stop(); } private Set toTSentryGroups(Set groups) { Set tSentryGroups = Sets.newHashSet(); - for (String group : toTrimedLower(groups)) { + for (String group : groups) { tSentryGroups.add(new TSentryGroup(group)); } return tSentryGroups; } - private Set toTrimedLower(Set s) { + private Set toTrimmedLower(Set s) { if (s == null) { return new HashSet(); } @@ -435,7 +487,18 @@ private Set toTrimedLower(Set s) { return result; } - private String toTrimedLower(String s) { + private Set toTrimmed(Set s) { + if (s == null) { + return new HashSet(); + } + Set result = Sets.newHashSet(); + for (String v : s) { + result.add(v.trim()); + } + return result; + } + + private String toTrimmedLower(String s) { if (s == null) { return ""; } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/PrivilegeObject.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/PrivilegeObject.java index aa5620703..3c00d23fe 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/PrivilegeObject.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/PrivilegeObject.java @@ -17,10 +17,9 @@ */ package org.apache.sentry.provider.db.generic.service.persistent; -import static org.apache.sentry.provider.common.ProviderConstants.KV_JOINER; -import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER; -import java.util.ArrayList; import java.util.List; import org.apache.sentry.core.common.Authorizable; import com.google.common.base.Preconditions; @@ -91,33 +90,44 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } 
PrivilegeObject other = (PrivilegeObject) obj; if (action == null) { - if (other.action != null) + if (other.action != null) { return false; - } else if (!action.equals(other.action)) + } + } else if (!action.equals(other.action)) { return false; + } if (service == null) { - if (other.service != null) + if (other.service != null) { return false; - } else if (!service.equals(other.service)) + } + } else if (!service.equals(other.service)) { return false; + } if (component == null) { - if (other.component != null) + if (other.component != null) { return false; - } else if (!component.equals(other.component)) + } + } else if (!component.equals(other.component)) { return false; + } if (grantOption == null) { - if (other.grantOption != null) + if (other.grantOption != null) { return false; - } else if (!grantOption.equals(other.grantOption)) + } + } else if (!grantOption.equals(other.grantOption)) { return false; + } if (authorizables.size() != other.authorizables.size()) { return false; @@ -186,7 +196,7 @@ public Builder setAuthorizables(List authorizables) { */ private List toLowerAuthorizableName(List authorizables) { List newAuthorizable = Lists.newArrayList(); - if ((authorizables == null) || (authorizables.size() == 0)) { + if (authorizables == null || authorizables.size() == 0) { return newAuthorizable; } for (final Authorizable authorizable : authorizables) { diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/PrivilegeOperatePersistence.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/PrivilegeOperatePersistence.java index dab7d743c..a86a74095 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/PrivilegeOperatePersistence.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/PrivilegeOperatePersistence.java @@ -17,6 +17,7 @@ */ package org.apache.sentry.provider.db.generic.service.persistent; +import java.lang.reflect.Constructor; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; @@ -26,12 +27,15 @@ import javax.jdo.PersistenceManager; import javax.jdo.Query; +import org.apache.hadoop.conf.Configuration; import org.apache.sentry.SentryUserException; import org.apache.sentry.core.common.Action; import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.core.common.BitFieldAction; import org.apache.sentry.core.common.BitFieldActionFactory; +import org.apache.sentry.core.model.kafka.KafkaActionFactory; import org.apache.sentry.core.model.search.SearchActionFactory; +import org.apache.sentry.core.model.sqoop.SqoopActionFactory; import org.apache.sentry.provider.db.generic.service.persistent.PrivilegeObject.Builder; import org.apache.sentry.provider.db.service.model.MSentryGMPrivilege; import org.apache.sentry.provider.db.service.model.MSentryRole; @@ -40,15 +44,27 @@ import com.google.common.base.Strings; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import org.apache.sentry.service.thrift.ServiceConstants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class used do some operations related privilege and make the results * persistence */ public class PrivilegeOperatePersistence { + private static final Logger LOGGER = LoggerFactory.getLogger(PrivilegeOperatePersistence.class); private static final Map actionFactories = 
Maps.newHashMap(); static{ actionFactories.put("solr", new SearchActionFactory()); + actionFactories.put("sqoop", new SqoopActionFactory()); + actionFactories.put("kafka", KafkaActionFactory.getInstance()); + } + + private final Configuration conf; + + public PrivilegeOperatePersistence(Configuration conf) { + this.conf = conf; } public boolean checkPrivilegeOption(Set roles, PrivilegeObject privilege, PersistenceManager pm) { @@ -57,7 +73,7 @@ public boolean checkPrivilegeOption(Set roles, PrivilegeObject priv //get persistent privileges by roles Query query = pm.newQuery(MSentryGMPrivilege.class); StringBuilder filters = new StringBuilder(); - if ((roles != null) && (roles.size() > 0)) { + if (roles != null && roles.size() > 0) { query.declareVariables("org.apache.sentry.provider.db.service.model.MSentryRole role"); List rolesFiler = new LinkedList(); for (MSentryRole role : roles) { @@ -99,7 +115,7 @@ private void grantRolePartial(MSentryGMPrivilege grantPrivilege, for (BitFieldAction ac : actions) { grantPrivilege.setAction(ac.getValue()); MSentryGMPrivilege existPriv = getPrivilege(grantPrivilege, pm); - if ((existPriv != null) && (role.getGmPrivileges().contains(existPriv))) { + if (existPriv != null && role.getGmPrivileges().contains(existPriv)) { /** * force to load all roles related this privilege * avoid the lazy-loading risk,such as: @@ -119,7 +135,7 @@ private void grantRolePartial(MSentryGMPrivilege grantPrivilege, */ grantPrivilege.setAction(allAction.getValue()); MSentryGMPrivilege allPrivilege = getPrivilege(grantPrivilege, pm); - if ((allPrivilege != null) && (role.getGmPrivileges().contains(allPrivilege))) { + if (allPrivilege != null && role.getGmPrivileges().contains(allPrivilege)) { return; } } @@ -144,6 +160,8 @@ public void revokePrivilege(PrivilegeObject privilege,MSentryRole role, Persiste MSentryGMPrivilege mPrivilege = getPrivilege(convertToPrivilege(privilege), pm); if (mPrivilege == null) { mPrivilege = convertToPrivilege(privilege); + } else { + mPrivilege = (MSentryGMPrivilege) pm.detachCopy(mPrivilege); } Set privilegeGraph = Sets.newHashSet(); @@ -161,10 +179,9 @@ public void revokePrivilege(PrivilegeObject privilege,MSentryRole role, Persiste * privilege.removeRole(role) and pm.makePersistent(privilege) * will remove other roles that shouldn't been removed */ - pm.retrieve(persistedPriv); - revokeRolePartial(mPrivilege, persistedPriv, role, pm); } + pm.makePersistent(role); } /** @@ -180,7 +197,7 @@ private Set populateIncludePrivileges(Set roles //add populateIncludePrivilegesQuery filters.append(MSentryGMPrivilege.populateIncludePrivilegesQuery(parent)); // add filter for role names - if ((roles != null) && (roles.size() > 0)) { + if (roles != null && roles.size() > 0) { query.declareVariables("org.apache.sentry.provider.db.service.model.MSentryRole role"); List rolesFiler = new LinkedList(); for (MSentryRole role : roles) { @@ -234,10 +251,16 @@ private void revokeRolePartial(MSentryGMPrivilege revokePrivilege, /** * grant the left privileges to role */ - MSentryGMPrivilege leftPriv = new MSentryGMPrivilege(persistedPriv); - leftPriv.setAction(ac.getValue()); - leftPriv.appendRole(role); - pm.makePersistent(leftPriv); + MSentryGMPrivilege tmpPriv = new MSentryGMPrivilege(persistedPriv); + tmpPriv.setAction(ac.getValue()); + MSentryGMPrivilege leftPersistedPriv = getPrivilege(tmpPriv, pm); + if (leftPersistedPriv == null) { + //leftPersistedPriv isn't exist + leftPersistedPriv = tmpPriv; + role.appendGMPrivilege(leftPersistedPriv); + } + 
leftPersistedPriv.appendRole(role); + pm.makePersistent(leftPersistedPriv); } } } else if (revokeaction.implies(persistedAction)) { @@ -247,12 +270,11 @@ private void revokeRolePartial(MSentryGMPrivilege revokePrivilege, */ persistedPriv.removeRole(role); pm.makePersistent(persistedPriv); - } else { - /** - * if the revoke action is not equal to the persisted action, - * do nothing - */ } + /** + * if the revoke action is not equal to the persisted action, + * do nothing + */ } } @@ -301,7 +323,7 @@ private MSentryGMPrivilege getPrivilege(MSentryGMPrivilege privilege, Persistenc @SuppressWarnings("unchecked") public Set getPrivilegesByRole(Set roles, PersistenceManager pm) { Set privileges = Sets.newHashSet(); - if ((roles == null) || (roles.size() == 0)) { + if (roles == null || roles.size() == 0) { return privileges; } Query query = pm.newQuery(MSentryGMPrivilege.class); @@ -316,7 +338,7 @@ public Set getPrivilegesByRole(Set roles, Persiste query.setFilter(filters.toString()); List mPrivileges = (List) query.execute(); - if ((mPrivileges == null) || (mPrivileges.size() ==0)) { + if (mPrivileges == null || mPrivileges.isEmpty()) { return privileges; } for (MSentryGMPrivilege mPrivilege : mPrivileges) { @@ -335,7 +357,9 @@ public Set getPrivilegesByProvider(String component, String service, Set roles, List authorizables, PersistenceManager pm) { Set privileges = Sets.newHashSet(); - if ((roles == null) || (roles.size() == 0)) return privileges; + if (roles == null || roles.isEmpty()) { + return privileges; + } MSentryGMPrivilege parentPrivilege = new MSentryGMPrivilege(component, service, authorizables, null, null); Set privilegeGraph = Sets.newHashSet(); @@ -353,6 +377,20 @@ public Set getPrivilegesByProvider(String component, return privileges; } + public Set getPrivilegesByAuthorizable(String component, + String service, Set roles, + List authorizables, PersistenceManager pm) { + + Set privilegeGraph = Sets.newHashSet(); + + if (roles == null || roles.isEmpty()) { + return privilegeGraph; + } + + MSentryGMPrivilege parentPrivilege = new MSentryGMPrivilege(component, service, authorizables, null, null); + privilegeGraph.addAll(populateIncludePrivileges(roles, parentPrivilege, pm)); + return privilegeGraph; + } public void renamePrivilege(String component, String service, List oldAuthorizables, List newAuthorizables, @@ -394,7 +432,7 @@ public void renamePrivilege(String component, String service, } } - public static BitFieldAction getAction(String component, String name) { + private BitFieldAction getAction(String component, String name) { BitFieldActionFactory actionFactory = getActionFactory(component); BitFieldAction action = actionFactory.getActionByName(name); if (action == null) { @@ -403,10 +441,44 @@ public static BitFieldAction getAction(String component, String name) { return action; } - public static BitFieldActionFactory getActionFactory(String component) { - BitFieldActionFactory actionFactory = actionFactories.get(component.toLowerCase()); - if (actionFactory == null) { - throw new RuntimeException("can't get actionFactory for component:" + component); + private BitFieldActionFactory getActionFactory(String component) { + String caseInsensitiveComponent = component.toLowerCase(); + if (actionFactories.containsKey(caseInsensitiveComponent)) { + return actionFactories.get(caseInsensitiveComponent); + } + BitFieldActionFactory actionFactory = createActionFactory(caseInsensitiveComponent); + actionFactories.put(caseInsensitiveComponent, actionFactory); + LOGGER.info("Action 
factory for component {} not found in cache. Loaded it from configuration as {}.", + component, actionFactory.getClass().getName()); + return actionFactory; + } + + private BitFieldActionFactory createActionFactory(String component) { + String actionFactoryClassName = + conf.get(String.format(ServiceConstants.ServerConfig.SENTRY_COMPONENT_ACTION_FACTORY_FORMAT, component)); + if (actionFactoryClassName == null) { + throw new RuntimeException("ActionFactory not defined for component " + component + + ". Please define the parameter " + + "sentry." + component + ".action.factory in configuration"); + } + Class actionFactoryClass; + try { + actionFactoryClass = Class.forName(actionFactoryClassName); + } catch (ClassNotFoundException e) { + throw new RuntimeException("ActionFactory class " + actionFactoryClassName + " not found."); + } + if (!BitFieldActionFactory.class.isAssignableFrom(actionFactoryClass)) { + throw new RuntimeException("ActionFactory class " + actionFactoryClassName + " must extend " + + BitFieldActionFactory.class.getName()); + } + BitFieldActionFactory actionFactory; + try { + Constructor actionFactoryConstructor = actionFactoryClass.getDeclaredConstructor(); + actionFactoryConstructor.setAccessible(true); + actionFactory = (BitFieldActionFactory) actionFactoryClass.newInstance(); + } catch (NoSuchMethodException | InstantiationException | IllegalAccessException e) { + throw new RuntimeException("Could not instantiate actionFactory " + actionFactoryClassName + + " for component: " + component, e); } return actionFactory; } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/SentryStoreLayer.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/SentryStoreLayer.java index ba9e36fbd..49a78ef11 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/SentryStoreLayer.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/persistent/SentryStoreLayer.java @@ -24,6 +24,7 @@ import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.provider.db.SentryAlreadyExistsException; import org.apache.sentry.provider.db.SentryNoSuchObjectException; +import org.apache.sentry.provider.db.service.model.MSentryGMPrivilege; import org.apache.sentry.provider.db.service.persistent.CommitContext; /** @@ -38,7 +39,7 @@ public interface SentryStoreLayer { * @returns commit context used for notification handlers * @throws SentryAlreadyExistsException */ - public CommitContext createRole(String component, String role, + CommitContext createRole(String component, String role, String requestor) throws SentryAlreadyExistsException; /** @@ -49,7 +50,7 @@ public CommitContext createRole(String component, String role, * @returns commit context used for notification handlers * @throws SentryNoSuchObjectException */ - public CommitContext dropRole(String component, String role, + CommitContext dropRole(String component, String role, String requestor) throws SentryNoSuchObjectException; /** @@ -61,7 +62,7 @@ public CommitContext dropRole(String component, String role, * @returns commit context used for notification handlers * @throws SentryNoSuchObjectException */ - public CommitContext alterRoleAddGroups(String component, String role, + CommitContext alterRoleAddGroups(String component, String role, Set groups, String requestor) throws 
SentryNoSuchObjectException; /** @@ -73,7 +74,7 @@ public CommitContext alterRoleAddGroups(String component, String role, * @returns commit context used for notification handlers * @throws SentryNoSuchObjectException */ - public CommitContext alterRoleDeleteGroups(String component, String role, + CommitContext alterRoleDeleteGroups(String component, String role, Set groups, String requestor) throws SentryNoSuchObjectException; /** @@ -85,7 +86,7 @@ public CommitContext alterRoleDeleteGroups(String component, String role, * @returns commit context Used for notification handlers * @throws SentryUserException */ - public CommitContext alterRoleGrantPrivilege(String component, String role, + CommitContext alterRoleGrantPrivilege(String component, String role, PrivilegeObject privilege, String grantorPrincipal) throws SentryUserException; /** @@ -97,7 +98,7 @@ public CommitContext alterRoleGrantPrivilege(String component, String role, * @returns commit context used for notification handlers * @throws SentryUserException */ - public CommitContext alterRoleRevokePrivilege(String component, String role, + CommitContext alterRoleRevokePrivilege(String component, String role, PrivilegeObject privilege, String grantorPrincipal) throws SentryUserException; /** @@ -111,7 +112,7 @@ public CommitContext alterRoleRevokePrivilege(String component, String role, * @returns commit context used for notification handlers * @throws SentryUserException */ - public CommitContext renamePrivilege( + CommitContext renamePrivilege( String component, String service, List oldAuthorizables, List newAuthorizables, String requestor) throws SentryUserException; @@ -123,7 +124,7 @@ public CommitContext renamePrivilege( * @returns commit context used for notification handlers * @throws SentryUserException */ - public CommitContext dropPrivilege(String component, PrivilegeObject privilege, + CommitContext dropPrivilege(String component, PrivilegeObject privilege, String requestor) throws SentryUserException; /** @@ -133,7 +134,7 @@ public CommitContext dropPrivilege(String component, PrivilegeObject privilege, * @returns the set of roles * @throws SentryUserException */ - public Set getRolesByGroups(String component, Set groups) throws SentryUserException; + Set getRolesByGroups(String component, Set groups) throws SentryUserException; /** * Get groups @@ -142,7 +143,7 @@ public CommitContext dropPrivilege(String component, PrivilegeObject privilege, * @returns the set of groups * @throws SentryUserException */ - public Set getGroupsByRoles(String component, Set roles) throws SentryUserException; + Set getGroupsByRoles(String component, Set roles) throws SentryUserException; /** * Get privileges @@ -151,7 +152,7 @@ public CommitContext dropPrivilege(String component, PrivilegeObject privilege, * @returns the set of privileges * @throws SentryUserException */ - public Set getPrivilegesByRole(String component, Set roles) throws SentryUserException; + Set getPrivilegesByRole(String component, Set roles) throws SentryUserException; /** * get sentry privileges from provider as followings: @@ -164,12 +165,34 @@ public CommitContext dropPrivilege(String component, PrivilegeObject privilege, * @throws SentryUserException */ - public Set getPrivilegesByProvider(String component, String service,Set roles, + Set getPrivilegesByProvider(String component, String service, Set roles, Set groups, List authorizables) throws SentryUserException; + + /** + * Get all roles name. 
+   *
+   * @returns The set of role names.
+   */
+  Set<String> getAllRoleNames();
+
+  /**
+   * Get sentry privileges based on the valid active roles and the authorizable objects.
+   *
+   * @param component: The component of the request
+   * @param service: The name of the service
+   * @param validActiveRoles: The valid active roles
+   * @param authorizables: The list of authorizable objects
+   * @returns The set of MSentryGMPrivilege
+   * @throws SentryUserException
+   */
+  Set<MSentryGMPrivilege> getPrivilegesByAuthorizable(String component, String service,
+      Set<String> validActiveRoles, List<? extends Authorizable> authorizables)
+      throws SentryUserException;
+
   /**
    * close sentryStore
    */
-  public void close();
+  void close();
 }
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/NotificationHandler.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/NotificationHandler.java
index d8a51a653..e0a5f03d5 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/NotificationHandler.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/NotificationHandler.java
@@ -17,47 +17,31 @@
  */
 package org.apache.sentry.provider.db.generic.service.thrift;
 
-import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsRequest;
-import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsResponse;
-import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsRequest;
-import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsResponse;
-import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest;
-import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeResponse;
-import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest;
-import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeResponse;
-import org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleRequest;
-import org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleResponse;
-import org.apache.sentry.provider.db.generic.service.thrift.TDropPrivilegesRequest;
-import org.apache.sentry.provider.db.generic.service.thrift.TDropPrivilegesResponse;
-import org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleRequest;
-import org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleResponse;
-import org.apache.sentry.provider.db.generic.service.thrift.TRenamePrivilegesRequest;
-import org.apache.sentry.provider.db.generic.service.thrift.TRenamePrivilegesResponse;
 import org.apache.sentry.provider.db.service.persistent.CommitContext;
 
 public interface NotificationHandler {
 
-  public void create_sentry_role(CommitContext context,
+  void create_sentry_role(CommitContext context,
       TCreateSentryRoleRequest request, TCreateSentryRoleResponse response);
 
-  public void drop_sentry_role(CommitContext context, TDropSentryRoleRequest request,
+  void drop_sentry_role(CommitContext context, TDropSentryRoleRequest request,
       TDropSentryRoleResponse response);
 
-  public void alter_sentry_role_grant_privilege(CommitContext context, TAlterSentryRoleGrantPrivilegeRequest request,
+  void alter_sentry_role_grant_privilege(CommitContext context, TAlterSentryRoleGrantPrivilegeRequest request,
TAlterSentryRoleGrantPrivilegeResponse response); - public void alter_sentry_role_revoke_privilege(CommitContext context, TAlterSentryRoleRevokePrivilegeRequest request, + void alter_sentry_role_revoke_privilege(CommitContext context, TAlterSentryRoleRevokePrivilegeRequest request, TAlterSentryRoleRevokePrivilegeResponse response); - public void alter_sentry_role_add_groups(CommitContext context,TAlterSentryRoleAddGroupsRequest request, + void alter_sentry_role_add_groups(CommitContext context,TAlterSentryRoleAddGroupsRequest request, TAlterSentryRoleAddGroupsResponse response); - public void alter_sentry_role_delete_groups(CommitContext context, TAlterSentryRoleDeleteGroupsRequest request, + void alter_sentry_role_delete_groups(CommitContext context, TAlterSentryRoleDeleteGroupsRequest request, TAlterSentryRoleDeleteGroupsResponse response); - public void drop_sentry_privilege(CommitContext context, TDropPrivilegesRequest request, + void drop_sentry_privilege(CommitContext context, TDropPrivilegesRequest request, TDropPrivilegesResponse response); - public void rename_sentry_privilege(CommitContext context, TRenamePrivilegesRequest request, + void rename_sentry_privilege(CommitContext context, TRenamePrivilegesRequest request, TRenamePrivilegesResponse response); } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/NotificationHandlerInvoker.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/NotificationHandlerInvoker.java index 317c97b9d..11b545675 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/NotificationHandlerInvoker.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/NotificationHandlerInvoker.java @@ -19,22 +19,6 @@ import java.util.List; -import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsRequest; -import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsResponse; -import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsRequest; -import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsResponse; -import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest; -import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeResponse; -import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest; -import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeResponse; -import org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleRequest; -import org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleResponse; -import org.apache.sentry.provider.db.generic.service.thrift.TDropPrivilegesRequest; -import org.apache.sentry.provider.db.generic.service.thrift.TDropPrivilegesResponse; -import org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleRequest; -import org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleResponse; -import org.apache.sentry.provider.db.generic.service.thrift.TRenamePrivilegesRequest; -import org.apache.sentry.provider.db.generic.service.thrift.TRenamePrivilegesResponse; import org.apache.sentry.provider.db.service.persistent.CommitContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SearchPolicyServiceClient.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SearchPolicyServiceClient.java deleted file mode 100644 index 1ed3fcddf..000000000 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SearchPolicyServiceClient.java +++ /dev/null @@ -1,159 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.sentry.provider.db.generic.service.thrift; - -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.conf.Configuration; -import org.apache.sentry.SentryUserException; -import org.apache.sentry.core.common.Action; -import org.apache.sentry.core.common.ActiveRoleSet; -import org.apache.sentry.core.common.Authorizable; -import org.apache.sentry.core.model.search.Collection; -import org.apache.sentry.provider.common.AuthorizationComponent; - -import com.google.common.collect.Lists; - -import static org.apache.sentry.core.model.search.SearchModelAuthorizable.AuthorizableType.Collection; -import static org.apache.sentry.core.model.search.SearchConstants.SENTRY_SEARCH_CLUSTER_KEY; -import static org.apache.sentry.core.model.search.SearchConstants.SENTRY_SEARCH_CLUSTER_DEFAULT; - -/** - * This search policy client will be used in the solr component to communicate with Sentry service. 
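With SearchPolicyServiceClient deleted below, Solr-side callers are expected to talk to SentryGenericServiceClient directly. A rough sketch of that usage follows, built only from the calls visible in the deleted wrapper; the requestor, role, service, collection, and action values are illustrative, and the exact client signatures should be treated as assumptions rather than a confirmed API:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.sentry.provider.common.AuthorizationComponent;
    import com.google.common.collect.Lists;

    // Sketch: a Solr caller using the generic client directly, doing what
    // the deleted SearchPolicyServiceClient wrapper used to do for it.
    public class SolrGenericClientExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        SentryGenericServiceClient client = new SentryGenericServiceClient(conf);
        try {
          // Create the role for the Search component, as the wrapper's
          // createRoleIfNotExist() did.
          client.createRoleIfNotExist("solr-admin", "analysts", AuthorizationComponent.Search);

          // Build the privilege the wrapper assembled in toTSentryPrivilege().
          TSentryPrivilege priv = new TSentryPrivilege();
          priv.setComponent(AuthorizationComponent.Search);
          priv.setServiceName("service1");          // illustrative cluster name
          priv.setAction("query");                  // illustrative Search action
          priv.setGrantOption(TSentryGrantOption.FALSE);
          List<TAuthorizable> authorizables =
              Lists.newArrayList(new TAuthorizable("collection", "logs"));
          priv.setAuthorizables(authorizables);

          client.grantPrivilege("solr-admin", "analysts", AuthorizationComponent.Search, priv);
        } finally {
          client.close();
        }
      }
    }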
- * - */ -public class SearchPolicyServiceClient { - private static final String COMPONENT_TYPE = AuthorizationComponent.Search; - - private String searchClusterName; - private SentryGenericServiceClient client; - - public SearchPolicyServiceClient(Configuration conf) throws Exception { - this.searchClusterName = conf.get(SENTRY_SEARCH_CLUSTER_KEY, SENTRY_SEARCH_CLUSTER_DEFAULT); - this.client = new SentryGenericServiceClient(conf); - } - - public void createRole(final String requestor, final String roleName) - throws SentryUserException { - client.createRole(requestor, roleName, COMPONENT_TYPE); - } - - public void createRoleIfNotExist(final String requestor, - final String roleName) throws SentryUserException { - client.createRoleIfNotExist(requestor, roleName, COMPONENT_TYPE); - } - - public void dropRole(final String requestor, final String roleName) - throws SentryUserException { - client.dropRole(requestor, roleName, COMPONENT_TYPE); - } - - public void dropRoleIfExists(final String requestor, final String roleName) - throws SentryUserException { - client.dropRoleIfExists(requestor, roleName, COMPONENT_TYPE); - } - - public void addRoleToGroups(final String requestor, final String roleName, - final Set groups) throws SentryUserException { - client.addRoleToGroups(requestor, roleName, COMPONENT_TYPE, groups); - } - - public void deleteRoleFromGroups(final String requestor, final String roleName, - final Set groups) throws SentryUserException { - client.deleteRoleToGroups(requestor, roleName, COMPONENT_TYPE, groups); - } - - public void grantCollectionPrivilege(final String collection, final String requestor, - final String roleName,final String action) throws SentryUserException { - grantCollectionPrivilege(collection, requestor, roleName, action, false); - } - - public void grantCollectionPrivilege(final String collection, final String requestor, - final String roleName, final String action, final Boolean grantOption) throws SentryUserException { - TSentryPrivilege tPrivilege = toTSentryPrivilege(collection, action, grantOption); - client.grantPrivilege(requestor, roleName, COMPONENT_TYPE, tPrivilege); - } - - public void revokeCollectionPrivilege(final String collection, final String requestor, final String roleName, - final String action) throws SentryUserException { - revokeCollectionPrivilege(collection, requestor, roleName, action, false); - } - - public void revokeCollectionPrivilege(final String collection, final String requestor, final String roleName, - final String action, final Boolean grantOption) throws SentryUserException { - TSentryPrivilege tPrivilege = toTSentryPrivilege(collection, action, grantOption); - client.revokePrivilege(requestor, roleName, COMPONENT_TYPE, tPrivilege); - } - - public void renameCollectionPrivilege(final String oldCollection, final String newCollection, final String requestor) - throws SentryUserException { - client.renamePrivilege(requestor, COMPONENT_TYPE, searchClusterName, Lists.newArrayList(new Collection(oldCollection)), - Lists.newArrayList(new Collection(newCollection))); - } - - public void dropCollectionPrivilege(final String collection, final String requestor) throws SentryUserException { - final TSentryPrivilege tPrivilege = toTSentryPrivilege(collection, Action.ALL, null); - client.dropPrivilege(requestor, COMPONENT_TYPE, tPrivilege); - } - - public Set listAllRoles(final String user) throws SentryUserException { - return client.listAllRoles(user, COMPONENT_TYPE); - } - - public Set listRolesByGroupName(final String requestor, 
final String groupName) throws SentryUserException { - return client.listRolesByGroupName(requestor, groupName, COMPONENT_TYPE); - } - - public Set listPrivilegesByRoleName( - final String requestor, final String roleName, - final List authorizables) throws SentryUserException { - return client.listPrivilegesByRoleName(requestor, roleName, COMPONENT_TYPE, searchClusterName, authorizables); - } - - public Set listPrivilegesForProvider(final ActiveRoleSet roleSet, final Set groups, - final List authorizables) throws SentryUserException { - return client.listPrivilegesForProvider(COMPONENT_TYPE, searchClusterName, roleSet, groups, authorizables); - } - - private TSentryPrivilege toTSentryPrivilege(String collection, String action, - Boolean grantOption) { - TSentryPrivilege tPrivilege = new TSentryPrivilege(); - tPrivilege.setComponent(COMPONENT_TYPE); - tPrivilege.setServiceName(searchClusterName); - tPrivilege.setAction(action); - - if (grantOption == null) { - tPrivilege.setGrantOption(TSentryGrantOption.UNSET); - } else if (grantOption) { - tPrivilege.setGrantOption(TSentryGrantOption.TRUE); - } else { - tPrivilege.setGrantOption(TSentryGrantOption.FALSE); - } - - List authorizables = Lists.newArrayList(new TAuthorizable(Collection.name(), collection)); - tPrivilege.setAuthorizables(authorizables); - return tPrivilege; - } - - public void close() { - if (client != null) { - client.close(); - } - } -} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessor.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessor.java index d6600a0cb..97c2e7153 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessor.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessor.java @@ -17,30 +17,41 @@ */ package org.apache.sentry.provider.db.generic.service.thrift; -import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.common.ProviderConstants.KV_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; import java.lang.reflect.Constructor; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.sentry.SentryUserException; import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.core.model.db.AccessConstants; +import org.apache.sentry.policy.common.KeyValue; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.provider.common.AuthorizationComponent; import org.apache.sentry.provider.db.SentryAccessDeniedException; import org.apache.sentry.provider.db.SentryAlreadyExistsException; import org.apache.sentry.provider.db.SentryInvalidInputException; import org.apache.sentry.provider.db.SentryNoSuchObjectException; +import org.apache.sentry.provider.db.SentryThriftAPIMismatchException; import org.apache.sentry.provider.db.generic.service.persistent.PrivilegeObject; -import org.apache.sentry.provider.db.generic.service.persistent.SentryStoreLayer; import org.apache.sentry.provider.db.generic.service.persistent.PrivilegeObject.Builder; +import 
org.apache.sentry.provider.db.generic.service.persistent.SentryStoreLayer; +import org.apache.sentry.provider.db.log.entity.JsonLogEntityFactory; +import org.apache.sentry.provider.db.log.util.Constants; +import org.apache.sentry.provider.db.service.model.MSentryGMPrivilege; +import org.apache.sentry.provider.db.service.model.MSentryRole; import org.apache.sentry.provider.db.service.persistent.CommitContext; import org.apache.sentry.provider.db.service.thrift.PolicyStoreConstants; import org.apache.sentry.provider.db.service.thrift.SentryConfigurationException; import org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessor; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; +import org.apache.sentry.service.thrift.ServiceConstants.ThriftConstants; +import org.apache.sentry.service.thrift.ServiceConstants; import org.apache.sentry.service.thrift.Status; import org.apache.sentry.service.thrift.TSentryResponseStatus; import org.apache.thrift.TException; @@ -52,22 +63,26 @@ import com.google.common.base.Strings; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import com.google.common.collect.Sets; public class SentryGenericPolicyProcessor implements SentryGenericPolicyService.Iface { private static final Logger LOGGER = LoggerFactory.getLogger(SentryGenericPolicyProcessor.class); + private static final Logger AUDIT_LOGGER = LoggerFactory + .getLogger(Constants.AUDIT_LOGGER_NAME_GENERIC); private final Configuration conf; private final ImmutableSet adminGroups; private final SentryStoreLayer store; private final NotificationHandlerInvoker handerInvoker; public static final String SENTRY_GENERIC_SERVICE_NAME = "SentryGenericPolicyService"; + private static final String ACCESS_DENIAL_MESSAGE = "Access denied to "; public SentryGenericPolicyProcessor(Configuration conf) throws Exception { this.store = createStore(conf); this.handerInvoker = new NotificationHandlerInvoker(createHandlers(conf)); this.conf = conf; - adminGroups = ImmutableSet.copyOf(toTrimedLower(Sets.newHashSet(conf.getStrings( + adminGroups = ImmutableSet.copyOf((Sets.newHashSet(conf.getStrings( ServerConfig.ADMIN_GROUPS, new String[]{})))); } @@ -76,7 +91,7 @@ public SentryGenericPolicyProcessor(Configuration conf, SentryStoreLayer store) this.store = store; this.handerInvoker = new NotificationHandlerInvoker(createHandlers(conf)); this.conf = conf; - adminGroups = ImmutableSet.copyOf(toTrimedLower(Sets.newHashSet(conf.getStrings( + adminGroups = ImmutableSet.copyOf(toTrimmed(Sets.newHashSet(conf.getStrings( ServerConfig.ADMIN_GROUPS, new String[]{})))); } @@ -86,12 +101,14 @@ private void authorize(String requestorUser, Set requestorGroups) String msg = "User: " + requestorUser + " is part of " + requestorGroups + " which does not, intersect admin groups " + adminGroups; LOGGER.warn(msg); - throw new SentryAccessDeniedException("Access denied to " + requestorUser); + throw new SentryAccessDeniedException(ACCESS_DENIAL_MESSAGE + requestorUser); } } - private Set toTrimedLower(Set s) { - if (null == s) return new HashSet(); + private Set toTrimmedLower(Set s) { + if (null == s) { + return new HashSet(); + } Set result = Sets.newHashSet(); for (String v : s) { result.add(v.trim().toLowerCase()); @@ -99,7 +116,18 @@ private Set toTrimedLower(Set s) { return result; } - private String toTrimedLower(String s) { + private Set toTrimmed(Set s) { + if (null == s) { + return new HashSet(); + } + Set result = Sets.newHashSet(); 
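+    // Deliberately trim-only: group names stay case-sensitive (see the
+    // case-sensitive group handling in list_sentry_privileges_by_authorizable
+    // below), unlike role names, which go through toTrimmedLower.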
+ for (String v : s) { + result.add(v.trim()); + } + return result; + } + + private String toTrimmedLower(String s) { if (Strings.isNullOrEmpty(s)){ return ""; } @@ -111,16 +139,15 @@ public static Set getRequestorGroups(Configuration conf, String userName } private boolean inAdminGroups(Set requestorGroups) { - requestorGroups = toTrimedLower(requestorGroups); if (Sets.intersection(adminGroups, requestorGroups).isEmpty()) { return false; - } else return true; + } + return true; } public static SentryStoreLayer createStore(Configuration conf) throws SentryConfigurationException { SentryStoreLayer storeLayer = null; - String Store = conf.get(PolicyStoreConstants.SENTRY_GENERIC_POLICY_STORE, - PolicyStoreConstants.SENTRY_GENERIC_POLICY_STORE_DEFAULT); + String Store = conf.get(PolicyStoreConstants.SENTRY_GENERIC_POLICY_STORE, PolicyStoreConstants.SENTRY_GENERIC_POLICY_STORE_DEFAULT); if (Strings.isNullOrEmpty(Store)) { throw new SentryConfigurationException("the parameter configuration for sentry.generic.policy.store can't be empty"); @@ -171,18 +198,25 @@ private Response requestHandle(RequestHandler handler) { try { response = handler.handle(); } catch (SentryAccessDeniedException e) { - LOGGER.error(e.getMessage(), e); + String msg = "Sentry access denied: " + e.getMessage(); + LOGGER.error(msg, e); response.status = Status.AccessDenied(e.getMessage(), e); } catch (SentryAlreadyExistsException e) { - LOGGER.error(e.getMessage(), e); + String msg = "Sentry object already exists: " + e.getMessage(); + LOGGER.error(msg, e); response.status = Status.AlreadyExists(e.getMessage(), e); } catch (SentryNoSuchObjectException e) { - LOGGER.error(e.getMessage(), e); + String msg = "Sentry object doesn't exist: " + e.getMessage(); + LOGGER.error(msg, e); response.status = Status.NoSuchObject(e.getMessage(), e); } catch (SentryInvalidInputException e) { - String msg = "Invalid input privilege object"; + String msg = "Invalid input privilege object: " + e.getMessage(); LOGGER.error(msg, e); response.status = Status.InvalidInput(msg, e); + } catch (SentryThriftAPIMismatchException e) { + String msg = "Sentry thrift API mismatch error: " + e.getMessage(); + LOGGER.error(msg, e); + response.status = Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e); } catch (Exception e) { String msg = "Unknown error:" + e.getMessage(); LOGGER.error(msg, e); @@ -231,6 +265,22 @@ private List fromAuthorizable(List author return tAuthorizables; } + private String fromAuthorizableToStr(List authorizables) { + if (authorizables != null && !authorizables.isEmpty()) { + List privileges = Lists.newArrayList(); + + for (Authorizable authorizable : authorizables) { + + privileges.add(PolicyConstants.KV_JOINER.join(authorizable.getTypeName(), + authorizable.getName())); + } + + return PolicyConstants.AUTHORIZABLE_JOINER.join(privileges); + } else { + return ""; + } + } + private List toAuthorizables(List tAuthorizables) { List authorizables = Lists.newArrayList(); if (tAuthorizables == null) { @@ -251,10 +301,82 @@ public String getName() { return authorizables; } + private List toAuthorizables(String privilegeStr) { + List authorizables = Lists.newArrayList(); + if (privilegeStr == null) { + return authorizables; + } + + for (String authorizable : PolicyConstants.AUTHORIZABLE_SPLITTER.split(privilegeStr)) { + KeyValue tempKV = new KeyValue(authorizable); + final String key = tempKV.getKey(); + final String value = tempKV.getValue(); + + authorizables.add(new Authorizable() { + @Override + public String getTypeName() { + return key; 
+ } + + @Override + public String getName() { + return value; + } + }); + } + + return authorizables; + } + + // Construct the role to set of privileges mapping based on the + // MSentryGMPrivilege information. + private TSentryPrivilegeMap toTSentryPrivilegeMap(Set mPrivileges) { + + // Mapping of >. + Map> tPrivilegeMap = Maps.newTreeMap(); + + for (MSentryGMPrivilege mPrivilege : mPrivileges) { + for (MSentryRole role : mPrivilege.getRoles()) { + + TSentryPrivilege tPrivilege = toTSentryPrivilege(mPrivilege); + + if (tPrivilegeMap.containsKey(role.getRoleName())) { + tPrivilegeMap.get(role.getRoleName()).add(tPrivilege); + } else { + Set tPrivilegeSet = Sets.newTreeSet(); + tPrivilegeSet.add(tPrivilege); + tPrivilegeMap.put(role.getRoleName(), tPrivilegeSet); + } + } + } + + return new TSentryPrivilegeMap(tPrivilegeMap); + } + + // Construct TSentryPrivilege based on MSentryGMPrivilege information. + private TSentryPrivilege toTSentryPrivilege(MSentryGMPrivilege mPrivilege) { + + TSentryPrivilege tPrivilege = new TSentryPrivilege(mPrivilege.getComponentName(), + mPrivilege.getServiceName(), fromAuthorizable(mPrivilege.getAuthorizables()), mPrivilege.getAction()); + + if (mPrivilege.getGrantOption() == null) { + tPrivilege.setGrantOption(TSentryGrantOption.UNSET); + } else if (mPrivilege.getGrantOption()) { + tPrivilege.setGrantOption(TSentryGrantOption.TRUE); + } else { + tPrivilege.setGrantOption(TSentryGrantOption.FALSE); + } + + return tPrivilege; + } + private Set buildPermissions(Set privileges) { Set permissions = Sets.newHashSet(); for (PrivilegeObject privilege : privileges) { List hierarchy = Lists.newArrayList(); + if (hasComponentServerPrivilege(privilege.getComponent())) { + hierarchy.add(KV_JOINER.join("server", privilege.getService())); + } for (Authorizable authorizable : privilege.getAuthorizables()) { hierarchy.add(KV_JOINER.join(authorizable.getTypeName(),authorizable.getName())); } @@ -264,12 +386,18 @@ private Set buildPermissions(Set privileges) { return permissions; } + private boolean hasComponentServerPrivilege(String component) { + //judge the component whether has the server privilege, for example: sqoop has the privilege on the server + return AuthorizationComponent.SQOOP.equalsIgnoreCase(component); + } + @Override public TCreateSentryRoleResponse create_sentry_role( final TCreateSentryRoleRequest request) throws TException { Response respose = requestHandle(new RequestHandler() { @Override public Response handle() throws Exception { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(conf, request.getRequestorUserName())); CommitContext context = store.createRole(request.getComponent(), request.getRoleName(), request.getRequestorUserName()); @@ -281,6 +409,15 @@ public Response handle() throws Exception { if (Status.OK.getCode() == respose.status.getValue()) { handerInvoker.create_sentry_role(respose.context, request, tResponse); } + + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, tResponse, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. 
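+        // Audit logging is best-effort: this catch keeps an audit failure
+        // from failing an RPC whose outcome has already been committed.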
+ String msg = "Error creating audit log for create role: " + e.getMessage(); + LOGGER.error(msg, e); + } return tResponse; } @@ -290,6 +427,7 @@ public TDropSentryRoleResponse drop_sentry_role(final TDropSentryRoleRequest req Response respose = requestHandle(new RequestHandler() { @Override public Response handle() throws Exception { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(conf, request.getRequestorUserName())); CommitContext context = store.dropRole(request.getComponent(), request.getRoleName(), request.getRequestorUserName()); @@ -301,6 +439,15 @@ public Response handle() throws Exception { if (Status.OK.getCode() == respose.status.getValue()) { handerInvoker.drop_sentry_role(respose.context, request, tResponse); } + + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, tResponse, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. + String msg = "Error creating audit log for drop role: " + e.getMessage(); + LOGGER.error(msg, e); + } return tResponse; } @@ -310,9 +457,8 @@ public TAlterSentryRoleGrantPrivilegeResponse alter_sentry_role_grant_privilege( Response respose = requestHandle(new RequestHandler() { @Override public Response handle() throws Exception { - CommitContext context = store.alterRoleGrantPrivilege(request.getComponent(), request.getRoleName(), - toPrivilegeObject(request.getPrivilege()), - request.getRequestorUserName()); + validateClientVersion(request.getProtocol_version()); + CommitContext context = store.alterRoleGrantPrivilege(request.getComponent(), request.getRoleName(), toPrivilegeObject(request.getPrivilege()), request.getRequestorUserName()); return new Response(Status.OK(), context); } }); @@ -321,6 +467,15 @@ public Response handle() throws Exception { if (Status.OK.getCode() == respose.status.getValue()) { handerInvoker.alter_sentry_role_grant_privilege(respose.context, request, tResponse); } + + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, tResponse, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. + String msg = "Error creating audit log for grant privilege to role: " + e.getMessage(); + LOGGER.error(msg, e); + } return tResponse; } @@ -330,9 +485,8 @@ public TAlterSentryRoleRevokePrivilegeResponse alter_sentry_role_revoke_privileg Response respose = requestHandle(new RequestHandler() { @Override public Response handle() throws Exception { - CommitContext context = store.alterRoleRevokePrivilege(request.getComponent(), request.getRoleName(), - toPrivilegeObject(request.getPrivilege()), - request.getRequestorUserName()); + validateClientVersion(request.getProtocol_version()); + CommitContext context = store.alterRoleRevokePrivilege(request.getComponent(), request.getRoleName(), toPrivilegeObject(request.getPrivilege()), request.getRequestorUserName()); return new Response(Status.OK(), context); } }); @@ -341,6 +495,15 @@ public Response handle() throws Exception { if (Status.OK.getCode() == respose.status.getValue()) { handerInvoker.alter_sentry_role_revoke_privilege(respose.context, request, tResponse); } + + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, tResponse, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. 
+ String msg = "Error creating audit log for revoke privilege from role: " + e.getMessage(); + LOGGER.error(msg, e); + } return tResponse; } @@ -350,11 +513,10 @@ public TAlterSentryRoleAddGroupsResponse alter_sentry_role_add_groups( Response respose = requestHandle(new RequestHandler() { @Override public Response handle() throws Exception { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(conf, request.getRequestorUserName())); - CommitContext context = store.alterRoleAddGroups( - request.getComponent(), request.getRoleName(), request.getGroups(), - request.getRequestorUserName()); + CommitContext context = store.alterRoleAddGroups(request.getComponent(), request.getRoleName(), request.getGroups(), request.getRequestorUserName()); return new Response(Status.OK(), context); } }); @@ -363,6 +525,15 @@ public Response handle() throws Exception { if (Status.OK.getCode() == respose.status.getValue()) { handerInvoker.alter_sentry_role_add_groups(respose.context, request, tResponse); } + + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, tResponse, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. + String msg = "Error creating audit log for add role to group: " + e.getMessage(); + LOGGER.error(msg, e); + } return tResponse; } @@ -372,11 +543,10 @@ public TAlterSentryRoleDeleteGroupsResponse alter_sentry_role_delete_groups( Response respose = requestHandle(new RequestHandler() { @Override public Response handle() throws Exception { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(conf, request.getRequestorUserName())); - CommitContext context = store.alterRoleDeleteGroups( - request.getComponent(), request.getRoleName(), request.getGroups(), - request.getRequestorUserName()); + CommitContext context = store.alterRoleDeleteGroups(request.getComponent(), request.getRoleName(), request.getGroups(), request.getRequestorUserName()); return new Response(Status.OK(), context); } }); @@ -385,6 +555,15 @@ public Response handle() throws Exception { if (Status.OK.getCode() == respose.status.getValue()) { handerInvoker.alter_sentry_role_delete_groups(respose.context, request, tResponse); } + + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, tResponse, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. 
+ String msg = "Error creating audit log for delete role from group: " + e.getMessage(); + LOGGER.error(msg, e); + } return tResponse; } @@ -394,15 +573,14 @@ public TListSentryRolesResponse list_sentry_roles_by_group( Response> respose = requestHandle(new RequestHandler>() { @Override public Response> handle() throws Exception { + validateClientVersion(request.getProtocol_version()); Set groups = getRequestorGroups(conf, request.getRequestorUserName()); - if (AccessConstants.ALL.equalsIgnoreCase(request.getGroupName())) { - //check all groups which requestorUserName belongs to - } else { + if (!AccessConstants.ALL.equalsIgnoreCase(request.getGroupName())) { boolean admin = inAdminGroups(groups); //Only admin users can list all roles in the system ( groupname = null) //Non admin users are only allowed to list only groups which they belong to if(!admin && (request.getGroupName() == null || !groups.contains(request.getGroupName()))) { - throw new SentryAccessDeniedException("Access denied to " + request.getRequestorUserName()); + throw new SentryAccessDeniedException(ACCESS_DENIAL_MESSAGE + request.getRequestorUserName()); } groups.clear(); groups.add(request.getGroupName()); @@ -429,18 +607,18 @@ public TListSentryPrivilegesResponse list_sentry_privileges_by_role( Response> respose = requestHandle(new RequestHandler>() { @Override public Response> handle() throws Exception { + validateClientVersion(request.getProtocol_version()); Set groups = getRequestorGroups(conf, request.getRequestorUserName()); if (!inAdminGroups(groups)) { - Set roleNamesForGroups = toTrimedLower(store.getRolesByGroups(request.getComponent(), groups)); - if (!roleNamesForGroups.contains(toTrimedLower(request.getRoleName()))) { - throw new SentryAccessDeniedException("Access denied to " + request.getRequestorUserName()); + Set roleNamesForGroups = toTrimmedLower(store.getRolesByGroups(request.getComponent(), groups)); + if (!roleNamesForGroups.contains(toTrimmedLower(request.getRoleName()))) { + throw new SentryAccessDeniedException(ACCESS_DENIAL_MESSAGE + request.getRequestorUserName()); } } Set privileges = store.getPrivilegesByProvider(request.getComponent(), request.getServiceName(), Sets.newHashSet(request.getRoleName()), - null, - toAuthorizables(request.getAuthorizables())); + null, toAuthorizables(request.getAuthorizables())); Set tSentryPrivileges = Sets.newHashSet(); for (PrivilegeObject privilege : privileges) { tSentryPrivileges.add(fromPrivilegeObject(privilege)); @@ -460,13 +638,14 @@ public TListSentryPrivilegesForProviderResponse list_sentry_privileges_for_provi Response> respose = requestHandle(new RequestHandler>() { @Override public Response> handle() throws Exception { - Set activeRoleNames = toTrimedLower(request.getRoleSet().getRoles()); + validateClientVersion(request.getProtocol_version()); + Set activeRoleNames = toTrimmedLower(request.getRoleSet().getRoles()); Set roleNamesForGroups = store.getRolesByGroups(request.getComponent(), request.getGroups()); Set rolesToQuery = request.getRoleSet().isAll() ? 
roleNamesForGroups : Sets.intersection(activeRoleNames, roleNamesForGroups); Set privileges = store.getPrivilegesByProvider(request.getComponent(), - request.getServiceName(), - rolesToQuery, null, - toAuthorizables(request.getAuthorizables())); + request.getServiceName(), + rolesToQuery, null, + toAuthorizables(request.getAuthorizables())); return new Response>(Status.OK(), buildPermissions(privileges)); } }); @@ -476,12 +655,104 @@ public Response> handle() throws Exception { return tResponse; } + @Override + public TListSentryPrivilegesByAuthResponse list_sentry_privileges_by_authorizable(TListSentryPrivilegesByAuthRequest request) throws TException { + + TListSentryPrivilegesByAuthResponse response = new TListSentryPrivilegesByAuthResponse(); + Map authRoleMap = Maps.newHashMap(); + + // Group names are case sensitive. + Set requestedGroups = request.getGroups(); + String subject = request.getRequestorUserName(); + TSentryActiveRoleSet activeRoleSet = request.getRoleSet(); + Set validActiveRoles = Sets.newHashSet(); + + try { + validateClientVersion(request.getProtocol_version()); + Set memberGroups = getRequestorGroups(conf, subject); + + // Disallow non-admin users to lookup groups that + // they are not part of. + if(!inAdminGroups(memberGroups)) { + + if (requestedGroups != null && !requestedGroups.isEmpty()) { + for (String requestedGroup : requestedGroups) { + + // If user doesn't belong to one of the requested groups, + // then raise security exception. + if (!memberGroups.contains(requestedGroup)) { + throw new SentryAccessDeniedException(ACCESS_DENIAL_MESSAGE + subject); + } + } + } else { + // Non-admin's search is limited to its own groups. + requestedGroups = memberGroups; + } + + // Disallow non-admin to lookup roles that they are not part of + if (activeRoleSet != null && !activeRoleSet.isAll()) { + Set grantedRoles = toTrimmedLower(store.getRolesByGroups(request.getComponent(), requestedGroups)); + Set activeRoleNames = toTrimmedLower(activeRoleSet.getRoles()); + + for (String activeRole : activeRoleNames) { + if (!grantedRoles.contains(activeRole)) { + throw new SentryAccessDeniedException(ACCESS_DENIAL_MESSAGE + + subject); + } + } + + // For non-admin, valid active roles are intersection of active roles and granted roles. + validActiveRoles.addAll(activeRoleSet.isAll() ? grantedRoles : Sets.intersection(activeRoleNames, grantedRoles)); + } + } else { + Set allRoles = toTrimmedLower(store.getAllRoleNames()); + Set activeRoleNames = toTrimmedLower(activeRoleSet.getRoles()); + + // For admin, if requestedGroups are empty, valid active roles are intersection of active roles and all roles. + // Otherwise, valid active roles are intersection of active roles and the roles of requestedGroups. + if (requestedGroups == null || requestedGroups.isEmpty()) { + validActiveRoles.addAll(activeRoleSet.isAll() ? allRoles : Sets.intersection(activeRoleNames, allRoles)); + } else { + Set requestedRoles = toTrimmedLower(store.getRolesByGroups(request.getComponent(), requestedGroups)); + validActiveRoles.addAll(activeRoleSet.isAll() ? allRoles : Sets.intersection(activeRoleNames, requestedRoles)); + } + } + + // If user is not part of any group.. 
return empty response + if (request.getAuthorizablesSet() != null) { + for (String authorizablesStr : request.getAuthorizablesSet()) { + + List authorizables = toAuthorizables(authorizablesStr); + Set sentryPrivileges = store.getPrivilegesByAuthorizable(request.getComponent(), request.getServiceName(), validActiveRoles, authorizables); + authRoleMap.put(fromAuthorizableToStr(authorizables), toTSentryPrivilegeMap(sentryPrivileges)); + } + } + + response.setPrivilegesMapByAuth(authRoleMap); + response.setStatus(Status.OK()); + } catch (SentryAccessDeniedException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); + } catch (Exception e) { + String msg = "Unknown error for request: " + request + ", message: " + + e.getMessage(); + LOGGER.error(msg, e); + response.setStatus(Status.RuntimeError(msg, e)); + } + + return response; + } + @Override public TDropPrivilegesResponse drop_sentry_privilege( final TDropPrivilegesRequest request) throws TException { Response respose = requestHandle(new RequestHandler() { @Override public Response handle() throws Exception { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(conf, request.getRequestorUserName())); CommitContext context = store.dropPrivilege(request.getComponent(), @@ -504,6 +775,7 @@ public TRenamePrivilegesResponse rename_sentry_privilege( Response respose = requestHandle(new RequestHandler() { @Override public Response handle() throws Exception { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(conf, request.getRequestorUserName())); CommitContext context = store.renamePrivilege(request.getComponent(), request.getServiceName(), @@ -544,6 +816,15 @@ private static class Response { } } private interface RequestHandler { - public Response handle() throws Exception ; + Response handle() throws Exception ; + } + + private static void validateClientVersion(int protocol_version) throws SentryThriftAPIMismatchException { + if (ServiceConstants.ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT != protocol_version) { + String msg = "Sentry thrift API protocol version mismatch: Client thrift version " + + "is: " + protocol_version + " , server thrift verion " + + "is " + ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT; + throw new SentryThriftAPIMismatchException(msg); + } } -} \ No newline at end of file +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessorFactory.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessorFactory.java index 71ce57974..1cce1fc4b 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessorFactory.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessorFactory.java @@ -18,8 +18,6 @@ package org.apache.sentry.provider.db.generic.service.thrift; import org.apache.hadoop.conf.Configuration; -import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericPolicyService; -import 
org.apache.sentry.provider.db.service.thrift.PolicyStoreConstants.PolicyStoreServerConfig; import org.apache.sentry.service.thrift.ProcessorFactory; import org.apache.thrift.TMultiplexedProcessor; import org.apache.thrift.TProcessor; @@ -33,7 +31,8 @@ public SentryGenericPolicyProcessorFactory(Configuration conf) { @Override public boolean register(TMultiplexedProcessor multiplexedProcessor) throws Exception { SentryGenericPolicyProcessor processHandler = new SentryGenericPolicyProcessor(conf); - TProcessor processor = new SentryGenericPolicyService.Processor(processHandler); + TProcessor processor = new SentryGenericPolicyProcessorWrapper( + processHandler); multiplexedProcessor.registerProcessor(SentryGenericPolicyProcessor.SENTRY_GENERIC_SERVICE_NAME, processor); return true; } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessorWrapper.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessorWrapper.java new file mode 100644 index 000000000..d320d0fdb --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericPolicyProcessorWrapper.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
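A note on the list_sentry_privileges_by_authorizable handler added a few hunks above: for a non-admin caller, every requested active role must already be granted to one of the caller's groups, and the set of roles actually queried is the intersection of the two. The following is a minimal standalone sketch of that check, assuming Guava is on the classpath; the class name and exception choice are illustrative, and toTrimmedLower mirrors the handler's helper of the same name.

import java.util.Locale;
import java.util.Set;
import com.google.common.collect.Sets;

public final class ActiveRoleFilter {
  private ActiveRoleFilter() {
  }

  // Normalize names before comparison, as the handler does via toTrimmedLower().
  static Set<String> toTrimmedLower(Set<String> roles) {
    Set<String> result = Sets.newHashSet();
    for (String role : roles) {
      result.add(role.trim().toLowerCase(Locale.ROOT));
    }
    return result;
  }

  // Non-admin case: every requested active role must already be granted,
  // and the valid set is the intersection of active and granted roles.
  public static Set<String> validActiveRoles(Set<String> activeRoles, Set<String> grantedRoles) {
    Set<String> active = toTrimmedLower(activeRoles);
    Set<String> granted = toTrimmedLower(grantedRoles);
    for (String role : active) {
      if (!granted.contains(role)) {
        throw new SecurityException("Access denied: role " + role + " is not granted to the requestor");
      }
    }
    return Sets.newHashSet(Sets.intersection(active, granted));
  }
}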
+ */ + +package org.apache.sentry.provider.db.generic.service.thrift; + +import org.apache.sentry.provider.db.service.thrift.ThriftUtil; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TProtocol; + +public class SentryGenericPolicyProcessorWrapper + extends SentryGenericPolicyService.Processor { + + public SentryGenericPolicyProcessorWrapper(I iface) { + super(iface); + } + + @Override + public boolean process(TProtocol in, TProtocol out) throws TException { + // set the ip and impersonator for audit log + ThriftUtil.setIpAddress(in); + ThriftUtil.setImpersonator(in); + return super.process(in, out); + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClient.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClient.java index 9f4a292bc..60502895a 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClient.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClient.java @@ -17,148 +17,14 @@ */ package org.apache.sentry.provider.db.generic.service.thrift; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; -import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; -import javax.security.auth.callback.CallbackHandler; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.SaslRpcServer; -import org.apache.hadoop.security.SaslRpcServer.AuthMethod; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.sentry.SentryUserException; import org.apache.sentry.core.common.ActiveRoleSet; import org.apache.sentry.core.common.Authorizable; -import org.apache.sentry.core.model.db.AccessConstants; -import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; -import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; -import org.apache.sentry.service.thrift.Status; -import org.apache.sentry.service.thrift.sentry_common_serviceConstants; -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TMultiplexedProtocol; -import org.apache.thrift.transport.TSaslClientTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; - -public class SentryGenericServiceClient { - private final Configuration conf; - private final InetSocketAddress serverAddress; - private final boolean kerberos; - private final String[] serverPrincipalParts; - private SentryGenericPolicyService.Client client; - private TTransport transport; - private int connectionTimeout; - private static final Logger LOGGER = LoggerFactory - .getLogger(SentryGenericServiceClient.class); - private static final String THRIFT_EXCEPTION_MESSAGE = "Thrift exception occured "; - - /** - * This transport wraps the Sasl transports to set up the right UGI context for open(). 
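SentryGenericPolicyProcessorWrapper, just added above, is a plain decorator over the generated Thrift processor: it intercepts process() to capture connection metadata for the audit log before delegating. A generic sketch of the same pattern, assuming the Thrift 0.9.x API in which process() returns boolean; the class and hook names are illustrative:

import org.apache.thrift.TException;
import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TProtocol;

/** Decorator that runs a pre-call hook around any TProcessor. */
public class HookedProcessor implements TProcessor {
  private final TProcessor delegate;

  public HookedProcessor(TProcessor delegate) {
    this.delegate = delegate;
  }

  @Override
  public boolean process(TProtocol in, TProtocol out) throws TException {
    // Hook point: the wrapper in this patch pulls the peer address and
    // impersonator off the input protocol here for the audit log.
    beforeCall(in);
    return delegate.process(in, out);
  }

  protected void beforeCall(TProtocol in) {
    // Illustrative no-op; a real hook would unwrap in.getTransport()
    // down to the socket to read the remote address.
  }
}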
- */ - public static class UgiSaslClientTransport extends TSaslClientTransport { - protected UserGroupInformation ugi = null; - - public UgiSaslClientTransport(String mechanism, String authorizationId, - String protocol, String serverName, Map props, - CallbackHandler cbh, TTransport transport, boolean wrapUgi) - throws IOException { - super(mechanism, authorizationId, protocol, serverName, props, cbh, - transport); - if (wrapUgi) { - ugi = UserGroupInformation.getLoginUser(); - } - } - - // open the SASL transport with using the current UserGroupInformation - // This is needed to get the current login context stored - @Override - public void open() throws TTransportException { - if (ugi == null) { - baseOpen(); - } else { - try { - if (ugi.isFromKeytab()) { - ugi.checkTGTAndReloginFromKeytab(); - } - ugi.doAs(new PrivilegedExceptionAction() { - public Void run() throws TTransportException { - baseOpen(); - return null; - } - }); - } catch (IOException e) { - throw new TTransportException("Failed to open SASL transport", e); - } catch (InterruptedException e) { - throw new TTransportException( - "Interrupted while opening underlying transport", e); - } - } - } - - private void baseOpen() throws TTransportException { - super.open(); - } - } - - public SentryGenericServiceClient(Configuration conf) throws IOException { - this.conf = conf; - Preconditions.checkNotNull(this.conf, "Configuration object cannot be null"); - this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull( - conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key " - + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt( - ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT)); - this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT, - ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT); - kerberos = ServerConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase( - conf.get(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_KERBEROS).trim()); - transport = new TSocket(serverAddress.getHostName(), - serverAddress.getPort(), connectionTimeout); - if (kerberos) { - String serverPrincipal = Preconditions.checkNotNull(conf.get(ServerConfig.PRINCIPAL), ServerConfig.PRINCIPAL + " is required"); - - // Resolve server host in the same way as we are doing on server side - serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress()); - LOGGER.debug("Using server kerberos principal: " + serverPrincipal); - - serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal); - Preconditions.checkArgument(serverPrincipalParts.length == 3, - "Kerberos principal should have 3 parts: " + serverPrincipal); - boolean wrapUgi = "true".equalsIgnoreCase(conf - .get(ServerConfig.SECURITY_USE_UGI_TRANSPORT, "true")); - transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(), - null, serverPrincipalParts[0], serverPrincipalParts[1], - ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi); - } else { - serverPrincipalParts = null; - } - try { - transport.open(); - } catch (TTransportException e) { - throw new IOException("Transport exception while opening transport: " + e.getMessage(), e); - } - LOGGER.debug("Successfully opened transport: " + transport + " to " + serverAddress); - TMultiplexedProtocol protocol = new TMultiplexedProtocol( - new TBinaryProtocol(transport), - SentryGenericPolicyProcessor.SENTRY_GENERIC_SERVICE_NAME); - client = new SentryGenericPolicyService.Client(protocol); - LOGGER.debug("Successfully created client"); 
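The UgiSaslClientTransport removed here reappears, with an added Configuration parameter, in the new default client implementation later in this patch. Its job is to run the SASL open() as the Hadoop login user, re-logging in from the keytab when needed. A self-contained sketch of that idea, using only the UserGroupInformation calls seen in the patch (the wrapper class name is illustrative):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

/** Sketch: run an action under the Hadoop login user, refreshing the TGT if needed. */
public final class LoginUserRunner {
  public static <T> T runAsLoginUser(PrivilegedExceptionAction<T> action) throws IOException {
    try {
      UserGroupInformation ugi = UserGroupInformation.getLoginUser();
      if (ugi.isFromKeytab()) {
        ugi.checkTGTAndReloginFromKeytab();
      }
      return ugi.doAs(action);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted while running as login user", e);
    }
  }
}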
- } - +public interface SentryGenericServiceClient { /** * Create a sentry role @@ -167,38 +33,11 @@ public SentryGenericServiceClient(Configuration conf) throws IOException { * @param component: The request is issued to which component * @throws SentryUserException */ - public synchronized void createRole(String requestorUserName, String roleName, String component) - throws SentryUserException { - TCreateSentryRoleRequest request = new TCreateSentryRoleRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setRequestorUserName(requestorUserName); - request.setRoleName(roleName); - request.setComponent(component); - try { - TCreateSentryRoleResponse response = client.create_sentry_role(request); - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + void createRole(String requestorUserName, String roleName, + String component) throws SentryUserException; - public void createRoleIfNotExist(String requestorUserName, String roleName, String component) throws SentryUserException { - TCreateSentryRoleRequest request = new TCreateSentryRoleRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setRequestorUserName(requestorUserName); - request.setRoleName(roleName); - request.setComponent(component); - try { - TCreateSentryRoleResponse response = client.create_sentry_role(request); - Status status = Status.fromCode(response.getStatus().getValue()); - if (status == Status.ALREADY_EXISTS) { - return; - } - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + void createRoleIfNotExist(String requestorUserName, + String roleName, String component) throws SentryUserException; /** * Drop a sentry role @@ -207,37 +46,11 @@ public void createRoleIfNotExist(String requestorUserName, String roleName, Stri * @param component: The request is issued to which component * @throws SentryUserException */ - public void dropRole(String requestorUserName, - String roleName, String component) - throws SentryUserException { - dropRole(requestorUserName, roleName, component, false); - } - - public void dropRoleIfExists(String requestorUserName, - String roleName, String component) - throws SentryUserException { - dropRole(requestorUserName, roleName, component, true); - } + void dropRole(String requestorUserName, String roleName, + String component) throws SentryUserException; - private void dropRole(String requestorUserName, - String roleName, String component , boolean ifExists) - throws SentryUserException { - TDropSentryRoleRequest request = new TDropSentryRoleRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setRequestorUserName(requestorUserName); - request.setRoleName(roleName); - request.setComponent(component); - try { - TDropSentryRoleResponse response = client.drop_sentry_role(request); - Status status = Status.fromCode(response.getStatus().getValue()); - if (ifExists && status == Status.NO_SUCH_OBJECT) { - return; - } - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + void dropRoleIfExists(String requestorUserName, String roleName, + String component) throws SentryUserException; /** * add a sentry role to groups. 
@@ -247,22 +60,8 @@ private void dropRole(String requestorUserName, * @param groups: The name of groups * @throws SentryUserException */ - public void addRoleToGroups(String requestorUserName, String roleName, - String component, Set groups) throws SentryUserException { - TAlterSentryRoleAddGroupsRequest request = new TAlterSentryRoleAddGroupsRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setRequestorUserName(requestorUserName); - request.setRoleName(roleName); - request.setGroups(groups); - request.setComponent(component); - - try { - TAlterSentryRoleAddGroupsResponse response = client.alter_sentry_role_add_groups(request); - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + void addRoleToGroups(String requestorUserName, String roleName, + String component, Set groups) throws SentryUserException; /** * delete a sentry role from groups. @@ -272,22 +71,8 @@ public void addRoleToGroups(String requestorUserName, String roleName, * @param groups: The name of groups * @throws SentryUserException */ - public void deleteRoleToGroups(String requestorUserName, String roleName, - String component, Set groups) throws SentryUserException { - TAlterSentryRoleDeleteGroupsRequest request = new TAlterSentryRoleDeleteGroupsRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setRequestorUserName(requestorUserName); - request.setRoleName(roleName); - request.setGroups(groups); - request.setComponent(component); - - try { - TAlterSentryRoleDeleteGroupsResponse response = client.alter_sentry_role_delete_groups(request); - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + void deleteRoleToGroups(String requestorUserName, String roleName, + String component, Set groups) throws SentryUserException; /** * grant privilege @@ -297,22 +82,8 @@ public void deleteRoleToGroups(String requestorUserName, String roleName, * @param privilege * @throws SentryUserException */ - public void grantPrivilege(String requestorUserName, String roleName, - String component, TSentryPrivilege privilege) throws SentryUserException { - TAlterSentryRoleGrantPrivilegeRequest request = new TAlterSentryRoleGrantPrivilegeRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setComponent(component); - request.setRoleName(roleName); - request.setRequestorUserName(requestorUserName); - request.setPrivilege(privilege); - - try { - TAlterSentryRoleGrantPrivilegeResponse response = client.alter_sentry_role_grant_privilege(request); - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + void grantPrivilege(String requestorUserName, String roleName, + String component, TSentryPrivilege privilege) throws SentryUserException; /** * revoke privilege @@ -322,22 +93,8 @@ public void grantPrivilege(String requestorUserName, String roleName, * @param privilege * @throws SentryUserException */ - public void revokePrivilege(String requestorUserName, String roleName, - String component, TSentryPrivilege privilege) throws SentryUserException { - TAlterSentryRoleRevokePrivilegeRequest request = new TAlterSentryRoleRevokePrivilegeRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - 
request.setComponent(component); - request.setRequestorUserName(requestorUserName); - request.setRoleName(roleName); - request.setPrivilege(privilege); - - try { - TAlterSentryRoleRevokePrivilegeResponse response = client.alter_sentry_role_revoke_privilege(request); - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + void revokePrivilege(String requestorUserName, String roleName, + String component, TSentryPrivilege privilege) throws SentryUserException; /** * drop privilege @@ -347,21 +104,8 @@ public void revokePrivilege(String requestorUserName, String roleName, * @param privilege * @throws SentryUserException */ - public void dropPrivilege(String requestorUserName,String component, - TSentryPrivilege privilege) throws SentryUserException { - TDropPrivilegesRequest request = new TDropPrivilegesRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setComponent(component); - request.setRequestorUserName(requestorUserName); - request.setPrivilege(privilege); - - try { - TDropPrivilegesResponse response = client.drop_sentry_privilege(request); - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + void dropPrivilege(String requestorUserName,String component, + TSentryPrivilege privilege) throws SentryUserException; /** * rename privilege @@ -372,38 +116,9 @@ public void dropPrivilege(String requestorUserName,String component, * @param newAuthorizables * @throws SentryUserException */ - public void renamePrivilege(String requestorUserName, String component, + void renamePrivilege(String requestorUserName, String component, String serviceName, List oldAuthorizables, - List newAuthorizables) throws SentryUserException { - if ((oldAuthorizables == null) || (oldAuthorizables.size() == 0) - || (newAuthorizables == null) || (newAuthorizables.size() == 0)) { - throw new SentryUserException("oldAuthorizables and newAuthorizables can't be null or empty"); - } - - TRenamePrivilegesRequest request = new TRenamePrivilegesRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setComponent(component); - request.setRequestorUserName(requestorUserName); - request.setServiceName(serviceName); - - List oldTAuthorizables = Lists.newArrayList(); - List newTAuthorizables = Lists.newArrayList(); - for (Authorizable authorizable : oldAuthorizables) { - oldTAuthorizables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); - request.setOldAuthorizables(oldTAuthorizables); - } - for (Authorizable authorizable : newAuthorizables) { - newTAuthorizables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); - request.setNewAuthorizables(newTAuthorizables); - } - - try { - TRenamePrivilegesResponse response = client.rename_sentry_privilege(request); - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + List newAuthorizables) throws SentryUserException; /** * Gets sentry role objects for a given groupName using the Sentry service @@ -413,35 +128,17 @@ public void renamePrivilege(String requestorUserName, String component, * @return Set of thrift sentry role objects * @throws SentryUserException */ - public synchronized Set listRolesByGroupName( + Set listRolesByGroupName( String requestorUserName, String groupName, 
String component) - throws SentryUserException { - TListSentryRolesRequest request = new TListSentryRolesRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setRequestorUserName(requestorUserName); - request.setGroupName(groupName); - request.setComponent(component); - TListSentryRolesResponse response; - try { - response = client.list_sentry_roles_by_group(request); - Status.throwIfNotOk(response.getStatus()); - return response.getRoles(); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + throws SentryUserException; - public Set listUserRoles(String requestorUserName, String component) - throws SentryUserException { - return listRolesByGroupName(requestorUserName, AccessConstants.ALL, component); - } + Set listUserRoles(String requestorUserName, String component) + throws SentryUserException; - public Set listAllRoles(String requestorUserName, String component) - throws SentryUserException { - return listRolesByGroupName(requestorUserName, null, component); - } + Set listAllRoles(String requestorUserName, String component) + throws SentryUserException; /** * Gets sentry privileges for a given roleName and Authorizable Hirerchys using the Sentry service @@ -453,39 +150,14 @@ public Set listAllRoles(String requestorUserName, String component) * @return * @throws SentryUserException */ - public Set listPrivilegesByRoleName( + Set listPrivilegesByRoleName( String requestorUserName, String roleName, String component, String serviceName, List authorizables) - throws SentryUserException { - TListSentryPrivilegesRequest request = new TListSentryPrivilegesRequest(); - request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setComponent(component); - request.setServiceName(serviceName); - request.setRequestorUserName(requestorUserName); - request.setRoleName(roleName); - if ((authorizables != null) && (authorizables.size() > 0)) { - List tAuthorizables = Lists.newArrayList(); - for (Authorizable authorizable : authorizables) { - tAuthorizables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); - } - request.setAuthorizables(tAuthorizables); - } - - TListSentryPrivilegesResponse response; - try { - response = client.list_sentry_privileges_by_role(request); - Status.throwIfNotOk(response.getStatus()); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - return response.getPrivileges(); - } + throws SentryUserException; - public Set listPrivilegesByRoleName( + Set listPrivilegesByRoleName( String requestorUserName, String roleName, String component, - String serviceName) throws SentryUserException { - return listPrivilegesByRoleName(requestorUserName, roleName, component, serviceName, null); - } + String serviceName) throws SentryUserException; /** * get sentry permissions from provider as followings: @@ -497,40 +169,9 @@ public Set listPrivilegesByRoleName( * @returns the set of permissions * @throws SentryUserException */ - public Set listPrivilegesForProvider(String component, + Set listPrivilegesForProvider(String component, String serviceName, ActiveRoleSet roleSet, Set groups, - List authorizables) throws SentryUserException { - TSentryActiveRoleSet thriftRoleSet = new TSentryActiveRoleSet(roleSet.isAll(), roleSet.getRoles()); - TListSentryPrivilegesForProviderRequest request = new TListSentryPrivilegesForProviderRequest(); - 
request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); - request.setComponent(component); - request.setServiceName(serviceName); - request.setRoleSet(thriftRoleSet); - if (groups == null) { - request.setGroups(new HashSet()); - } else { - request.setGroups(groups); - } - List tAuthoriables = Lists.newArrayList(); - if ((authorizables != null) && (authorizables.size() > 0)) { - for (Authorizable authorizable : authorizables) { - tAuthoriables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); - } - request.setAuthorizables(tAuthoriables); - } - - try { - TListSentryPrivilegesForProviderResponse response = client.list_sentry_privileges_for_provider(request); - Status.throwIfNotOk(response.getStatus()); - return response.getPrivileges(); - } catch (TException e) { - throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); - } - } + List authorizables) throws SentryUserException; - public void close() { - if (transport != null) { - transport.close(); - } - } + void close(); } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClientDefaultImpl.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClientDefaultImpl.java new file mode 100644 index 000000000..e52b6efc1 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClientDefaultImpl.java @@ -0,0 +1,605 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
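Every request built by the implementation that follows pins protocol_version to TSENTRY_SERVICE_V2, and the processor hunks earlier reject anything else through validateClientVersion. A minimal sketch of that gate; the constant value and exception type are illustrative stand-ins for ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT and SentryThriftAPIMismatchException:

public final class ProtocolGate {
  // Stand-in for ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT.
  static final int SERVICE_VERSION_CURRENT = 2;

  public static void validateClientVersion(int protocolVersion) throws VersionMismatchException {
    if (protocolVersion != SERVICE_VERSION_CURRENT) {
      throw new VersionMismatchException("Sentry thrift API protocol version mismatch: client version is "
          + protocolVersion + ", server version is " + SERVICE_VERSION_CURRENT);
    }
  }

  public static final class VersionMismatchException extends Exception {
    public VersionMismatchException(String msg) {
      super(msg);
    }
  }
}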
+ */ +package org.apache.sentry.provider.db.generic.service.thrift; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; +import java.util.*; + +import javax.security.auth.callback.CallbackHandler; + +import com.google.common.collect.Sets; +import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.SaslRpcServer.AuthMethod; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.sentry.SentryUserException; +import org.apache.sentry.core.common.ActiveRoleSet; +import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.model.db.AccessConstants; +import org.apache.sentry.service.thrift.ServiceConstants; +import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; +import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; +import org.apache.sentry.service.thrift.Status; +import org.apache.sentry.service.thrift.sentry_common_serviceConstants; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TMultiplexedProtocol; +import org.apache.thrift.transport.TSaslClientTransport; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + +public class SentryGenericServiceClientDefaultImpl implements SentryGenericServiceClient { + private final Configuration conf; + private final InetSocketAddress serverAddress; + private final boolean kerberos; + private final String[] serverPrincipalParts; + private SentryGenericPolicyService.Client client; + private TTransport transport; + private int connectionTimeout; + private static final Logger LOGGER = LoggerFactory + .getLogger(SentryGenericServiceClientDefaultImpl.class); + private static final String THRIFT_EXCEPTION_MESSAGE = "Thrift exception occured "; + + /** + * This transport wraps the Sasl transports to set up the right UGI context for open(). 
+ */ + public static class UgiSaslClientTransport extends TSaslClientTransport { + protected UserGroupInformation ugi = null; + + public UgiSaslClientTransport(String mechanism, String authorizationId, + String protocol, String serverName, Map props, + CallbackHandler cbh, TTransport transport, boolean wrapUgi, Configuration conf) + throws IOException { + super(mechanism, authorizationId, protocol, serverName, props, cbh, + transport); + if (wrapUgi) { + // If we don't set the configuration, the UGI will be created based on + // what's on the classpath, which may lack the kerberos changes we require + UserGroupInformation.setConfiguration(conf); + ugi = UserGroupInformation.getLoginUser(); + } + } + + // open the SASL transport with using the current UserGroupInformation + // This is needed to get the current login context stored + @Override + public void open() throws TTransportException { + if (ugi == null) { + baseOpen(); + } else { + try { + if (ugi.isFromKeytab()) { + ugi.checkTGTAndReloginFromKeytab(); + } + ugi.doAs(new PrivilegedExceptionAction() { + public Void run() throws TTransportException { + baseOpen(); + return null; + } + }); + } catch (IOException e) { + throw new TTransportException("Failed to open SASL transport", e); + } catch (InterruptedException e) { + throw new TTransportException( + "Interrupted while opening underlying transport", e); + } + } + } + + private void baseOpen() throws TTransportException { + super.open(); + } + } + + public SentryGenericServiceClientDefaultImpl(Configuration conf) throws IOException { + // copy the configuration because we may make modifications to it. + this.conf = new Configuration(conf); + Preconditions.checkNotNull(this.conf, "Configuration object cannot be null"); + this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull( + conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key " + + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt( + ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT)); + this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT, + ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT); + kerberos = ServerConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase( + conf.get(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_KERBEROS).trim()); + transport = new TSocket(serverAddress.getHostName(), + serverAddress.getPort(), connectionTimeout); + if (kerberos) { + String serverPrincipal = Preconditions.checkNotNull(conf.get(ServerConfig.PRINCIPAL), ServerConfig.PRINCIPAL + " is required"); + // since the client uses hadoop-auth, we need to set kerberos in + // hadoop-auth if we plan to use kerberos + conf.set(HADOOP_SECURITY_AUTHENTICATION, ServerConfig.SECURITY_MODE_KERBEROS); + + // Resolve server host in the same way as we are doing on server side + serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress()); + LOGGER.debug("Using server kerberos principal: " + serverPrincipal); + + serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal); + Preconditions.checkArgument(serverPrincipalParts.length == 3, + "Kerberos principal should have 3 parts: " + serverPrincipal); + boolean wrapUgi = "true".equalsIgnoreCase(conf + .get(ServerConfig.SECURITY_USE_UGI_TRANSPORT, "true")); + transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(), + null, serverPrincipalParts[0], serverPrincipalParts[1], + ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi, conf); + } else { + serverPrincipalParts = null; + 
} + try { + transport.open(); + } catch (TTransportException e) { + throw new IOException("Transport exception while opening transport: " + e.getMessage(), e); + } + LOGGER.debug("Successfully opened transport: " + transport + " to " + serverAddress); + long maxMessageSize = conf.getLong(ServiceConstants.ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE, + ServiceConstants.ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE_DEFAULT); + TMultiplexedProtocol protocol = new TMultiplexedProtocol( + new TBinaryProtocol(transport, maxMessageSize, maxMessageSize, true, true), + SentryGenericPolicyProcessor.SENTRY_GENERIC_SERVICE_NAME); + client = new SentryGenericPolicyService.Client(protocol); + LOGGER.debug("Successfully created client"); + } + + + + /** + * Create a sentry role + * @param requestorUserName: user on whose behalf the request is issued + * @param roleName: Name of the role + * @param component: The request is issued to which component + * @throws SentryUserException + */ + public synchronized void createRole(String requestorUserName, String roleName, String component) + throws SentryUserException { + TCreateSentryRoleRequest request = new TCreateSentryRoleRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setRequestorUserName(requestorUserName); + request.setRoleName(roleName); + request.setComponent(component); + try { + TCreateSentryRoleResponse response = client.create_sentry_role(request); + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + public void createRoleIfNotExist(String requestorUserName, String roleName, String component) throws SentryUserException { + TCreateSentryRoleRequest request = new TCreateSentryRoleRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setRequestorUserName(requestorUserName); + request.setRoleName(roleName); + request.setComponent(component); + try { + TCreateSentryRoleResponse response = client.create_sentry_role(request); + Status status = Status.fromCode(response.getStatus().getValue()); + if (status == Status.ALREADY_EXISTS) { + return; + } + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + /** + * Drop a sentry role + * @param requestorUserName: user on whose behalf the request is issued + * @param roleName: Name of the role + * @param component: The request is issued to which component + * @throws SentryUserException + */ + public void dropRole(String requestorUserName, + String roleName, String component) + throws SentryUserException { + dropRole(requestorUserName, roleName, component, false); + } + + public void dropRoleIfExists(String requestorUserName, + String roleName, String component) + throws SentryUserException { + dropRole(requestorUserName, roleName, component, true); + } + + private void dropRole(String requestorUserName, + String roleName, String component , boolean ifExists) + throws SentryUserException { + TDropSentryRoleRequest request = new TDropSentryRoleRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setRequestorUserName(requestorUserName); + request.setRoleName(roleName); + request.setComponent(component); + try { + TDropSentryRoleResponse response = client.drop_sentry_role(request); + Status status = Status.fromCode(response.getStatus().getValue()); + if 
(ifExists && status == Status.NO_SUCH_OBJECT) { + return; + } + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + /** + * add a sentry role to groups. + * @param requestorUserName: user on whose behalf the request is issued + * @param roleName: Name of the role + * @param component: The request is issued to which component + * @param groups: The name of groups + * @throws SentryUserException + */ + public void addRoleToGroups(String requestorUserName, String roleName, + String component, Set groups) throws SentryUserException { + TAlterSentryRoleAddGroupsRequest request = new TAlterSentryRoleAddGroupsRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setRequestorUserName(requestorUserName); + request.setRoleName(roleName); + request.setGroups(groups); + request.setComponent(component); + + try { + TAlterSentryRoleAddGroupsResponse response = client.alter_sentry_role_add_groups(request); + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + /** + * delete a sentry role from groups. + * @param requestorUserName: user on whose behalf the request is issued + * @param roleName: Name of the role + * @param component: The request is issued to which component + * @param groups: The name of groups + * @throws SentryUserException + */ + public void deleteRoleToGroups(String requestorUserName, String roleName, + String component, Set groups) throws SentryUserException { + TAlterSentryRoleDeleteGroupsRequest request = new TAlterSentryRoleDeleteGroupsRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setRequestorUserName(requestorUserName); + request.setRoleName(roleName); + request.setGroups(groups); + request.setComponent(component); + + try { + TAlterSentryRoleDeleteGroupsResponse response = client.alter_sentry_role_delete_groups(request); + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + /** + * grant privilege + * @param requestorUserName: user on whose behalf the request is issued + * @param roleName: Name of the role + * @param component: The request is issued to which component + * @param privilege + * @throws SentryUserException + */ + public void grantPrivilege(String requestorUserName, String roleName, + String component, TSentryPrivilege privilege) throws SentryUserException { + TAlterSentryRoleGrantPrivilegeRequest request = new TAlterSentryRoleGrantPrivilegeRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setComponent(component); + request.setRoleName(roleName); + request.setRequestorUserName(requestorUserName); + request.setPrivilege(privilege); + + try { + TAlterSentryRoleGrantPrivilegeResponse response = client.alter_sentry_role_grant_privilege(request); + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + /** + * revoke privilege + * @param requestorUserName: user on whose behalf the request is issued + * @param roleName: Name of the role + * @param component: The request is issued to which component + * @param privilege + * @throws SentryUserException + */ + public void revokePrivilege(String requestorUserName, String roleName, + String component, 
TSentryPrivilege privilege) throws SentryUserException { + TAlterSentryRoleRevokePrivilegeRequest request = new TAlterSentryRoleRevokePrivilegeRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setComponent(component); + request.setRequestorUserName(requestorUserName); + request.setRoleName(roleName); + request.setPrivilege(privilege); + + try { + TAlterSentryRoleRevokePrivilegeResponse response = client.alter_sentry_role_revoke_privilege(request); + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + /** + * drop privilege + * @param requestorUserName: user on whose behalf the request is issued + * @param roleName: Name of the role + * @param component: The request is issued to which component + * @param privilege + * @throws SentryUserException + */ + public void dropPrivilege(String requestorUserName,String component, + TSentryPrivilege privilege) throws SentryUserException { + TDropPrivilegesRequest request = new TDropPrivilegesRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setComponent(component); + request.setRequestorUserName(requestorUserName); + request.setPrivilege(privilege); + + try { + TDropPrivilegesResponse response = client.drop_sentry_privilege(request); + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + /** + * rename privilege + * @param requestorUserName: user on whose behalf the request is issued + * @param component: The request is issued to which component + * @param serviceName: The Authorizable belongs to which service + * @param oldAuthorizables + * @param newAuthorizables + * @throws SentryUserException + */ + public void renamePrivilege(String requestorUserName, String component, + String serviceName, List oldAuthorizables, + List newAuthorizables) throws SentryUserException { + if (oldAuthorizables == null || oldAuthorizables.isEmpty() + || newAuthorizables == null || newAuthorizables.isEmpty()) { + throw new SentryUserException("oldAuthorizables and newAuthorizables can't be null or empty"); + } + + TRenamePrivilegesRequest request = new TRenamePrivilegesRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setComponent(component); + request.setRequestorUserName(requestorUserName); + request.setServiceName(serviceName); + + List oldTAuthorizables = Lists.newArrayList(); + List newTAuthorizables = Lists.newArrayList(); + for (Authorizable authorizable : oldAuthorizables) { + oldTAuthorizables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); + request.setOldAuthorizables(oldTAuthorizables); + } + for (Authorizable authorizable : newAuthorizables) { + newTAuthorizables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); + request.setNewAuthorizables(newTAuthorizables); + } + + try { + TRenamePrivilegesResponse response = client.rename_sentry_privilege(request); + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + /** + * Gets sentry role objects for a given groupName using the Sentry service + * @param requestorUserName : user on whose behalf the request is issued + * @param groupName : groupName to look up ( if null returns all roles for groups related to requestorUserName) + * 
@param component: The request is issued to which component + * @return Set of thrift sentry role objects + * @throws SentryUserException + */ + public synchronized Set listRolesByGroupName( + String requestorUserName, + String groupName, + String component) + throws SentryUserException { + TListSentryRolesRequest request = new TListSentryRolesRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setRequestorUserName(requestorUserName); + request.setGroupName(groupName); + request.setComponent(component); + TListSentryRolesResponse response; + try { + response = client.list_sentry_roles_by_group(request); + Status.throwIfNotOk(response.getStatus()); + return response.getRoles(); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + public Set listUserRoles(String requestorUserName, String component) + throws SentryUserException { + return listRolesByGroupName(requestorUserName, AccessConstants.ALL, component); + } + + public Set listAllRoles(String requestorUserName, String component) + throws SentryUserException { + return listRolesByGroupName(requestorUserName, null, component); + } + + /** + * Gets sentry privileges for a given roleName and Authorizable Hirerchys using the Sentry service + * @param requestorUserName: user on whose behalf the request is issued + * @param roleName: + * @param component: The request is issued to which component + * @param serviceName + * @param authorizables + * @return + * @throws SentryUserException + */ + public Set listPrivilegesByRoleName( + String requestorUserName, String roleName, String component, + String serviceName, List authorizables) + throws SentryUserException { + TListSentryPrivilegesRequest request = new TListSentryPrivilegesRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setComponent(component); + request.setServiceName(serviceName); + request.setRequestorUserName(requestorUserName); + request.setRoleName(roleName); + if (authorizables != null && !authorizables.isEmpty()) { + List tAuthorizables = Lists.newArrayList(); + for (Authorizable authorizable : authorizables) { + tAuthorizables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); + } + request.setAuthorizables(tAuthorizables); + } + + TListSentryPrivilegesResponse response; + try { + response = client.list_sentry_privileges_by_role(request); + Status.throwIfNotOk(response.getStatus()); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + return response.getPrivileges(); + } + + public Set listPrivilegesByRoleName( + String requestorUserName, String roleName, String component, + String serviceName) throws SentryUserException { + return listPrivilegesByRoleName(requestorUserName, roleName, component, serviceName, null); + } + + /** + * get sentry permissions from provider as followings: + * @param: component: The request is issued to which component + * @param: serviceName: The privilege belongs to which service + * @param: roleSet + * @param: groupNames + * @param: the authorizables + * @returns the set of permissions + * @throws SentryUserException + */ + public Set listPrivilegesForProvider(String component, + String serviceName, ActiveRoleSet roleSet, Set groups, + List authorizables) throws SentryUserException { + TSentryActiveRoleSet thriftRoleSet = new TSentryActiveRoleSet(roleSet.isAll(), roleSet.getRoles()); + TListSentryPrivilegesForProviderRequest request = 
new TListSentryPrivilegesForProviderRequest(); + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setComponent(component); + request.setServiceName(serviceName); + request.setRoleSet(thriftRoleSet); + if (groups == null) { + request.setGroups(new HashSet()); + } else { + request.setGroups(groups); + } + List tAuthoriables = Lists.newArrayList(); + if (authorizables != null && !authorizables.isEmpty()) { + for (Authorizable authorizable : authorizables) { + tAuthoriables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); + } + request.setAuthorizables(tAuthoriables); + } + + try { + TListSentryPrivilegesForProviderResponse response = client.list_sentry_privileges_for_provider(request); + Status.throwIfNotOk(response.getStatus()); + return response.getPrivileges(); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + private List fromAuthorizable(List authorizables) { + List tAuthorizables = Lists.newArrayList(); + for (Authorizable authorizable : authorizables) { + tAuthorizables.add(new TAuthorizable(authorizable.getTypeName(), authorizable.getName())); + } + return tAuthorizables; + } + + /** + * Get sentry privileges based on valid active roles and the authorize objects. Note that + * it is client responsibility to ensure the requestor username, etc. is not impersonated. + * + * @param component: The request respond to which component. + * @param serviceName: The name of service. + * @param requestorUserName: The requestor user name. + * @param authorizablesSet: The set of authorize objects. Represented as a string. e.g + * resourceType1=resourceName1->resourceType2=resourceName2->resourceType3=resourceName3. + * @param groups: The requested groups. + * @param roleSet: The active roles set. + * + * @returns The mapping of authorize objects and TSentryPrivilegeMap(). 
+ * @throws SentryUserException + */ + public Map listPrivilegsbyAuthorizable(String component, + String serviceName, String requestorUserName, Set> authorizablesSet, + Set groups, ActiveRoleSet roleSet) throws SentryUserException { + + Set> authSet = Sets.newHashSet(); + for (List authorizables : authorizablesSet) { + authSet.add(fromAuthorizable(authorizables)); + } + + TListSentryPrivilegesByAuthRequest request = new TListSentryPrivilegesByAuthRequest(); + + request.setProtocol_version(sentry_common_serviceConstants.TSENTRY_SERVICE_V2); + request.setComponent(component); + request.setServiceName(serviceName); + request.setRequestorUserName(requestorUserName); + + if (groups == null) { + request.setGroups(new HashSet()); + } else { + request.setGroups(groups); + } + + if (roleSet != null) { + request.setRoleSet(new TSentryActiveRoleSet(roleSet.isAll(), roleSet.getRoles())); + } + + try { + TListSentryPrivilegesByAuthResponse response = client.list_sentry_privileges_by_authorizable(request); + Status.throwIfNotOk(response.getStatus()); + return response.getPrivilegesMapByAuth(); + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + @Override + public void close() { + if (transport != null) { + transport.close(); + } + } + +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClientFactory.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClientFactory.java new file mode 100644 index 000000000..b070c6de4 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceClientFactory.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.generic.service.thrift; + +import org.apache.hadoop.conf.Configuration; + +/** + * SentryGenericServiceClientFactory is a public class for components that use the generic authorization model to create a Sentry client.
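Callers are expected to obtain the client through the factory declared just below rather than naming the default implementation directly. A minimal end-to-end sketch, assuming the classes from this patch are on the classpath and the Configuration carries the server RPC address; the user, role, group, and component values are illustrative:

import java.util.Arrays;
import java.util.HashSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory;

public class SentryClientExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // must supply the Sentry server RPC address/port
    SentryGenericServiceClient client = SentryGenericServiceClientFactory.create(conf);
    try {
      // Signatures follow the interface defined in this patch.
      client.createRoleIfNotExist("admin1", "analyst_role", "SOLR");
      client.addRoleToGroups("admin1", "analyst_role", "SOLR",
          new HashSet<String>(Arrays.asList("analysts")));
      System.out.println(client.listAllRoles("admin1", "SOLR"));
    } finally {
      client.close();
    }
  }
}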
+ */ +public class SentryGenericServiceClientFactory { + + private SentryGenericServiceClientFactory() { + } + + public static SentryGenericServiceClient create(Configuration conf) throws Exception { + return new SentryGenericServiceClientDefaultImpl(conf); + } + +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/SentryShellSolr.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/SentryShellSolr.java new file mode 100644 index 000000000..de718e963 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/SentryShellSolr.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.provider.db.generic.tools; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory; +import org.apache.sentry.provider.db.generic.tools.command.*; +import org.apache.sentry.provider.db.tools.SentryShellCommon; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * SentryShellSolr is an admin tool responsible for the management of the Sentry repository. + * The following commands are supported: + * create role, drop role, add group to role, grant privilege to role, + * revoke privilege from role, list roles, list privilege for role.
+ */ +public class SentryShellSolr extends SentryShellCommon { + + private static final Logger LOGGER = LoggerFactory.getLogger(SentryShellSolr.class); + public static final String SOLR_SERVICE_NAME = "sentry.service.client.solr.service.name"; + + @Override + public void run() throws Exception { + Command command = null; + String component = "SOLR"; + Configuration conf = getSentryConf(); + + String service = conf.get(SOLR_SERVICE_NAME, "service1"); + SentryGenericServiceClient client = SentryGenericServiceClientFactory.create(conf); + UserGroupInformation ugi = UserGroupInformation.getLoginUser(); + String requestorName = ugi.getShortUserName(); + + if (isCreateRole) { + command = new CreateRoleCmd(roleName, component); + } else if (isDropRole) { + command = new DropRoleCmd(roleName, component); + } else if (isAddRoleGroup) { + command = new AddRoleToGroupCmd(roleName, groupName, component); + } else if (isDeleteRoleGroup) { + command = new DeleteRoleFromGroupCmd(roleName, groupName, component); + } else if (isGrantPrivilegeRole) { + command = new GrantPrivilegeToRoleCmd(roleName, component, + privilegeStr, new SolrTSentryPrivilegeConvertor(component, service)); + } else if (isRevokePrivilegeRole) { + command = new RevokePrivilegeFromRoleCmd(roleName, component, + privilegeStr, new SolrTSentryPrivilegeConvertor(component, service)); + } else if (isListRole) { + command = new ListRolesCmd(groupName, component); + } else if (isListPrivilege) { + command = new ListPrivilegesByRoleCmd(roleName, component, + service, new SolrTSentryPrivilegeConvertor(component, service)); + } + + // check the requestor name + if (StringUtils.isEmpty(requestorName)) { + // The exception message will be recorded in log file. + throw new Exception("The requestor name is empty."); + } + + if (command != null) { + command.execute(client, requestorName); + } + } + + private Configuration getSentryConf() { + Configuration conf = new Configuration(); + conf.addResource(new Path(confPath)); + return conf; + } + + public static void main(String[] args) throws Exception { + SentryShellSolr sentryShell = new SentryShellSolr(); + try { + sentryShell.executeShell(args); + } catch (Exception e) { + LOGGER.error(e.getMessage(), e); + Throwable current = e; + // find the first printable message; + while (current != null && current.getMessage() == null) { + current = current.getCause(); + } + String error = ""; + if (current != null && current.getMessage() != null) { + error = "Message: " + current.getMessage(); + } + System.out.println("The operation failed. " + error); + System.exit(1); + } + } + +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/SolrTSentryPrivilegeConvertor.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/SolrTSentryPrivilegeConvertor.java new file mode 100644 index 000000000..e2dfdf13d --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/SolrTSentryPrivilegeConvertor.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.provider.db.generic.tools; + +import com.google.common.collect.Lists; + +import org.apache.sentry.core.model.search.Collection; +import org.apache.sentry.core.model.search.SearchModelAuthorizable; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.policy.common.PrivilegeValidator; +import org.apache.sentry.policy.common.PrivilegeValidatorContext; +import org.apache.sentry.policy.search.SearchModelAuthorizables; +import org.apache.sentry.policy.search.SimpleSearchPolicyEngine; +import org.apache.sentry.policy.common.KeyValue; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.generic.tools.command.TSentryPrivilegeConvertor; +import org.apache.shiro.config.ConfigurationException; + +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; + +public class SolrTSentryPrivilegeConvertor implements TSentryPrivilegeConvertor { + private String component; + private String service; + + public SolrTSentryPrivilegeConvertor(String component, String service) { + this.component = component; + this.service = service; + } + + public TSentryPrivilege fromString(String privilegeStr) throws Exception { + validatePrivilegeHierarchy(privilegeStr); + TSentryPrivilege tSentryPrivilege = new TSentryPrivilege(); + List authorizables = new LinkedList(); + for (String authorizable : PolicyConstants.AUTHORIZABLE_SPLITTER.split(privilegeStr)) { + KeyValue keyValue = new KeyValue(authorizable); + String key = keyValue.getKey(); + String value = keyValue.getValue(); + + // is it an authorizable? + SearchModelAuthorizable authz = SearchModelAuthorizables.from(keyValue); + if (authz != null) { + if (authz instanceof Collection) { + Collection coll = (Collection)authz; + authorizables.add(new TAuthorizable(coll.getTypeName(), coll.getName())); + } else { + throw new IllegalArgumentException("Unknown authorizable type: " + authz.getTypeName()); + } + } else if (PolicyFileConstants.PRIVILEGE_ACTION_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setAction(value); + // Limitation: don't support grant at this time, since the existing solr use cases don't need it. 
+      } else {
+        throw new IllegalArgumentException("Unknown key: " + key);
+      }
+    }
+
+    if (tSentryPrivilege.getAction() == null) {
+      throw new IllegalArgumentException("Privilege is invalid: action required but not specified.");
+    }
+    tSentryPrivilege.setComponent(component);
+    tSentryPrivilege.setServiceName(service);
+    tSentryPrivilege.setAuthorizables(authorizables);
+    return tSentryPrivilege;
+  }
+
+  public String toString(TSentryPrivilege tSentryPrivilege) {
+    List<String> privileges = Lists.newArrayList();
+    if (tSentryPrivilege != null) {
+      List<TAuthorizable> authorizables = tSentryPrivilege.getAuthorizables();
+      String action = tSentryPrivilege.getAction();
+      String grantOption = (tSentryPrivilege.getGrantOption() == TSentryGrantOption.TRUE ? "true"
+          : "false");
+
+      Iterator<TAuthorizable> it = authorizables.iterator();
+      if (it != null) {
+        while (it.hasNext()) {
+          TAuthorizable tAuthorizable = it.next();
+          privileges.add(PolicyConstants.KV_JOINER.join(
+              tAuthorizable.getType(), tAuthorizable.getName()));
+        }
+      }
+
+      if (!authorizables.isEmpty()) {
+        privileges.add(PolicyConstants.KV_JOINER.join(
+            PolicyFileConstants.PRIVILEGE_ACTION_NAME, action));
+      }
+
+      // Only append the grant option to the privilege string if it is true.
+      if ("true".equals(grantOption)) {
+        privileges.add(PolicyConstants.KV_JOINER.join(
+            PolicyFileConstants.PRIVILEGE_GRANT_OPTION_NAME, grantOption));
+      }
+    }
+    return PolicyConstants.AUTHORIZABLE_JOINER.join(privileges);
+  }
+
+  private static void validatePrivilegeHierarchy(String privilegeStr) throws Exception {
+    List<PrivilegeValidator> validators = SimpleSearchPolicyEngine.createPrivilegeValidators();
+    PrivilegeValidatorContext context = new PrivilegeValidatorContext(null, privilegeStr);
+    for (PrivilegeValidator validator : validators) {
+      try {
+        validator.validate(context);
+      } catch (ConfigurationException e) {
+        throw new IllegalArgumentException(e);
+      }
+    }
+  }
+}
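To make the accepted string format concrete: with Sentry's standard separators behind AUTHORIZABLE_SPLITTER and KV_JOINER ("->" between authorizable parts, "=" within a part), a Solr privilege round-trips through the convertor roughly as sketched below; the component and service values are illustrative.

import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege;
import org.apache.sentry.provider.db.generic.tools.SolrTSentryPrivilegeConvertor;

public class ConvertorRoundTrip {
  public static void main(String[] args) throws Exception {
    SolrTSentryPrivilegeConvertor convertor =
        new SolrTSentryPrivilegeConvertor("SOLR", "service1");
    // One Collection authorizable plus an action.
    TSentryPrivilege privilege = convertor.fromString("collection=logs->action=query");
    // Re-joined with the same separators; the grant option is only appended
    // when it is TRUE on the privilege.
    System.out.println(convertor.toString(privilege)); // collection=logs->action=query
  }
}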
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/AddRoleToGroupCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/AddRoleToGroupCmd.java
new file mode 100644
index 000000000..a45d7e4ec
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/AddRoleToGroupCmd.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import com.google.common.collect.Sets;
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+import org.apache.sentry.provider.db.tools.SentryShellCommon;
+
+import java.util.Set;
+
+/**
+ * Command for adding groups to a role.
+ */
+public class AddRoleToGroupCmd implements Command {
+
+  private String roleName;
+  private String groups;
+  private String component;
+
+  public AddRoleToGroupCmd(String roleName, String groups, String component) {
+    this.roleName = roleName;
+    this.groups = groups;
+    this.component = component;
+  }
+
+  @Override
+  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
+    Set<String> groupSet = Sets.newHashSet(groups.split(SentryShellCommon.GROUP_SPLIT_CHAR));
+    client.addRoleToGroups(requestorName, roleName, component, groupSet);
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/Command.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/Command.java
new file mode 100644
index 000000000..e824fb3ba
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/Command.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+
+/**
+ * The interface for all admin commands, e.g. CreateRoleCmd.
+ */
+public interface Command {
+  void execute(SentryGenericServiceClient client, String requestorName) throws Exception;
+}
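Each shell operation is one small Command implementation, so adding an operation means adding a class rather than growing the dispatch in run(). A hypothetical example of the shape a new command takes, built only from client calls that already appear in this patch (listAllRoles); the class itself is invented for illustration and assumes the same package as the other commands:

import java.util.Set;
import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
import org.apache.sentry.provider.db.generic.service.thrift.TSentryRole;

// Hypothetical: counts the roles visible to the requestor for a component.
public class CountRolesCmd implements Command {

  private String component;

  public CountRolesCmd(String component) {
    this.component = component;
  }

  @Override
  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
    Set<TSentryRole> roles = client.listAllRoles(requestorName, component);
    System.out.println(roles == null ? 0 : roles.size());
  }
}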
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/CreateRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/CreateRoleCmd.java
new file mode 100644
index 000000000..da60a6435
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/CreateRoleCmd.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+
+/**
+ * The class for admin command to create role.
+ */
+public class CreateRoleCmd implements Command {
+
+  private String roleName;
+  private String component;
+
+  public CreateRoleCmd(String roleName, String component) {
+    this.roleName = roleName;
+    this.component = component;
+  }
+
+  @Override
+  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
+    client.createRole(requestorName, roleName, component);
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/DeleteRoleFromGroupCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/DeleteRoleFromGroupCmd.java
new file mode 100644
index 000000000..95f39ea51
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/DeleteRoleFromGroupCmd.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import com.google.common.collect.Sets;
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+import org.apache.sentry.provider.db.tools.SentryShellCommon;
+
+import java.util.Set;
+
+/**
+ * Command for deleting groups from a role.
+ */
+public class DeleteRoleFromGroupCmd implements Command {
+
+  private String roleName;
+  private String groups;
+  private String component;
+
+  public DeleteRoleFromGroupCmd(String roleName, String groups, String component) {
+    this.groups = groups;
+    this.roleName = roleName;
+    this.component = component;
+  }
+
+  @Override
+  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
+    Set<String> groupSet = Sets.newHashSet(groups.split(SentryShellCommon.GROUP_SPLIT_CHAR));
+    client.deleteRoleToGroups(requestorName, roleName, component, groupSet);
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/DropRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/DropRoleCmd.java
new file mode 100644
index 000000000..ac2a328b1
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/DropRoleCmd.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+
+/**
+ * The class for admin command to drop role.
+ */
+public class DropRoleCmd implements Command {
+
+  private String roleName;
+  private String component;
+
+  public DropRoleCmd(String roleName, String component) {
+    this.roleName = roleName;
+    this.component = component;
+  }
+
+  @Override
+  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
+    client.dropRole(requestorName, roleName, component);
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/GrantPrivilegeToRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/GrantPrivilegeToRoleCmd.java
new file mode 100644
index 000000000..586798360
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/GrantPrivilegeToRoleCmd.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege;
+
+/**
+ * The class for admin command to grant privilege to role.
+ */
+public class GrantPrivilegeToRoleCmd implements Command {
+
+  private String roleName;
+  private String component;
+  private String privilegeStr;
+  private TSentryPrivilegeConvertor convertor;
+
+  public GrantPrivilegeToRoleCmd(String roleName, String component, String privilegeStr,
+      TSentryPrivilegeConvertor convertor) {
+    this.roleName = roleName;
+    this.component = component;
+    this.privilegeStr = privilegeStr;
+    this.convertor = convertor;
+  }
+
+  @Override
+  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
+    TSentryPrivilege privilege = convertor.fromString(privilegeStr);
+    client.grantPrivilege(requestorName, roleName, component, privilege);
+
+  }
+}
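Wiring a convertor into the command keeps the whole grant path in one place. A sketch of how run() effectively assembles and executes a grant; the role, privilege string, and requestor are illustrative, and in the shell the requestor comes from UGI rather than a literal:

import org.apache.hadoop.conf.Configuration;
import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory;
import org.apache.sentry.provider.db.generic.tools.SolrTSentryPrivilegeConvertor;
import org.apache.sentry.provider.db.generic.tools.command.Command;
import org.apache.sentry.provider.db.generic.tools.command.GrantPrivilegeToRoleCmd;

public class GrantExample {
  public static void main(String[] args) throws Exception {
    // Assumes a reachable Sentry service configured in the default Configuration.
    SentryGenericServiceClient client =
        SentryGenericServiceClientFactory.create(new Configuration());
    Command grant = new GrantPrivilegeToRoleCmd("analyst_role", "SOLR",
        "collection=logs->action=update",
        new SolrTSentryPrivilegeConvertor("SOLR", "service1"));
    grant.execute(client, "admin"); // requestor name, normally from UGI
  }
}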
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/ListPrivilegesByRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/ListPrivilegesByRoleCmd.java
new file mode 100644
index 000000000..8420291a9
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/ListPrivilegesByRoleCmd.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege;
+
+import java.util.Set;
+
+/**
+ * The class for admin command to list privileges by role.
+ */
+public class ListPrivilegesByRoleCmd implements Command {
+
+  private String roleName;
+  private String component;
+  private String serviceName;
+  private TSentryPrivilegeConvertor convertor;
+
+  public ListPrivilegesByRoleCmd(String roleName, String component, String serviceName,
+      TSentryPrivilegeConvertor convertor) {
+    this.roleName = roleName;
+    this.component = component;
+    this.serviceName = serviceName;
+    this.convertor = convertor;
+  }
+
+  @Override
+  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
+    Set<TSentryPrivilege> privileges = client
+        .listPrivilegesByRoleName(requestorName, roleName, component, serviceName);
+    if (privileges != null) {
+      for (TSentryPrivilege privilege : privileges) {
+        String privilegeStr = convertor.toString(privilege);
+        System.out.println(privilegeStr);
+      }
+    }
+  }
+}
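The listing path is the mirror image of the grant path: the client returns thrift privileges and the convertor turns each one back into the string form. A short sketch, with the client and requestor obtained as in the previous example and illustrative role/service values:

// Prints one privilege string per line for the role, formatted by the same
// convertor the shell uses.
Command list = new ListPrivilegesByRoleCmd("analyst_role", "SOLR", "service1",
    new SolrTSentryPrivilegeConvertor("SOLR", "service1"));
list.execute(client, "admin");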
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/ListRolesCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/ListRolesCmd.java
new file mode 100644
index 000000000..6b68d06a4
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/ListRolesCmd.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+import org.apache.sentry.provider.db.generic.service.thrift.TSentryRole;
+
+import java.util.Set;
+
+/**
+ * The class for admin command to list roles.
+ */
+public class ListRolesCmd implements Command {
+
+  private String groupName;
+  private String component;
+
+  public ListRolesCmd(String groupName, String component) {
+    this.groupName = groupName;
+    this.component = component;
+  }
+
+  @Override
+  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
+    Set<TSentryRole> roles;
+    if (StringUtils.isEmpty(groupName)) {
+      roles = client.listAllRoles(requestorName, component);
+    } else {
+      roles = client.listRolesByGroupName(requestorName, groupName, component);
+    }
+    if (roles != null) {
+      for (TSentryRole role : roles) {
+        System.out.println(role.getRoleName());
+      }
+    }
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/RevokePrivilegeFromRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/RevokePrivilegeFromRoleCmd.java
new file mode 100644
index 000000000..fba17e62b
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/RevokePrivilegeFromRoleCmd.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege;
+
+/**
+ * The class for admin command to revoke privileges from role.
+ */
+public class RevokePrivilegeFromRoleCmd implements Command {
+
+  private String roleName;
+  private String component;
+  private String privilegeStr;
+  private TSentryPrivilegeConvertor convertor;
+
+  public RevokePrivilegeFromRoleCmd(String roleName, String component, String privilegeStr,
+      TSentryPrivilegeConvertor convertor) {
+    this.roleName = roleName;
+    this.component = component;
+    this.privilegeStr = privilegeStr;
+    this.convertor = convertor;
+  }
+
+  @Override
+  public void execute(SentryGenericServiceClient client, String requestorName) throws Exception {
+    TSentryPrivilege privilege = convertor.fromString(privilegeStr);
+    client.revokePrivilege(requestorName, roleName, component, privilege);
+  }
+
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/TSentryPrivilegeConvertor.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/TSentryPrivilegeConvertor.java
new file mode 100644
index 000000000..f8723412f
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/generic/tools/command/TSentryPrivilegeConvertor.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db.generic.tools.command;
+
+import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege;
+
+public interface TSentryPrivilegeConvertor {
+
+  /**
+   * Convert string to privilege
+   */
+  TSentryPrivilege fromString(String privilegeStr) throws Exception;
+
+  /**
+   * Convert privilege to string
+   */
+  String toString(TSentryPrivilege tSentryPrivilege);
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/appender/AuditLoggerTestAppender.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/appender/AuditLoggerTestAppender.java
new file mode 100644
index 000000000..6eb1f0af8
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/appender/AuditLoggerTestAppender.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db.log.appender;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.spi.LoggingEvent;
+
+import com.google.common.annotations.VisibleForTesting;
+
+@VisibleForTesting
+public class AuditLoggerTestAppender extends AppenderSkeleton {
+  public static List<LoggingEvent> events = new ArrayList<LoggingEvent>();
+
+  public void close() {
+  }
+
+  public boolean requiresLayout() {
+    return false;
+  }
+
+  @Override
+  protected void append(LoggingEvent event) {
+    events.add(event);
+  }
+
+  public static String getLastLogEvent() {
+    return events.get(events.size() - 1).getMessage().toString();
+  }
+
+  public static Level getLastLogLevel() {
+    return events.get(events.size() - 1).getLevel();
+  }
+}
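Because the appender just accumulates LoggingEvents in a static list, a test can attach it to the audit logger and make assertions without touching the filesystem. A sketch against the log4j 1.x API that this class extends; the logger name comes from Constants later in this patch, and the payload is illustrative:

import org.apache.log4j.Logger;
import org.apache.sentry.provider.db.log.appender.AuditLoggerTestAppender;
import org.apache.sentry.provider.db.log.util.Constants;

public class AppenderSketch {
  public static void main(String[] args) {
    Logger audit = Logger.getLogger(Constants.AUDIT_LOGGER_NAME);
    audit.addAppender(new AuditLoggerTestAppender());
    audit.info("{\"operation\":\"CREATE_ROLE\"}"); // illustrative payload
    System.out.println(AuditLoggerTestAppender.getLastLogEvent());
    System.out.println(AuditLoggerTestAppender.getLastLogLevel()); // INFO
  }
}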
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/appender/RollingFileWithoutDeleteAppender.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/appender/RollingFileWithoutDeleteAppender.java
index edbd16014..b8dafc804 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/appender/RollingFileWithoutDeleteAppender.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/appender/RollingFileWithoutDeleteAppender.java
@@ -57,7 +57,7 @@ public RollingFileWithoutDeleteAppender() {
    */
   public RollingFileWithoutDeleteAppender(Layout layout, String filename,
       boolean append) throws IOException {
-    super(layout, filename, append);
+    super(layout, getLogFileName(filename), append);
   }
 
   /**
@@ -69,7 +69,7 @@ public RollingFileWithoutDeleteAppender(Layout layout, String filename,
    */
   public RollingFileWithoutDeleteAppender(Layout layout, String filename)
       throws IOException {
-    super(layout, filename);
+    super(layout, getLogFileName(filename));
   }
 
   /**
@@ -88,10 +88,6 @@ public long getMaximumFileSize() {
    */
   // synchronization not necessary since doAppend is already synched
   public void rollOver() {
-    File target;
-    File file;
-    String suffix = Long.toString(System.currentTimeMillis());
-
     if (qw != null) {
       long size = ((CountingQuietWriter) qw).getCount();
       LogLog.debug("rolling over count=" + size);
@@ -100,40 +96,19 @@ public void rollOver() {
       nextRollover = size + maxFileSize;
     }
 
-    boolean renameSucceeded = true;
-
-    // Rename fileName to fileName.yyyyMMddHHmmss
-    target = new File(fileName + "." + suffix);
-    this.closeFile(); // keep windows happy.
-    file = new File(fileName);
-    LogLog.debug("Renaming file " + file + " to " + target);
-    renameSucceeded = file.renameTo(target);
-    //
-    // if file rename failed, reopen file with append = true
-    //
-    if (!renameSucceeded) {
-      try {
-        this.setFile(fileName, true, bufferedIO, bufferSize);
-      } catch (IOException e) {
-        if (e instanceof InterruptedIOException) {
-          Thread.currentThread().interrupt();
-        }
-        LogLog.error("setFile(" + fileName + ", true) call failed.", e);
-      }
-    } else {
-      try {
-        // This will also close the file. This is OK since multiple
-        // close operations are safe.
-        this.setFile(fileName, false, bufferedIO, bufferSize);
-        nextRollover = 0;
-      } catch (IOException e) {
-        if (e instanceof InterruptedIOException) {
-          Thread.currentThread().interrupt();
-        }
-        LogLog.error("setFile(" + fileName + ", false) call failed.", e);
+    String newFileName = getLogFileName(fileName);
+    try {
+      // This will also close the file. This is OK since multiple
+      // close operations are safe.
+      this.setFile(newFileName, false, bufferedIO, bufferSize);
+      nextRollover = 0;
+    } catch (IOException e) {
+      if (e instanceof InterruptedIOException) {
+        Thread.currentThread().interrupt();
       }
+      LogLog.error("setFile(" + newFileName + ", false) call failed.", e);
     }
   }
 
@@ -154,7 +129,7 @@ public synchronized void setFile(String fileName, boolean append,
    * required for differentiating the setter taking a long argument
    * from the setter taking a String argument by the JavaBeans
    * {@link java.beans.Introspector Introspector}.
-   * 
+   *
    * @see #setMaxFileSize(String)
    */
   public void setMaximumFileSize(long maxFileSize) {
@@ -192,4 +167,9 @@ protected void subAppend(LoggingEvent event) {
       }
     }
   }
+
+  // Mangle the file name: append the current timestamp.
+  private static String getLogFileName(String oldFileName) {
+    return oldFileName + "." + Long.toString(System.currentTimeMillis());
+  }
 }
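The net effect of routing every open and rollover through getLogFileName() is that the appender never renames or deletes anything: each (re)open writes to a fresh timestamp-suffixed file. A worked sketch of the naming, with invented timestamps and an assumed log path:

// Hypothetical timeline for an appender configured with /var/log/sentry/audit.log:
//   first open -> /var/log/sentry/audit.log.1419895125000
//   rollover   -> /var/log/sentry/audit.log.1419895126348
// i.e. always the configured name plus "." plus System.currentTimeMillis():
String mangled = "/var/log/sentry/audit.log" + "." + System.currentTimeMillis();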
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/AuditMetadataLogEntity.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/AuditMetadataLogEntity.java
index 6b6304527..f3eb95ba6 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/AuditMetadataLogEntity.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/AuditMetadataLogEntity.java
@@ -19,45 +19,30 @@
 package org.apache.sentry.provider.db.log.entity;
 
 import java.io.IOException;
-import java.io.StringWriter;
 
-import org.apache.sentry.provider.db.log.util.Constants;
 import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerator;
 import org.codehaus.jackson.JsonNode;
 import org.codehaus.jackson.map.MappingJsonFactory;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.node.ContainerNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class AuditMetadataLogEntity implements JsonLogEntity {
-
-  private static final Logger LOGGER = LoggerFactory
-      .getLogger(AuditMetadataLogEntity.class);
-  private static final JsonFactory factory = new MappingJsonFactory();
-  private String serviceName;
-  private String userName;
-  private String impersonator;
-  private String ipAddress;
-  private String operation;
-  private String eventTime;
-  private String operationText;
-  private String allowed;
-  private String databaseName;
-  private String tableName;
-  private String columnName;
-  private String resourcePath;
-  private String objectType;
-
-  public AuditMetadataLogEntity() {
-  }
-
-  public AuditMetadataLogEntity(String serviceName, String userName,
-      String impersonator, String ipAddress, String operation,
-      String eventTime, String operationText, String allowed,
-      String databaseName, String tableName, String columnName,
-      String resourcePath, String objectType) {
+
+abstract public class AuditMetadataLogEntity implements JsonLogEntity {
+
+  static final JsonFactory factory = new MappingJsonFactory();
+  String serviceName;
+  String userName;
+  String impersonator;
+  String ipAddress;
+  String operation;
+  String eventTime;
+  String operationText;
+  String allowed;
+  String objectType;
+  String component;
+
+  void setCommonAttr(String serviceName, String userName, String impersonator, String ipAddress,
+      String operation, String eventTime, String operationText, String allowed, String objectType,
+      String component) {
     this.serviceName = serviceName;
     this.userName = userName;
     this.impersonator = impersonator;
@@ -66,52 +51,8 @@ public AuditMetadataLogEntity(String serviceName, String userName,
     this.eventTime = eventTime;
     this.operationText = operationText;
     this.allowed = allowed;
-    this.databaseName = databaseName;
-    this.tableName = tableName;
-    this.columnName = columnName;
-    this.resourcePath = resourcePath;
     this.objectType = objectType;
-  }
-
-  @Override
-  public String toJsonFormatLog() {
-    StringWriter stringWriter = new StringWriter();
-    JsonGenerator json = null;
-    try {
-      json = factory.createJsonGenerator(stringWriter);
-      json.writeStartObject();
-      json.writeStringField(Constants.LOG_FIELD_SERVICE_NAME, serviceName);
-      json.writeStringField(Constants.LOG_FIELD_USER_NAME, userName);
-      json.writeStringField(Constants.LOG_FIELD_IMPERSONATOR, impersonator);
-      json.writeStringField(Constants.LOG_FIELD_IP_ADDRESS, ipAddress);
-      json.writeStringField(Constants.LOG_FIELD_OPERATION, operation);
-      json.writeStringField(Constants.LOG_FIELD_EVENT_TIME, eventTime);
-      json.writeStringField(Constants.LOG_FIELD_OPERATION_TEXT, operationText);
-      json.writeStringField(Constants.LOG_FIELD_ALLOWED, allowed);
-      json.writeStringField(Constants.LOG_FIELD_DATABASE_NAME, databaseName);
-      json.writeStringField(Constants.LOG_FIELD_TABLE_NAME, tableName);
-      json.writeStringField(Constants.LOG_FIELD_COLUMN_NAME, columnName);
-      json.writeStringField(Constants.LOG_FIELD_RESOURCE_PATH, resourcePath);
-      json.writeStringField(Constants.LOG_FIELD_OBJECT_TYPE, objectType);
-      json.writeEndObject();
-      json.flush();
-    } catch (IOException e) {
-      // if there has error when creating the audit log in json, set the audit
-      // log to empty.
-      stringWriter = new StringWriter();
-      String msg = "Error creating audit log in json format: " + e.getMessage();
-      LOGGER.error(msg, e);
-    } finally {
-      try {
-        if (json != null) {
-          json.close();
-        }
-      } catch (IOException e) {
-        LOGGER.error("Error closing JsonGenerator", e);
-      }
-    }
-
-    return stringWriter.toString();
+    this.component = component;
   }
 
   public String getServiceName() {
@@ -178,38 +119,6 @@ public void setAllowed(String allowed) {
     this.allowed = allowed;
   }
 
-  public String getDatabaseName() {
-    return databaseName;
-  }
-
-  public void setDatabaseName(String databaseName) {
-    this.databaseName = databaseName;
-  }
-
-  public String getTableName() {
-    return tableName;
-  }
-
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  public String getColumnName() {
-    return columnName;
-  }
-
-  public void setColumnName(String columnName) {
-    this.columnName = columnName;
-  }
-
-  public String getResourcePath() {
-    return resourcePath;
-  }
-
-  public void setResourcePath(String resourcePath) {
-    this.resourcePath = resourcePath;
-  }
-
   public String getObjectType() {
     return objectType;
   }
@@ -218,6 +127,14 @@ public void setObjectType(String objectType) {
     this.objectType = objectType;
   }
 
+  public String getComponent() {
+    return component;
+  }
+
+  public void setComponent(String component) {
+    this.component = component;
+  }
+
   /**
    * For use in tests
    *
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/DBAuditMetadataLogEntity.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/DBAuditMetadataLogEntity.java
new file mode 100644
index 000000000..95afe5211
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/DBAuditMetadataLogEntity.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db.log.entity;
+
+import java.io.IOException;
+import java.io.StringWriter;
+
+import org.apache.sentry.provider.db.log.util.Constants;
+import org.codehaus.jackson.JsonGenerator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class DBAuditMetadataLogEntity extends AuditMetadataLogEntity {
+  private static final Logger LOGGER = LoggerFactory.getLogger(DBAuditMetadataLogEntity.class);
+
+  private String databaseName;
+  private String tableName;
+  private String columnName;
+  private String resourcePath;
+
+  public DBAuditMetadataLogEntity() {
+  }
+
+  public DBAuditMetadataLogEntity(String serviceName, String userName, String impersonator,
+      String ipAddress, String operation, String eventTime, String operationText, String allowed,
+      String objectType, String component, String databaseName, String tableName,
+      String columnName, String resourcePath) {
+    setCommonAttr(serviceName, userName, impersonator, ipAddress, operation, eventTime,
+        operationText, allowed, objectType, component);
+    this.databaseName = databaseName;
+    this.tableName = tableName;
+    this.columnName = columnName;
+    this.resourcePath = resourcePath;
+  }
+
+  public String getDatabaseName() {
+    return databaseName;
+  }
+
+  public void setDatabaseName(String databaseName) {
+    this.databaseName = databaseName;
+  }
+
+  public String getTableName() {
+    return tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public String getColumnName() {
+    return columnName;
+  }
+
+  public void setColumnName(String columnName) {
+    this.columnName = columnName;
+  }
+
+  public String getResourcePath() {
+    return resourcePath;
+  }
+
+  public void setResourcePath(String resourcePath) {
+    this.resourcePath = resourcePath;
+  }
+
+  @Override
+  public String toJsonFormatLog() throws Exception {
+    StringWriter stringWriter = new StringWriter();
+    JsonGenerator json = null;
+    try {
+      json = factory.createJsonGenerator(stringWriter);
+      json.writeStartObject();
+      json.writeStringField(Constants.LOG_FIELD_SERVICE_NAME, serviceName);
+      json.writeStringField(Constants.LOG_FIELD_USER_NAME, userName);
+      json.writeStringField(Constants.LOG_FIELD_IMPERSONATOR, impersonator);
+      json.writeStringField(Constants.LOG_FIELD_IP_ADDRESS, ipAddress);
+      json.writeStringField(Constants.LOG_FIELD_OPERATION, operation);
+      json.writeStringField(Constants.LOG_FIELD_EVENT_TIME, eventTime);
+      json.writeStringField(Constants.LOG_FIELD_OPERATION_TEXT, operationText);
+      json.writeStringField(Constants.LOG_FIELD_ALLOWED, allowed);
+      json.writeStringField(Constants.LOG_FIELD_DATABASE_NAME, databaseName);
+      json.writeStringField(Constants.LOG_FIELD_TABLE_NAME, tableName);
+      json.writeStringField(Constants.LOG_FIELD_COLUMN_NAME, columnName);
+      json.writeStringField(Constants.LOG_FIELD_RESOURCE_PATH, resourcePath);
+      json.writeStringField(Constants.LOG_FIELD_OBJECT_TYPE, objectType);
+      json.writeEndObject();
+      json.flush();
+    } catch (IOException e) {
+      String msg = "Error creating audit log in json format: " + e.getMessage();
+      LOGGER.error(msg, e);
+      throw e;
+    } finally {
+      try {
+        if (json != null) {
+          json.close();
+        }
+      } catch (IOException e) {
+        throw e;
+      }
+    }
+
+    return stringWriter.toString();
+  }
+}
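For reference, a record produced by toJsonFormatLog() is a single flat JSON object. A sketch of constructing one and the rough shape of the output; every field value below is invented, and the constructor's fourteen arguments follow the order in the signature above:

DBAuditMetadataLogEntity entity = new DBAuditMetadataLogEntity(
    "Sentry-Service", "hive", "hive", "10.0.0.1",
    "GRANT_PRIVILEGE", Long.toString(System.currentTimeMillis()),
    "GRANT SELECT ON TABLE t1 TO ROLE analyst_role", "true",
    "ROLE", null /* component: hive path leaves it unset */,
    "db1", "t1", null, null);
System.out.println(entity.toJsonFormatLog());
// Roughly: {"serviceName":"Sentry-Service","userName":"hive",...,"allowed":"true",...}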
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/GMAuditMetadataLogEntity.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/GMAuditMetadataLogEntity.java
new file mode 100644
index 000000000..25d55e0f6
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/GMAuditMetadataLogEntity.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db.log.entity;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.sentry.provider.db.log.util.Constants;
+import org.codehaus.jackson.JsonGenerator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class GMAuditMetadataLogEntity extends AuditMetadataLogEntity {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(GMAuditMetadataLogEntity.class);
+  private Map<String, String> privilegesMap;
+
+  public GMAuditMetadataLogEntity() {
+    privilegesMap = new LinkedHashMap<String, String>();
+  }
+
+  public GMAuditMetadataLogEntity(String serviceName, String userName, String impersonator,
+      String ipAddress, String operation, String eventTime, String operationText, String allowed,
+      String objectType, String component, Map<String, String> privilegesMap) {
+    setCommonAttr(serviceName, userName, impersonator, ipAddress, operation, eventTime,
+        operationText, allowed, objectType, component);
+    this.privilegesMap = privilegesMap;
+  }
+
+  @Override
+  public String toJsonFormatLog() throws Exception {
+    StringWriter stringWriter = new StringWriter();
+    JsonGenerator json = null;
+    try {
+      json = factory.createJsonGenerator(stringWriter);
+      json.writeStartObject();
+      json.writeStringField(Constants.LOG_FIELD_SERVICE_NAME, serviceName);
+      json.writeStringField(Constants.LOG_FIELD_USER_NAME, userName);
+      json.writeStringField(Constants.LOG_FIELD_IMPERSONATOR, impersonator);
+      json.writeStringField(Constants.LOG_FIELD_IP_ADDRESS, ipAddress);
+      json.writeStringField(Constants.LOG_FIELD_OPERATION, operation);
+      json.writeStringField(Constants.LOG_FIELD_EVENT_TIME, eventTime);
+      json.writeStringField(Constants.LOG_FIELD_OPERATION_TEXT, operationText);
+      json.writeStringField(Constants.LOG_FIELD_ALLOWED, allowed);
+      for (Map.Entry<String, String> entry : privilegesMap.entrySet()) {
+        json.writeStringField(entry.getKey(), entry.getValue());
+      }
+      json.writeStringField(Constants.LOG_FIELD_OBJECT_TYPE, objectType);
+      json.writeStringField(Constants.LOG_FIELD_COMPONENT, component);
+      json.writeEndObject();
+      json.flush();
+    } catch (IOException e) {
+      String msg = "Error creating audit log in json format: " + e.getMessage();
+      LOGGER.error(msg, e);
+      throw e;
+    } finally {
+      try {
+        if (json != null) {
+          json.close();
+        }
+      } catch (IOException e) {
+        throw e;
+      }
+    }
+
+    return stringWriter.toString();
+  }
+
+  public Map<String, String> getPrivilegesMap() {
+    return privilegesMap;
+  }
+
+  public void setPrivilegesMap(Map<String, String> privilegesMap) {
+    this.privilegesMap = privilegesMap;
+  }
+
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/JsonLogEntity.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/JsonLogEntity.java
index 7ad696608..913f125c5 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/JsonLogEntity.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/JsonLogEntity.java
@@ -20,6 +20,6 @@
 
 public interface JsonLogEntity {
 
-  public String toJsonFormatLog();
+  String toJsonFormatLog() throws Exception;
 
 }
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/JsonLogEntityFactory.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/JsonLogEntityFactory.java
index 90308f442..c29b88e38 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/JsonLogEntityFactory.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/entity/JsonLogEntityFactory.java
@@ -18,9 +18,14 @@
 
 package org.apache.sentry.provider.db.log.entity;
 
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable;
 import org.apache.sentry.provider.db.log.util.CommandUtil;
 import org.apache.sentry.provider.db.log.util.Constants;
 import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsRequest;
@@ -35,11 +40,14 @@ import org.apache.sentry.provider.db.service.thrift.TCreateSentryRoleResponse;
 import org.apache.sentry.provider.db.service.thrift.TDropSentryRoleRequest;
 import org.apache.sentry.provider.db.service.thrift.TDropSentryRoleResponse;
+import org.apache.sentry.provider.db.service.thrift.TSentryGroup;
 import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
+import org.apache.sentry.provider.db.service.thrift.ThriftUtil;
 import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
 import org.apache.sentry.service.thrift.Status;
 import org.apache.sentry.service.thrift.TSentryResponseStatus;
 
+import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableSet;
 
 public class JsonLogEntityFactory {
@@ -53,26 +61,29 @@ public static JsonLogEntityFactory getInstance() {
     return factory;
   }
 
+  // log entity for hive/impala create role
   public JsonLogEntity createJsonLogEntity(TCreateSentryRoleRequest request,
       TCreateSentryRoleResponse response, Configuration conf) {
-    AuditMetadataLogEntity amle = createCommonAMLE(conf, response.getStatus(),
+    DBAuditMetadataLogEntity hamle = createCommonHAMLE(conf, response.getStatus(),
         request.getRequestorUserName(), request.getClass().getName());
-    amle.setOperationText(CommandUtil.createCmdForCreateOrDropRole(
+    hamle.setOperationText(CommandUtil.createCmdForCreateOrDropRole(
         request.getRoleName(), true));
-    return amle;
+    return hamle;
   }
 
+  // log entity for hive/impala drop role
  public JsonLogEntity createJsonLogEntity(TDropSentryRoleRequest request,
       TDropSentryRoleResponse response, Configuration conf) {
-    AuditMetadataLogEntity amle = createCommonAMLE(conf, response.getStatus(),
+    DBAuditMetadataLogEntity hamle = createCommonHAMLE(conf, response.getStatus(),
        request.getRequestorUserName(), request.getClass().getName());
-    amle.setOperationText(CommandUtil.createCmdForCreateOrDropRole(
+    hamle.setOperationText(CommandUtil.createCmdForCreateOrDropRole(
        request.getRoleName(), false));
-    return amle;
+    return hamle;
  }
 
+  // log entity for hive/impala grant privilege
   public Set<JsonLogEntity> createJsonLogEntitys(
       TAlterSentryRoleGrantPrivilegeRequest request,
       TAlterSentryRoleGrantPrivilegeResponse response, Configuration conf) {
@@ -89,15 +100,16 @@ public Set<JsonLogEntity> createJsonLogEntitys(
   private JsonLogEntity createJsonLogEntity(
       TAlterSentryRoleGrantPrivilegeRequest request, TSentryPrivilege privilege,
       TAlterSentryRoleGrantPrivilegeResponse response, Configuration conf) {
-    AuditMetadataLogEntity amle = createCommonAMLE(conf, response.getStatus(),
+    DBAuditMetadataLogEntity hamle = createCommonHAMLE(conf, response.getStatus(),
         request.getRequestorUserName(), request.getClass().getName());
-    amle.setOperationText(CommandUtil.createCmdForGrantPrivilege(request));
-    amle.setDatabaseName(privilege.getDbName());
-    amle.setTableName(privilege.getTableName());
-    amle.setResourcePath(privilege.getURI());
-    return amle;
+    hamle.setOperationText(CommandUtil.createCmdForGrantPrivilege(request));
+    hamle.setDatabaseName(privilege.getDbName());
+    hamle.setTableName(privilege.getTableName());
+    hamle.setResourcePath(privilege.getURI());
+    return hamle;
   }
 
+  // log entity for hive/impala revoke privilege
   public Set<JsonLogEntity> createJsonLogEntitys(
       TAlterSentryRoleRevokePrivilegeRequest request,
       TAlterSentryRoleRevokePrivilegeResponse response, Configuration conf) {
@@ -114,34 +126,54 @@ public Set<JsonLogEntity> createJsonLogEntitys(
   private JsonLogEntity createJsonLogEntity(
       TAlterSentryRoleRevokePrivilegeRequest request, TSentryPrivilege privilege,
       TAlterSentryRoleRevokePrivilegeResponse response, Configuration conf) {
-    AuditMetadataLogEntity amle = createCommonAMLE(conf, response.getStatus(),
+    DBAuditMetadataLogEntity hamle = createCommonHAMLE(conf, response.getStatus(),
         request.getRequestorUserName(), request.getClass().getName());
-    amle.setOperationText(CommandUtil.createCmdForRevokePrivilege(request));
-    amle.setDatabaseName(privilege.getDbName());
-    amle.setTableName(privilege.getTableName());
-    amle.setResourcePath(privilege.getURI());
+    hamle.setOperationText(CommandUtil.createCmdForRevokePrivilege(request));
+    hamle.setDatabaseName(privilege.getDbName());
+    hamle.setTableName(privilege.getTableName());
+    hamle.setResourcePath(privilege.getURI());
 
-    return amle;
+    return hamle;
   }
 
+  // log entity for hive/impala add role to group
   public JsonLogEntity createJsonLogEntity(
       TAlterSentryRoleAddGroupsRequest request,
       TAlterSentryRoleAddGroupsResponse response, Configuration conf) {
-    AuditMetadataLogEntity amle = createCommonAMLE(conf, response.getStatus(),
+    DBAuditMetadataLogEntity hamle = createCommonHAMLE(conf, response.getStatus(),
         request.getRequestorUserName(), request.getClass().getName());
-    amle.setOperationText(CommandUtil.createCmdForRoleAddGroup(request));
+    String groups = getGroupsStr(request.getGroupsIterator());
+    hamle.setOperationText(CommandUtil.createCmdForRoleAddGroup(request.getRoleName(), groups));
 
-    return amle;
+    return hamle;
   }
 
+  // log entity for hive/impala delete role from group
   public JsonLogEntity createJsonLogEntity(
       TAlterSentryRoleDeleteGroupsRequest request,
       TAlterSentryRoleDeleteGroupsResponse response, Configuration conf) {
-    AuditMetadataLogEntity amle = createCommonAMLE(conf, response.getStatus(),
+    DBAuditMetadataLogEntity hamle = createCommonHAMLE(conf, response.getStatus(),
        request.getRequestorUserName(), request.getClass().getName());
-    amle.setOperationText(CommandUtil.createCmdForRoleDeleteGroup(request));
+    String groups = getGroupsStr(request.getGroupsIterator());
+    hamle.setOperationText(CommandUtil.createCmdForRoleDeleteGroup(request.getRoleName(), groups));
 
-    return amle;
+    return hamle;
+  }
+
+  private String getGroupsStr(Iterator<TSentryGroup> iter) {
+    StringBuilder groups = new StringBuilder("");
+    if (iter != null) {
+      boolean commaFlg = false;
+      while (iter.hasNext()) {
+        if (commaFlg) {
+          groups.append(", ");
+        } else {
+          commaFlg = true;
+        }
+        groups.append(iter.next().getGroupName());
+      }
+    }
+    return groups.toString();
   }
 
   public String isAllowed(TSentryResponseStatus status) {
@@ -151,20 +183,129 @@ public String isAllowed(TSentryResponseStatus status) {
     return Constants.FALSE;
   }
 
-  private AuditMetadataLogEntity createCommonAMLE(Configuration conf,
-      TSentryResponseStatus responseStatus, String userName,
-      String requestClassName) {
-    AuditMetadataLogEntity amle = new AuditMetadataLogEntity();
+  // log entity for generic model create role
+  public JsonLogEntity createJsonLogEntity(
+      org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleRequest request,
+      org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleResponse response,
+      Configuration conf) {
+    GMAuditMetadataLogEntity gmamle = createCommonGMAMLE(conf, response.getStatus(),
+        request.getRequestorUserName(), request.getClass().getName(), request.getComponent());
+    gmamle.setOperationText(CommandUtil.createCmdForCreateOrDropRole(request.getRoleName(), true));
+
+    return gmamle;
+  }
+
+  // log entity for generic model drop role
+  public JsonLogEntity createJsonLogEntity(
+      org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleRequest request,
+      org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleResponse response,
+      Configuration conf) {
+    GMAuditMetadataLogEntity gmamle = createCommonGMAMLE(conf, response.getStatus(),
+        request.getRequestorUserName(), request.getClass().getName(), request.getComponent());
+    gmamle.setOperationText(CommandUtil.createCmdForCreateOrDropRole(request.getRoleName(), false));
+
+    return gmamle;
+  }
+
+  // log entity for generic model grant privilege
+  public JsonLogEntity createJsonLogEntity(
+      org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest request,
+      org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeResponse response,
+      Configuration conf) {
+    GMAuditMetadataLogEntity gmamle = createCommonGMAMLE(conf, response.getStatus(),
+        request.getRequestorUserName(), request.getClass().getName(), request.getComponent());
+    if (request.getPrivilege() != null) {
+      List<TAuthorizable> authorizables = request.getPrivilege().getAuthorizables();
+      Map<String, String> privilegesMap = new LinkedHashMap<String, String>();
+      if (authorizables != null) {
+        for (TAuthorizable authorizable : authorizables) {
+          privilegesMap.put(authorizable.getType(), authorizable.getName());
+        }
+      }
+      gmamle.setPrivilegesMap(privilegesMap);
+    }
+    gmamle.setOperationText(CommandUtil.createCmdForGrantGMPrivilege(request));
+
+    return gmamle;
+  }
+
+  // log entity for generic model revoke privilege
+  public JsonLogEntity createJsonLogEntity(
+      org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest request,
+      org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeResponse response,
+      Configuration conf) {
+    GMAuditMetadataLogEntity gmamle = createCommonGMAMLE(conf, response.getStatus(),
+        request.getRequestorUserName(), request.getClass().getName(), request.getComponent());
+    if (request.getPrivilege() != null) {
+      List<TAuthorizable> authorizables = request.getPrivilege().getAuthorizables();
+      Map<String, String> privilegesMap = new LinkedHashMap<String, String>();
+      if (authorizables != null) {
+        for (TAuthorizable authorizable : authorizables) {
+          privilegesMap.put(authorizable.getType(), authorizable.getName());
+        }
+      }
+      gmamle.setPrivilegesMap(privilegesMap);
+    }
+    gmamle.setOperationText(CommandUtil.createCmdForRevokeGMPrivilege(request));
+
+    return gmamle;
+  }
+
+  // log entity for generic model add role to group
+  public JsonLogEntity createJsonLogEntity(
+      org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsRequest request,
+      org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsResponse response,
+      Configuration conf) {
+    GMAuditMetadataLogEntity gmamle = createCommonGMAMLE(conf, response.getStatus(),
+        request.getRequestorUserName(), request.getClass().getName(), request.getComponent());
+    Joiner joiner = Joiner.on(",");
+    String groups = joiner.join(request.getGroupsIterator());
+    gmamle.setOperationText(CommandUtil.createCmdForRoleAddGroup(request.getRoleName(), groups));
+
+    return gmamle;
+  }
+
+  // log entity for generic model delete role from group
+  public JsonLogEntity createJsonLogEntity(
+      org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsRequest request,
+      org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsResponse response,
+      Configuration conf) {
+    GMAuditMetadataLogEntity gmamle = createCommonGMAMLE(conf, response.getStatus(),
+        request.getRequestorUserName(), request.getClass().getName(), request.getComponent());
+    Joiner joiner = Joiner.on(",");
+    String groups = joiner.join(request.getGroupsIterator());
+    gmamle.setOperationText(CommandUtil.createCmdForRoleDeleteGroup(request.getRoleName(), groups));
+
+    return gmamle;
+  }
+
+  private DBAuditMetadataLogEntity createCommonHAMLE(Configuration conf,
+      TSentryResponseStatus responseStatus, String userName, String requestClassName) {
+    DBAuditMetadataLogEntity hamle = new DBAuditMetadataLogEntity();
+    setCommAttrForAMLE(hamle, conf, responseStatus, userName, requestClassName);
+    return hamle;
+  }
+
+  private GMAuditMetadataLogEntity createCommonGMAMLE(Configuration conf,
+      TSentryResponseStatus responseStatus, String userName, String requestClassName,
+      String component) {
+    GMAuditMetadataLogEntity gmamle = new GMAuditMetadataLogEntity();
+    setCommAttrForAMLE(gmamle, conf, responseStatus, userName, requestClassName);
+    gmamle.setComponent(component);
+    return gmamle;
+  }
+
+  private void setCommAttrForAMLE(AuditMetadataLogEntity amle, Configuration conf,
+      TSentryResponseStatus responseStatus, String userName, String requestClassName) {
     amle.setUserName(userName);
     amle.setServiceName(conf.get(ServerConfig.SENTRY_SERVICE_NAME,
         ServerConfig.SENTRY_SERVICE_NAME_DEFAULT).trim());
-    amle.setImpersonator(CommandUtil.getImpersonator());
-    amle.setIpAddress(CommandUtil.getIpAddress());
+    amle.setImpersonator(ThriftUtil.getImpersonator());
+    amle.setIpAddress(ThriftUtil.getIpAddress());
     amle.setOperation(Constants.requestTypeToOperationMap.get(requestClassName));
     amle.setEventTime(Long.toString(System.currentTimeMillis()));
     amle.setAllowed(isAllowed(responseStatus));
     amle.setObjectType(Constants.requestTypeToObjectTypeMap
         .get(requestClassName));
-    return amle;
   }
 }
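Every thrift handler funnels through the same factory singleton, so emitting an audit record is essentially two calls. A sketch of the call site, assuming a handler that already has the request, response, and Configuration in scope (the handler wiring itself is outside this diff); note that toJsonFormatLog() now declares a checked Exception, so callers must handle or propagate it:

// Inside a generic-model thrift request handler, after building the response:
JsonLogEntity entity = JsonLogEntityFactory.getInstance()
    .createJsonLogEntity(request, response, conf);
Logger auditLogger = Logger.getLogger(Constants.AUDIT_LOGGER_NAME_GENERIC);
auditLogger.info(entity.toJsonFormatLog()); // may throw; handler decides how to react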
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/util/CommandUtil.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/util/CommandUtil.java
index 9beef837c..d6aecd1a5 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/util/CommandUtil.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/util/CommandUtil.java
@@ -18,18 +18,22 @@
 
 package org.apache.sentry.provider.db.log.util;
 
-import java.util.Iterator;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.util.Enumeration;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.sentry.core.model.db.AccessConstants;
-import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsRequest;
-import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleDeleteGroupsRequest;
+import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable;
 import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleGrantPrivilegeRequest;
 import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleRevokePrivilegeRequest;
 import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption;
-import org.apache.sentry.provider.db.service.thrift.TSentryGroup;
 import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
 import org.apache.sentry.service.thrift.ServiceConstants.PrivilegeScope;
+import org.datanucleus.util.StringUtils;
+
+import com.google.common.annotations.VisibleForTesting;
 
 public class CommandUtil {
 
@@ -41,20 +45,17 @@ public static String createCmdForCreateOrDropRole(String roleName,
     return "DROP ROLE " + roleName;
   }
 
-  public static String createCmdForRoleAddGroup(
-      TAlterSentryRoleAddGroupsRequest request) {
-    return createCmdForRoleAddOrDeleteGroup(request.getRoleName(),
-        request.getGroupsIterator(), true);
+  public static String createCmdForRoleAddGroup(String roleName, String groups) {
+    return createCmdForRoleAddOrDeleteGroup(roleName, groups, true);
   }
 
-  public static String createCmdForRoleDeleteGroup(
-      TAlterSentryRoleDeleteGroupsRequest request) {
-    return createCmdForRoleAddOrDeleteGroup(request.getRoleName(),
-        request.getGroupsIterator(), false);
+  public static String createCmdForRoleDeleteGroup(String roleName, String groups) {
+    return createCmdForRoleAddOrDeleteGroup(roleName, groups, false);
   }
 
   private static String createCmdForRoleAddOrDeleteGroup(String roleName,
-      Iterator<TSentryGroup> iter, boolean isAddGroup) {
+      String groups,
+      boolean isAddGroup) {
     StringBuilder sb = new StringBuilder();
     if (isAddGroup) {
       sb.append("GRANT ROLE ");
@@ -68,17 +69,8 @@ private static String createCmdForRoleAddOrDeleteGroup(String roleName,
       sb.append(" FROM ");
     }
 
-    if (iter != null) {
-      sb.append("GROUP ");
-      boolean commaFlg = false;
-      while (iter.hasNext()) {
-        if (commaFlg) {
-          sb.append(", ");
-        } else {
-          commaFlg = true;
-        }
-        sb.append(iter.next().getGroupName());
-      }
+    if (!StringUtils.isEmpty(groups)) {
+      sb.append("GROUP ").append(groups);
     } else {
       sb = new StringBuilder("Missing group information.");
     }
@@ -154,33 +146,76 @@ private static String createCmdForGrantOrRevokePrivilege(String roleName,
     return sb.toString();
   }
 
-  private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>() {
-    @Override
-    protected synchronized String initialValue() {
-      return "";
-    }
-  };
-
-  public static void setIpAddress(String ipAddress) {
-    threadLocalIpAddress.set(ipAddress);
createCmdForGrantGMPrivilege( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest request) { + return createCmdForGrantOrRevokeGMPrivilege(request.getRoleName(), request.getPrivilege(), true); } - public static String getIpAddress() { - return threadLocalIpAddress.get(); + public static String createCmdForRevokeGMPrivilege( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest request) { + return createCmdForGrantOrRevokeGMPrivilege(request.getRoleName(), request.getPrivilege(), + false); } - private static ThreadLocal threadLocalImpersonator = new ThreadLocal() { - @Override - protected synchronized String initialValue() { - return ""; + private static String createCmdForGrantOrRevokeGMPrivilege(String roleName, + org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege privilege, + boolean isGrant) { + StringBuilder sb = new StringBuilder(); + if (isGrant) { + sb.append("GRANT "); + } else { + sb.append("REVOKE "); + } + + String action = privilege.getAction(); + if (AccessConstants.ALL.equalsIgnoreCase(action)) { + sb.append("ALL"); + } else { + if (action != null) { + action = action.toUpperCase(); + } + sb.append(action); } - }; - public static void setImpersonator(String impersonator) { - threadLocalImpersonator.set(impersonator); + sb.append(" ON"); + + List<TAuthorizable> authorizables = privilege.getAuthorizables(); + if (authorizables != null) { + for (TAuthorizable authorizable : authorizables) { + sb.append(" ").append(authorizable.getType()).append(" ").append(authorizable.getName()); + } + } + + if (isGrant) { + sb.append(" TO ROLE "); + } else { + sb.append(" FROM ROLE "); + } + sb.append(roleName); + + if (privilege.getGrantOption() == org.apache.sentry.provider.db.generic.service.thrift.TSentryGrantOption.TRUE) { + sb.append(" WITH GRANT OPTION"); + } + + return sb.toString(); } - public static String getImpersonator() { - return threadLocalImpersonator.get();
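For a concrete sense of what createCmdForGrantOrRevokeGMPrivilege emits, here is a self-contained restatement of its string-building logic with an invented Solr-style privilege; the collection name, action, and role below are examples, not values taken from the patch:

```java
// Restates the GRANT/REVOKE rendering above with plain arrays instead of the
// Thrift types. Collection, action, and role names are invented.
public class GMCommandSketch {
  static String render(boolean isGrant, String action, String[][] authorizables,
                       String roleName, boolean grantOption) {
    StringBuilder sb = new StringBuilder(isGrant ? "GRANT " : "REVOKE ");
    sb.append(action.toUpperCase()).append(" ON");
    for (String[] a : authorizables) {
      sb.append(" ").append(a[0]).append(" ").append(a[1]); // type, then name
    }
    sb.append(isGrant ? " TO ROLE " : " FROM ROLE ").append(roleName);
    if (grantOption) {
      sb.append(" WITH GRANT OPTION");
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    // Prints: GRANT QUERY ON COLLECTION logs TO ROLE analyst WITH GRANT OPTION
    System.out.println(render(true, "query",
        new String[][] {{"COLLECTION", "logs"}}, "analyst", true));
  }
}
```

The same shape with isGrant=false yields a REVOKE ... FROM ROLE ... statement.

+ // Check if the given IP is one of the local IPs.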
+ @VisibleForTesting + public static boolean assertIPInAuditLog(String ipInAuditLog) throws Exception { + if (ipInAuditLog == null) { + return false; + } + Enumeration<NetworkInterface> netInterfaces = NetworkInterface.getNetworkInterfaces(); + while (netInterfaces.hasMoreElements()) { + NetworkInterface ni = netInterfaces.nextElement(); + Enumeration<InetAddress> ips = ni.getInetAddresses(); + while (ips.hasMoreElements()) { + if (ipInAuditLog.indexOf(ips.nextElement().getHostAddress()) != -1) { + return true; + } + } + } + return false; } } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/util/Constants.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/util/Constants.java index 072a0e8d3..b0a87aec5 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/util/Constants.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/log/util/Constants.java @@ -30,6 +30,7 @@ public class Constants { public final static String AUDIT_LOGGER_NAME = "sentry.hive.authorization.ddl.logger"; + public final static String AUDIT_LOGGER_NAME_GENERIC = "sentry.generic.authorization.ddl.logger"; public final static String LOG_FIELD_SERVICE_NAME = "serviceName"; public final static String LOG_FIELD_USER_NAME = "userName"; @@ -44,6 +45,7 @@ public class Constants { public final static String LOG_FIELD_COLUMN_NAME = "column"; public final static String LOG_FIELD_RESOURCE_PATH = "resourcePath"; public final static String LOG_FIELD_OBJECT_TYPE = "objectType"; + public final static String LOG_FIELD_COMPONENT = "component"; public final static String OPERATION_CREATE_ROLE = "CREATE_ROLE"; public final static String OPERATION_DROP_ROLE = "DROP_ROLE"; @@ -62,6 +64,7 @@ public class Constants { public static final Map<String, String> requestTypeToObjectTypeMap = new HashMap<String, String>(); static { + // for hive audit log requestTypeToOperationMap.put(TCreateSentryRoleRequest.class.getName(), Constants.OPERATION_CREATE_ROLE); requestTypeToOperationMap.put( @@ -78,7 +81,30 @@ public class Constants { requestTypeToOperationMap.put( TAlterSentryRoleDeleteGroupsRequest.class.getName(), Constants.OPERATION_DELETE_ROLE); + // for generic model audit log + requestTypeToOperationMap.put( + org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleRequest.class + .getName(), Constants.OPERATION_CREATE_ROLE); + requestTypeToOperationMap + .put(org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleRequest.class + .getName(), Constants.OPERATION_DROP_ROLE); + requestTypeToOperationMap + .put( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest.class + .getName(), Constants.OPERATION_GRANT_PRIVILEGE); + requestTypeToOperationMap + .put( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest.class + .getName(), Constants.OPERATION_REVOKE_PRIVILEGE); + requestTypeToOperationMap.put( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsRequest.class + .getName(), Constants.OPERATION_ADD_ROLE); + requestTypeToOperationMap + .put( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsRequest.class + .getName(), Constants.OPERATION_DELETE_ROLE); + // for hive audit log requestTypeToObjectTypeMap.put(TCreateSentryRoleRequest.class.getName(), Constants.OBJECT_TYPE_ROLE); requestTypeToObjectTypeMap.put(TDropSentryRoleRequest.class.getName(),
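These two registries are what setCommAttrForAMLE consults: the request's class name keys both the operation and the object type of the audit record. A stand-alone sketch of the lookup, with a two-entry map standing in for the full table:

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of resolving audit fields from a request class name, as
// setCommAttrForAMLE does against Constants.requestTypeToOperationMap.
// The two entries are a stand-in for the full table registered above.
public class OperationLookupSketch {
  public static void main(String[] args) {
    Map<String, String> requestTypeToOperation = new HashMap<String, String>();
    requestTypeToOperation.put(
        "org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleRequest",
        "CREATE_ROLE");
    requestTypeToOperation.put(
        "org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleRequest",
        "DROP_ROLE");

    String requestClassName =
        "org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleRequest";
    System.out.println(requestTypeToOperation.get(requestClassName)); // CREATE_ROLE
  }
}
```

@@ -95,5 +121,27 @@ public class Constants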
{ requestTypeToObjectTypeMap.put( TAlterSentryRoleRevokePrivilegeRequest.class.getName(), Constants.OBJECT_TYPE_PRINCIPAL); + // for generic model audit log + requestTypeToObjectTypeMap.put( + org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleRequest.class + .getName(), Constants.OBJECT_TYPE_ROLE); + requestTypeToObjectTypeMap + .put(org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleRequest.class + .getName(), Constants.OBJECT_TYPE_ROLE); + requestTypeToObjectTypeMap.put( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsRequest.class + .getName(), Constants.OBJECT_TYPE_ROLE); + requestTypeToObjectTypeMap + .put( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsRequest.class + .getName(), Constants.OBJECT_TYPE_ROLE); + requestTypeToObjectTypeMap + .put( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest.class + .getName(), Constants.OBJECT_TYPE_PRINCIPAL); + requestTypeToObjectTypeMap + .put( + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest.class + .getName(), Constants.OBJECT_TYPE_PRINCIPAL); } } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryGMPrivilege.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryGMPrivilege.java index 266f34943..13b48eaac 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryGMPrivilege.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryGMPrivilege.java @@ -17,11 +17,10 @@ */ package org.apache.sentry.provider.db.service.model; -import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.common.ProviderConstants.KV_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; import java.lang.reflect.Field; -import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -51,14 +50,15 @@ public class MSentryGMPrivilege { * We assume that the generic model privilege for any component(hive/impala or solr) doesn't exceed four level. * This generic model privilege currently can support maximum 4 level. 
**/ - private String resourceName0 = NULL_COL; - private String resourceType0 = NULL_COL; - private String resourceName1 = NULL_COL; - private String resourceType1 = NULL_COL; - private String resourceName2 = NULL_COL; - private String resourceType2 = NULL_COL; - private String resourceName3 = NULL_COL; - private String resourceType3 = NULL_COL; + private String resourceName0 = NULL_COL; //NOPMD + private String resourceType0 = NULL_COL; //NOPMD + private String resourceName1 = NULL_COL; //NOPMD + private String resourceType1 = NULL_COL; //NOPMD + private String resourceName2 = NULL_COL; //NOPMD + private String resourceType2 = NULL_COL; //NOPMD + private String resourceName3 = NULL_COL; //NOPMD + private String resourceType3 = NULL_COL; //NOPMD + private String serviceName; private String componentName; @@ -180,7 +180,7 @@ public String getName() { * @param authorizables */ public void setAuthorizables(List authorizables) { - if ((authorizables == null) || (authorizables.isEmpty())) { + if (authorizables == null || authorizables.isEmpty()) { //service scope scope = SERVICE_SCOPE; return; @@ -253,38 +253,51 @@ public String toString() { @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } MSentryGMPrivilege other = (MSentryGMPrivilege) obj; if (action == null) { - if (other.action != null) + if (other.action != null) { return false; - } else if (!action.equalsIgnoreCase(other.action)) + } + } else if (!action.equalsIgnoreCase(other.action)) { return false; + } if (scope == null) { - if (other.scope != null) + if (other.scope != null) { return false; - } else if (!scope.equals(other.scope)) + } + } else if (!scope.equals(other.scope)) { return false; + } if (serviceName == null) { - if (other.serviceName != null) + if (other.serviceName != null) { return false; - } else if (!serviceName.equals(other.serviceName)) + } + } else if (!serviceName.equals(other.serviceName)) { return false; + } if (componentName == null) { - if (other.componentName != null) + if (other.componentName != null) { return false; - } else if (!componentName.equals(other.componentName)) + } + } else if (!componentName.equals(other.componentName)) { return false; + } if (grantOption == null) { - if (other.grantOption != null) + if (other.grantOption != null) { return false; - } else if (!grantOption.equals(other.grantOption)) + } + } else if (!grantOption.equals(other.grantOption)) { return false; + } List authorizables = getAuthorizables(); List other_authorizables = other.getAuthorizables(); @@ -349,7 +362,7 @@ public boolean implies(MSentryGMPrivilege request) { } } - if ( (!existIterator.hasNext()) && (!requestIterator.hasNext()) ){ + if ( !existIterator.hasNext() && !requestIterator.hasNext() ){ /** * The persistent privilege has the same authorizables size as the requested privilege * The check is pass diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryGroup.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryGroup.java index 32dbafc47..7e41c9329 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryGroup.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryGroup.java @@ -91,20 +91,26 @@ 
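As background for the MSentryGMPrivilege changes above, its implies() check walks the persisted and requested authorizable chains in parallel and succeeds when the persisted chain is a case-insensitive prefix of the requested one. A toy model of just that prefix rule, with plain strings standing in for TAuthorizable and action/grant-option handling deliberately omitted:

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Toy model of the authorizable walk in MSentryGMPrivilege.implies(): a
// persisted privilege implies a request when its chain is a prefix of the
// request's chain. Values are invented; real code compares TAuthorizable.
public class GMImplySketch {
  static boolean implies(List<String> persisted, List<String> requested) {
    Iterator<String> p = persisted.iterator();
    Iterator<String> r = requested.iterator();
    while (p.hasNext() && r.hasNext()) {
      if (!p.next().equalsIgnoreCase(r.next())) {
        return false;
      }
    }
    // Either both chains ended together, or the persisted one ended first
    // (e.g. a collection-level grant implies a document-level request).
    return !p.hasNext();
  }

  public static void main(String[] args) {
    List<String> collectionGrant = Arrays.asList("collection=logs");
    List<String> docRequest = Arrays.asList("collection=logs", "doc=42");
    System.out.println(implies(collectionGrant, docRequest)); // true
    System.out.println(implies(docRequest, collectionGrant)); // false
  }
}
```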
public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } MSentryGroup other = (MSentryGroup) obj; - if (createTime != other.createTime) + if (createTime != other.createTime) { return false; + } if (groupName == null) { - if (other.groupName != null) + if (other.groupName != null) { return false; - } else if (!groupName.equals(other.groupName)) + } + } else if (!groupName.equals(other.groupName)) { return false; + } return true; } -} \ No newline at end of file +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryPrivilege.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryPrivilege.java index 1c68a0f4a..4c3af7992 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryPrivilege.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryPrivilege.java @@ -53,7 +53,7 @@ public MSentryPrivilege() { this.roles = new HashSet(); } - public MSentryPrivilege(String privilegeName, String privilegeScope, + public MSentryPrivilege(String privilegeScope, String serverName, String dbName, String tableName, String columnName, String URI, String action, Boolean grantOption) { this.privilegeScope = privilegeScope; @@ -67,10 +67,10 @@ public MSentryPrivilege(String privilegeName, String privilegeScope, this.roles = new HashSet(); } - public MSentryPrivilege(String privilegeName, String privilegeScope, + public MSentryPrivilege(String privilegeScope, String serverName, String dbName, String tableName, String columnName, String URI, String action) { - this(privilegeName, privilegeScope, serverName, dbName, tableName, + this(privilegeScope, serverName, dbName, tableName, columnName, URI, action, false); } @@ -202,48 +202,65 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } MSentryPrivilege other = (MSentryPrivilege) obj; if (URI == null) { - if (other.URI != null) + if (other.URI != null) { return false; - } else if (!URI.equals(other.URI)) + } + } else if (!URI.equals(other.URI)) { return false; + } if (action == null) { - if (other.action != null) + if (other.action != null) { return false; - } else if (!action.equals(other.action)) + } + } else if (!action.equals(other.action)) { return false; + } if (dbName == null) { - if (other.dbName != null) + if (other.dbName != null) { return false; - } else if (!dbName.equals(other.dbName)) + } + } else if (!dbName.equals(other.dbName)) { return false; + } if (serverName == null) { - if (other.serverName != null) + if (other.serverName != null) { return false; - } else if (!serverName.equals(other.serverName)) + } + } else if (!serverName.equals(other.serverName)) { return false; + } if (tableName == null) { - if (other.tableName != null) + if (other.tableName != null) { return false; - } else if (!tableName.equals(other.tableName)) + } + } else if (!tableName.equals(other.tableName)) { return false; + } if (columnName == null) { - if (other.columnName != null) + if (other.columnName != null) { 
return false; - } else if (!columnName.equals(other.columnName)) + } + } else if (!columnName.equals(other.columnName)) { return false; + } if (grantOption == null) { - if (other.grantOption != null) + if (other.grantOption != null) { return false; - } else if (!grantOption.equals(other.grantOption)) + } + } else if (!grantOption.equals(other.grantOption)) { return false; + } return true; } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryRole.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryRole.java index 007675313..24514eafa 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryRole.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/model/MSentryRole.java @@ -166,18 +166,23 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } MSentryRole other = (MSentryRole) obj; if (roleName == null) { - if (other.roleName != null) + if (other.roleName != null) { return false; - } else if (!roleName.equals(other.roleName)) + } + } else if (!roleName.equals(other.roleName)) { return false; + } return true; } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/FixedJsonInstanceSerializer.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/FixedJsonInstanceSerializer.java index 6eb36a1cc..476bf6a5d 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/FixedJsonInstanceSerializer.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/FixedJsonInstanceSerializer.java @@ -75,14 +75,14 @@ private String getTextField(final JsonNode pNode, final String pFieldName) { private Integer getIntegerField(final JsonNode pNode, final String pFieldName) { Preconditions.checkNotNull(pNode); Preconditions.checkNotNull(pFieldName); - return (pNode.get(pFieldName) != null && pNode.get(pFieldName).isNumber()) ? pNode.get(pFieldName) + return pNode.get(pFieldName) != null && pNode.get(pFieldName).isNumber() ? pNode.get(pFieldName) .getIntValue() : null; } private Long getLongField(final JsonNode pNode, final String pFieldName) { Preconditions.checkNotNull(pNode); Preconditions.checkNotNull(pFieldName); - return (pNode.get(pFieldName) != null && pNode.get(pFieldName).isLong()) ? pNode.get(pFieldName).getLongValue() + return pNode.get(pFieldName) != null && pNode.get(pFieldName).isLong() ? 
pNode.get(pFieldName).getLongValue() : null; } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/HAContext.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/HAContext.java index c3aa23c88..7bce7418e 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/HAContext.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/HAContext.java @@ -19,7 +19,7 @@ package org.apache.sentry.provider.db.service.persistent; import java.io.IOException; -import java.util.Collections; +import java.util.Arrays; import java.util.List; import org.apache.curator.RetryPolicy; @@ -43,6 +43,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.Lists; /** * Stores the HA related context @@ -50,9 +52,11 @@ public class HAContext { private static final Logger LOGGER = LoggerFactory.getLogger(HAContext.class); - private static HAContext serverHAContext = null; + private static volatile HAContext serverHAContext = null; + private static boolean aclChecked = false; public final static String SENTRY_SERVICE_REGISTER_NAMESPACE = "sentry-service"; + public static final String SENTRY_ZK_JAAS_NAME = "SentryClient"; private final String zookeeperQuorum; private final int retriesMaxCount; private final int sleepMsBetweenRetries; @@ -64,7 +68,7 @@ public class HAContext { private final CuratorFramework curatorFramework; private final RetryPolicy retryPolicy; - private HAContext(Configuration conf) throws Exception { + protected HAContext(Configuration conf) throws Exception { this.zookeeperQuorum = conf.get(ServerConfig.SENTRY_HA_ZOOKEEPER_QUORUM, ServerConfig.SENTRY_HA_ZOOKEEPER_QUORUM_DEFAULT); this.retriesMaxCount = conf.getInt(ServerConfig.SENTRY_HA_ZOOKEEPER_RETRIES_MAX_COUNT, @@ -80,9 +84,22 @@ private HAContext(Configuration conf) throws Exception { if (zkSecure) { LOGGER.info("Connecting to ZooKeeper with SASL/Kerberos and using 'sasl' ACLs"); setJaasConfiguration(conf); - System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, "Client"); - saslACL = Collections.singletonList(new ACL(Perms.ALL, new Id("sasl", getServicePrincipal(conf)))); + System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, + SENTRY_ZK_JAAS_NAME); + saslACL = Lists.newArrayList(); + saslACL.add(new ACL(Perms.ALL, new Id("sasl", getServicePrincipal(conf, + ServerConfig.PRINCIPAL)))); + saslACL.add(new ACL(Perms.ALL, new Id("sasl", getServicePrincipal(conf, + ServerConfig.SERVER_HA_ZOOKEEPER_CLIENT_PRINCIPAL)))); aclProvider = new SASLOwnerACLProvider(); + String allowConnect = conf.get(ServerConfig.ALLOW_CONNECT); + + if (!Strings.isNullOrEmpty(allowConnect)) { + for (String principal : Arrays.asList(allowConnect.split("\\s*,\\s*"))) { + LOGGER.info("Adding acls for " + principal); + saslACL.add(new ACL(Perms.ALL, new Id("sasl", principal))); + } + } } else { LOGGER.info("Connecting to ZooKeeper without authentication"); aclProvider = new DefaultACLProvider(); @@ -95,7 +112,7 @@ private HAContext(Configuration conf) throws Exception { .retryPolicy(retryPolicy) .aclProvider(aclProvider) .build(); - checkAndSetACLs(); + startCuratorFramework(); } /** @@ -123,6 +140,13 @@ public void run() { return serverHAContext; } + // HA context for server which verifies the ZK ACLs on namespace + public 
static HAContext getHAServerContext(Configuration conf) throws Exception { + HAContext serverContext = getHAContext(conf); + serverContext.checkAndSetACLs(); + return serverContext; + } + @VisibleForTesting public static synchronized void clearServerContext() { if (serverHAContext != null) { @@ -162,56 +186,66 @@ private void validateConf() { Preconditions.checkNotNull(namespace, "Zookeeper namespace should not be null."); } - private String getServicePrincipal(Configuration conf) throws IOException { - String principal = conf.get(ServerConfig.PRINCIPAL); + protected String getServicePrincipal(Configuration conf, String confProperty) + throws IOException { + String principal = conf.get(confProperty); Preconditions.checkNotNull(principal); Preconditions.checkArgument(principal.length() != 0, "Server principal is not right."); return principal.split("[/@]")[0]; } private void checkAndSetACLs() throws Exception { - if (zkSecure) { + if (zkSecure && !aclChecked) { // If znodes were previously created without security enabled, and now it is, we need to go through all existing znodes - // and set the ACLs for them + // and set the ACLs for them. This is done just once at the startup // We can't get the namespace znode through curator; have to go through zk client - if (curatorFramework.getState() != CuratorFrameworkState.STARTED) { - curatorFramework.start(); - } + startCuratorFramework(); String namespace = "/" + curatorFramework.getNamespace(); if (curatorFramework.getZookeeperClient().getZooKeeper().exists(namespace, null) != null) { List acls = curatorFramework.getZookeeperClient().getZooKeeper().getACL(namespace, new Stat()); - if (!acls.get(0).getId().getScheme().equals("sasl")) { + if (acls.isEmpty() || !acls.get(0).getId().getScheme().equals("sasl")) { LOGGER.info("'sasl' ACLs not set; setting..."); List children = curatorFramework.getZookeeperClient().getZooKeeper().getChildren(namespace, null); for (String child : children) { - checkAndSetACLs(namespace + "/" + child); + checkAndSetACLs("/" + child); } curatorFramework.getZookeeperClient().getZooKeeper().setACL(namespace, saslACL, -1); } } + aclChecked = true; + } } private void checkAndSetACLs(String path) throws Exception { + LOGGER.info("Setting acls on " + path); List children = curatorFramework.getChildren().forPath(path); for (String child : children) { - checkAndSetACLs(path + "/" + child); + checkAndSetACLs(path + "/" + child); } curatorFramework.setACL().withACL(saslACL).forPath(path); } // This gets ignored during most tests, see ZKXTestCaseWithSecurity#setupZKServer() private void setJaasConfiguration(Configuration conf) throws IOException { + if ("false".equalsIgnoreCase(conf.get( + ServerConfig.SERVER_HA_ZOOKEEPER_CLIENT_TICKET_CACHE, + ServerConfig.SERVER_HA_ZOOKEEPER_CLIENT_TICKET_CACHE_DEFAULT))) { String keytabFile = conf.get(ServerConfig.SERVER_HA_ZOOKEEPER_CLIENT_KEYTAB); Preconditions.checkArgument(keytabFile.length() != 0, "Keytab File is not right."); String principal = conf.get(ServerConfig.SERVER_HA_ZOOKEEPER_CLIENT_PRINCIPAL); - principal = SecurityUtil.getServerPrincipal(principal, conf.get(ServerConfig.RPC_ADDRESS)); + principal = SecurityUtil.getServerPrincipal(principal, + conf.get(ServerConfig.RPC_ADDRESS, ServerConfig.RPC_ADDRESS_DEFAULT)); Preconditions.checkArgument(principal.length() != 0, "Kerberos principal is not right."); // This is equivalent to writing a jaas.conf file and setting the system property, "java.security.auth.login.config", to // point to it (but this way we don't have to write a 
file, and it works better for the tests) - JaasConfiguration.addEntry("Client", principal, keytabFile); - javax.security.auth.login.Configuration.setConfiguration(JaasConfiguration.getInstance()); + JaasConfiguration.addEntryForKeytab(SENTRY_ZK_JAAS_NAME, principal, keytabFile); + } else { + // Create jaas conf for ticket cache + JaasConfiguration.addEntryForTicketCache(SENTRY_ZK_JAAS_NAME); + } + javax.security.auth.login.Configuration.setConfiguration(JaasConfiguration.getInstance()); } public class SASLOwnerACLProvider implements ACLProvider { diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java index d7937d097..c5c5ffbe2 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java @@ -18,9 +18,10 @@ package org.apache.sentry.provider.db.service.persistent; -import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_JOINER; -import static org.apache.sentry.provider.common.ProviderConstants.KV_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_JOINER; +import static org.apache.sentry.policy.common.PolicyConstants.KV_JOINER; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -48,7 +49,7 @@ import org.apache.sentry.SentryUserException; import org.apache.sentry.core.model.db.AccessConstants; import org.apache.sentry.core.model.db.DBModelAuthorizable.AuthorizableType; -import org.apache.sentry.provider.common.ProviderConstants; +import org.apache.sentry.policy.common.PolicyConstants; import org.apache.sentry.provider.db.SentryAccessDeniedException; import org.apache.sentry.provider.db.SentryAlreadyExistsException; import org.apache.sentry.provider.db.SentryGrantDeniedException; @@ -58,11 +59,13 @@ import org.apache.sentry.provider.db.service.model.MSentryPrivilege; import org.apache.sentry.provider.db.service.model.MSentryRole; import org.apache.sentry.provider.db.service.model.MSentryVersion; +import org.apache.sentry.provider.db.service.thrift.SentryConfigurationException; import org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessor; import org.apache.sentry.provider.db.service.thrift.TSentryActiveRoleSet; import org.apache.sentry.provider.db.service.thrift.TSentryAuthorizable; import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; import org.apache.sentry.provider.db.service.thrift.TSentryGroup; +import org.apache.sentry.provider.db.service.thrift.TSentryMappingData; import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; import org.apache.sentry.provider.db.service.thrift.TSentryPrivilegeMap; import org.apache.sentry.provider.db.service.thrift.TSentryRole; @@ -74,9 +77,11 @@ import com.codahale.metrics.Gauge; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import com.google.common.collect.Collections2; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -120,18 +125,27 @@ public class SentryStore { private 
Thread privCleanerThread = null; public SentryStore(Configuration conf) throws SentryNoSuchObjectException, - SentryAccessDeniedException { + SentryAccessDeniedException, SentryConfigurationException, IOException { commitSequenceId = 0; this.conf = conf; Properties prop = new Properties(); prop.putAll(ServerConfig.SENTRY_STORE_DEFAULTS); String jdbcUrl = conf.get(ServerConfig.SENTRY_STORE_JDBC_URL, "").trim(); Preconditions.checkArgument(!jdbcUrl.isEmpty(), "Required parameter " + - ServerConfig.SENTRY_STORE_JDBC_URL + " missing"); + ServerConfig.SENTRY_STORE_JDBC_URL + " is missing"); String user = conf.get(ServerConfig.SENTRY_STORE_JDBC_USER, ServerConfig. SENTRY_STORE_JDBC_USER_DEFAULT).trim(); - String pass = conf.get(ServerConfig.SENTRY_STORE_JDBC_PASS, ServerConfig. - SENTRY_STORE_JDBC_PASS_DEFAULT).trim(); + // The password is read from the credential provider configured via + // "hadoop.security.credential.provider.path" (CREDENTIAL_PROVIDER_PATH) in sentry-site.xml; + // if none is configured, it falls back to reading the value directly from sentry-site.xml + char[] passTmp = conf.getPassword(ServerConfig.SENTRY_STORE_JDBC_PASS); + String pass = null; + if (passTmp != null) { + pass = new String(passTmp); + } else { + throw new SentryConfigurationException("Error reading " + ServerConfig.SENTRY_STORE_JDBC_PASS); + } + String driverName = conf.get(ServerConfig.SENTRY_STORE_JDBC_DRIVER, ServerConfig.SENTRY_STORE_JDBC_DRIVER_DEFAULT); prop.setProperty(ServerConfig.JAVAX_JDO_URL, jdbcUrl); @@ -162,7 +176,7 @@ public SentryStore(Configuration conf) throws SentryNoSuchObjectException, prop.setProperty("datanucleus.NontransactionalWrite", "false"); pmf = JDOHelper.getPersistenceManagerFactory(prop); - verifySentryStoreSchema(conf, checkSchemaVersion); + verifySentryStoreSchema(checkSchemaVersion); // Kick off the thread that cleans orphaned privileges (unless told not to) privCleaner = this.new PrivCleaner(); @@ -175,8 +189,7 @@ public SentryStore(Configuration conf) throws SentryNoSuchObjectException, } // ensure that the backend DB schema is set - private void verifySentryStoreSchema(Configuration serverConf, - boolean checkVersion) + private void verifySentryStoreSchema(boolean checkVersion) throws SentryNoSuchObjectException, SentryAccessDeniedException { if (!checkVersion) { setSentryVersion(SentryStoreSchemaInfo.getSentryVersion(), @@ -299,16 +312,10 @@ public CommitContext createSentryRole(String roleName) PersistenceManager pm = null; try { pm = openTransaction(); - MSentryRole mSentryRole = getMSentryRole(pm, roleName); - if (mSentryRole == null) { - MSentryRole mRole = new MSentryRole(roleName, System.currentTimeMillis()); - pm.makePersistent(mRole); - CommitContext commit = commitUpdateTransaction(pm); - rollbackTransaction = false; - return commit; - } else { - throw new SentryAlreadyExistsException("Role: " + roleName); - } + createSentryRoleCore(pm, roleName); + CommitContext commit = commitUpdateTransaction(pm); + rollbackTransaction = false; + return commit; } finally { if (rollbackTransaction) { rollbackTransaction(pm); @@ -316,9 +323,20 @@ } } + private void createSentryRoleCore(PersistenceManager pm, String roleName) + throws SentryAlreadyExistsException { + MSentryRole mSentryRole = getMSentryRole(pm, roleName); + if (mSentryRole == null) { + MSentryRole mRole = new MSentryRole(roleName, System.currentTimeMillis()); + pm.makePersistent(mRole); + } else { + throw new SentryAlreadyExistsException("Role: " + roleName); + } + }
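The constructor now resolves the JDBC password through Hadoop's Configuration.getPassword (available since Hadoop 2.6), which consults the credential providers listed under hadoop.security.credential.provider.path before falling back to the plain property. A minimal sketch of that lookup; the alias and jceks path are invented for the example:

```java
import org.apache.hadoop.conf.Configuration;

// Sketch of the new password lookup path. The alias and provider path are
// invented; in a real deployment the keystore entry would be created once with
//   hadoop credential create sentry.store.jdbc.password \
//     -provider jceks://file/etc/sentry/sentry.jceks
// keeping the clear-text value out of sentry-site.xml.
public class JdbcPasswordSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.credential.provider.path",
        "jceks://file/etc/sentry/sentry.jceks");

    // Returns the keystore entry if the alias exists there, otherwise falls
    // back to a plain config property of the same name; null if neither is set.
    char[] pass = conf.getPassword("sentry.store.jdbc.password");
    if (pass == null) {
      throw new IllegalStateException("sentry.store.jdbc.password not configured");
    }
  }
}
```

+ private Long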
getCount(Class tClass) { PersistenceManager pm = null; - Long size = new Long(-1); + Long size = Long.valueOf(-1); try { pm = openTransaction(); Query query = pm.newQuery(); @@ -327,7 +345,9 @@ private Long getCount(Class tClass) { size = (Long)query.execute(); } finally { - commitTransaction(pm); + if (pm != null) { + commitTransaction(pm); + } } return size; } @@ -426,25 +446,26 @@ private MSentryPrivilege alterSentryRoleGrantPrivilegeCore(PersistenceManager pm MSentryPrivilege mPrivilege = null; MSentryRole mRole = getMSentryRole(pm, roleName); if (mRole == null) { - throw new SentryNoSuchObjectException("Role: " + roleName); + throw new SentryNoSuchObjectException("Role: " + roleName + " doesn't exist"); } else { - if ((!isNULL(privilege.getColumnName())) || (!isNULL(privilege.getTableName())) - || (!isNULL(privilege.getDbName()))) { + if (!isNULL(privilege.getColumnName()) || !isNULL(privilege.getTableName()) + || !isNULL(privilege.getDbName())) { // If Grant is for ALL and Either INSERT/SELECT already exists.. // need to remove it and GRANT ALL.. - if (privilege.getAction().equalsIgnoreCase("*")) { + if (AccessConstants.ALL.equalsIgnoreCase(privilege.getAction()) + || AccessConstants.ACTION_ALL.equalsIgnoreCase(privilege.getAction())) { TSentryPrivilege tNotAll = new TSentryPrivilege(privilege); tNotAll.setAction(AccessConstants.SELECT); MSentryPrivilege mSelect = getMSentryPrivilege(tNotAll, pm); tNotAll.setAction(AccessConstants.INSERT); MSentryPrivilege mInsert = getMSentryPrivilege(tNotAll, pm); - if ((mSelect != null) && (mRole.getPrivileges().contains(mSelect))) { + if (mSelect != null && mRole.getPrivileges().contains(mSelect)) { mSelect.removeRole(mRole); privCleaner.incPrivRemoval(); pm.makePersistent(mSelect); } - if ((mInsert != null) && (mRole.getPrivileges().contains(mInsert))) { + if (mInsert != null && mRole.getPrivileges().contains(mInsert)) { mInsert.removeRole(mRole); privCleaner.incPrivRemoval(); pm.makePersistent(mInsert); @@ -454,8 +475,13 @@ private MSentryPrivilege alterSentryRoleGrantPrivilegeCore(PersistenceManager pm // do nothing.. 
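The grant path above normalizes overlapping privileges: granting ALL (or its "*" spelling) first strips any narrower SELECT/INSERT rows, while the "do nothing" branch that follows skips a narrower grant when ALL is already held. A toy walk-through of the resulting end state, with the privilege set reduced to plain strings and all names invented:

```java
import java.util.LinkedHashSet;
import java.util.Set;

// Toy model of the ALL-vs-SELECT/INSERT normalization in
// alterSentryRoleGrantPrivilegeCore. Privileges are plain strings here.
public class GrantAllSketch {
  static void grant(Set<String> rolePrivs, String action) {
    if ("ALL".equalsIgnoreCase(action) || "*".equals(action)) {
      rolePrivs.remove("SELECT"); // absorbed by ALL
      rolePrivs.remove("INSERT");
      rolePrivs.add("ALL");
    } else if (!rolePrivs.contains("ALL") && !rolePrivs.contains("*")) {
      rolePrivs.add(action.toUpperCase());
    } // else: ALL already held, the narrower grant is a no-op
  }

  public static void main(String[] args) {
    Set<String> privs = new LinkedHashSet<String>();
    grant(privs, "select");
    grant(privs, "insert");
    grant(privs, "*");         // collapses SELECT+INSERT into ALL
    grant(privs, "select");    // no-op: ALL already present
    System.out.println(privs); // [ALL]
  }
}
```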
TSentryPrivilege tAll = new TSentryPrivilege(privilege); tAll.setAction(AccessConstants.ALL); - MSentryPrivilege mAll = getMSentryPrivilege(tAll, pm); - if ((mAll != null) && (mRole.getPrivileges().contains(mAll))) { + MSentryPrivilege mAll1 = getMSentryPrivilege(tAll, pm); + tAll.setAction(AccessConstants.ACTION_ALL); + MSentryPrivilege mAll2 = getMSentryPrivilege(tAll, pm); + if (mAll1 != null && mRole.getPrivileges().contains(mAll1)) { + return null; + } + if (mAll2 != null && mRole.getPrivileges().contains(mAll2)) { return null; } } @@ -511,7 +537,7 @@ private void alterSentryRoleRevokePrivilegeCore(PersistenceManager pm, query.setUnique(true); MSentryRole mRole = (MSentryRole) query.execute(roleName); if (mRole == null) { - throw new SentryNoSuchObjectException("Role: " + roleName); + throw new SentryNoSuchObjectException("Role: " + roleName + " doesn't exist"); } else { query = pm.newQuery(MSentryPrivilege.class); MSentryPrivilege mPrivilege = getMSentryPrivilege(tPrivilege, pm); @@ -559,10 +585,10 @@ private void revokePartial(PersistenceManager pm, privCleaner.incPrivRemoval(); pm.makePersistent(persistedPriv); } else if (requestedPrivToRevoke.getAction().equalsIgnoreCase(AccessConstants.SELECT) - && (!currentPrivilege.getAction().equalsIgnoreCase(AccessConstants.INSERT))) { + && !currentPrivilege.getAction().equalsIgnoreCase(AccessConstants.INSERT)) { revokeRolePartial(pm, mRole, currentPrivilege, persistedPriv, AccessConstants.INSERT); } else if (requestedPrivToRevoke.getAction().equalsIgnoreCase(AccessConstants.INSERT) - && (!currentPrivilege.getAction().equalsIgnoreCase(AccessConstants.SELECT))) { + && !currentPrivilege.getAction().equalsIgnoreCase(AccessConstants.SELECT)) { revokeRolePartial(pm, mRole, currentPrivilege, persistedPriv, AccessConstants.SELECT); } } @@ -577,7 +603,7 @@ private void revokeRolePartial(PersistenceManager pm, MSentryRole mRole, currentPrivilege.setAction(AccessConstants.ALL); persistedPriv = getMSentryPrivilege(convertToTSentryPrivilege(currentPrivilege), pm); - if ((persistedPriv != null)&&(mRole.getPrivileges().contains(persistedPriv))) { + if (persistedPriv != null && mRole.getPrivileges().contains(persistedPriv)) { persistedPriv.removeRole(mRole); privCleaner.incPrivRemoval(); pm.makePersistent(persistedPriv); @@ -621,14 +647,14 @@ private void revokePrivilegeFromRole(PersistenceManager pm, TSentryPrivilege tPr private void populateChildren(PersistenceManager pm, Set roleNames, MSentryPrivilege priv, Set children) throws SentryInvalidInputException { Preconditions.checkNotNull(pm); - if ((!isNULL(priv.getServerName())) || (!isNULL(priv.getDbName())) - || (!isNULL(priv.getTableName()))) { + if (!isNULL(priv.getServerName()) || !isNULL(priv.getDbName()) + || !isNULL(priv.getTableName())) { // Get all TableLevel Privs Set childPrivs = getChildPrivileges(pm, roleNames, priv); for (MSentryPrivilege childPriv : childPrivs) { // Only recurse for table level privs.. 
- if ((!isNULL(childPriv.getDbName())) && (!isNULL(childPriv.getTableName())) - && (!isNULL(childPriv.getColumnName()))) { + if (!isNULL(childPriv.getDbName()) && !isNULL(childPriv.getTableName()) + && !isNULL(childPriv.getColumnName())) { populateChildren(pm, roleNames, childPriv, children); } // The method getChildPrivileges() didn't do filter on "action", @@ -657,7 +683,7 @@ private void populateChildren(PersistenceManager pm, Set roleNames, MSen private Set getChildPrivileges(PersistenceManager pm, Set roleNames, MSentryPrivilege parent) throws SentryInvalidInputException { // Column and URI do not have children - if ((!isNULL(parent.getColumnName())) || (!isNULL(parent.getURI()))) { + if (!isNULL(parent.getColumnName()) || !isNULL(parent.getURI())) { return new HashSet(); } @@ -743,8 +769,9 @@ private MSentryPrivilege getMSentryPrivilege(TSentryPrivilege tPriv, Persistence grantOption = false; } Object obj = query.execute(grantOption); - if (obj != null) + if (obj != null) { return (MSentryPrivilege) obj; + } return null; } @@ -752,25 +779,9 @@ public CommitContext dropSentryRole(String roleName) throws SentryNoSuchObjectException { boolean rollbackTransaction = true; PersistenceManager pm = null; - roleName = roleName.trim().toLowerCase(); try { pm = openTransaction(); - Query query = pm.newQuery(MSentryRole.class); - query.setFilter("this.roleName == t"); - query.declareParameters("java.lang.String t"); - query.setUnique(true); - MSentryRole sentryRole = (MSentryRole) query.execute(roleName); - if (sentryRole == null) { - throw new SentryNoSuchObjectException("Role " + roleName); - } else { - pm.retrieve(sentryRole); - int numPrivs = sentryRole.getPrivileges().size(); - sentryRole.removePrivileges(); - //with SENTRY-398 generic model - sentryRole.removeGMPrivileges(); - privCleaner.incPrivRemoval(numPrivs); - pm.deletePersistent(sentryRole); - } + dropSentryRoleCore(pm, roleName); CommitContext commit = commitUpdateTransaction(pm); rollbackTransaction = false; return commit; @@ -781,42 +792,38 @@ public CommitContext dropSentryRole(String roleName) } } + private void dropSentryRoleCore(PersistenceManager pm, String roleName) + throws SentryNoSuchObjectException { + String lRoleName = roleName.trim().toLowerCase(); + Query query = pm.newQuery(MSentryRole.class); + query.setFilter("this.roleName == t"); + query.declareParameters("java.lang.String t"); + query.setUnique(true); + MSentryRole sentryRole = (MSentryRole) query.execute(lRoleName); + if (sentryRole == null) { + throw new SentryNoSuchObjectException("Role: " + lRoleName + " doesn't exist"); + } else { + pm.retrieve(sentryRole); + int numPrivs = sentryRole.getPrivileges().size(); + sentryRole.removePrivileges(); + // with SENTRY-398 generic model + sentryRole.removeGMPrivileges(); + privCleaner.incPrivRemoval(numPrivs); + pm.deletePersistent(sentryRole); + } + } + public CommitContext alterSentryRoleAddGroups( String grantorPrincipal, String roleName, Set groupNames) throws SentryNoSuchObjectException { boolean rollbackTransaction = true; PersistenceManager pm = null; - roleName = roleName.trim().toLowerCase(); try { pm = openTransaction(); - Query query = pm.newQuery(MSentryRole.class); - query.setFilter("this.roleName == t"); - query.declareParameters("java.lang.String t"); - query.setUnique(true); - MSentryRole role = (MSentryRole) query.execute(roleName); - if (role == null) { - throw new SentryNoSuchObjectException("Role: " + roleName); - } else { - query = pm.newQuery(MSentryGroup.class); - 
query.setFilter("this.groupName == t"); - query.declareParameters("java.lang.String t"); - query.setUnique(true); - List groups = Lists.newArrayList(); - for (TSentryGroup tGroup : groupNames) { - String groupName = tGroup.getGroupName().trim(); - MSentryGroup group = (MSentryGroup) query.execute(groupName); - if (group == null) { - group = new MSentryGroup(groupName, System.currentTimeMillis(), - Sets.newHashSet(role)); - } - group.appendRole(role); - groups.add(group); - } - pm.makePersistentAll(groups); - CommitContext commit = commitUpdateTransaction(pm); - rollbackTransaction = false; - return commit; - } + alterSentryRoleAddGroupsCore(pm, roleName, groupNames); + CommitContext commit = commitUpdateTransaction(pm); + rollbackTransaction = false; + return commit; } finally { if (rollbackTransaction) { rollbackTransaction(pm); @@ -824,6 +831,35 @@ public CommitContext alterSentryRoleAddGroups( String grantorPrincipal, String r } } + private void alterSentryRoleAddGroupsCore(PersistenceManager pm, String roleName, + Set groupNames) throws SentryNoSuchObjectException { + String lRoleName = roleName.trim().toLowerCase(); + Query query = pm.newQuery(MSentryRole.class); + query.setFilter("this.roleName == t"); + query.declareParameters("java.lang.String t"); + query.setUnique(true); + MSentryRole role = (MSentryRole) query.execute(lRoleName); + if (role == null) { + throw new SentryNoSuchObjectException("Role: " + lRoleName + " doesn't exist"); + } else { + query = pm.newQuery(MSentryGroup.class); + query.setFilter("this.groupName == t"); + query.declareParameters("java.lang.String t"); + query.setUnique(true); + List groups = Lists.newArrayList(); + for (TSentryGroup tGroup : groupNames) { + String groupName = tGroup.getGroupName().trim(); + MSentryGroup group = (MSentryGroup) query.execute(groupName); + if (group == null) { + group = new MSentryGroup(groupName, System.currentTimeMillis(), Sets.newHashSet(role)); + } + group.appendRole(role); + groups.add(group); + } + pm.makePersistentAll(groups); + } + } + public CommitContext alterSentryRoleDeleteGroups(String roleName, Set groupNames) throws SentryNoSuchObjectException { @@ -838,7 +874,7 @@ public CommitContext alterSentryRoleDeleteGroups(String roleName, query.setUnique(true); MSentryRole role = (MSentryRole) query.execute(roleName); if (role == null) { - throw new SentryNoSuchObjectException("Role: " + roleName); + throw new SentryNoSuchObjectException("Role: " + roleName + " doesn't exist"); } else { query = pm.newQuery(MSentryGroup.class); query.setFilter("this.groupName == t"); @@ -879,7 +915,7 @@ MSentryRole getMSentryRoleByName(String roleName) query.setUnique(true); MSentryRole sentryRole = (MSentryRole) query.execute(roleName); if (sentryRole == null) { - throw new SentryNoSuchObjectException("Role " + roleName); + throw new SentryNoSuchObjectException("Role: " + roleName + " doesn't exist"); } else { pm.retrieve(sentryRole); } @@ -894,7 +930,9 @@ MSentryRole getMSentryRoleByName(String roleName) } private boolean hasAnyServerPrivileges(Set roleNames, String serverName) { - if ((roleNames.size() == 0)||(roleNames == null)) return false; + if (roleNames == null || roleNames.isEmpty()) { + return false; + } boolean rollbackTransaction = true; PersistenceManager pm = null; try { @@ -914,7 +952,7 @@ private boolean hasAnyServerPrivileges(Set roleNames, String serverName) Long numPrivs = (Long) query.execute(); rollbackTransaction = false; commitTransaction(pm); - return (numPrivs > 0); + return numPrivs > 0; } finally { if 
(rollbackTransaction) { rollbackTransaction(pm); @@ -923,7 +961,9 @@ private boolean hasAnyServerPrivileges(Set roleNames, String serverName) } List getMSentryPrivileges(Set roleNames, TSentryAuthorizable authHierarchy) { - if ((roleNames.size() == 0)||(roleNames == null)) return new ArrayList(); + if (roleNames == null || roleNames.isEmpty()) { + return new ArrayList(); + } boolean rollbackTransaction = true; PersistenceManager pm = null; try { @@ -936,17 +976,18 @@ List getMSentryPrivileges(Set roleNames, TSentryAuthor } StringBuilder filters = new StringBuilder("roles.contains(role) " + "&& (" + Joiner.on(" || ").join(rolesFiler) + ") "); - if ((authHierarchy != null) && (authHierarchy.getServer() != null)) { + if (authHierarchy != null && authHierarchy.getServer() != null) { filters.append("&& serverName == \"" + authHierarchy.getServer().toLowerCase() + "\""); if (authHierarchy.getDb() != null) { filters.append(" && ((dbName == \"" + authHierarchy.getDb().toLowerCase() + "\") || (dbName == \"__NULL__\")) && (URI == \"__NULL__\")"); - if ((authHierarchy.getTable() != null) - && !AccessConstants.ALL - .equalsIgnoreCase(authHierarchy.getTable())) { - filters.append(" && ((tableName == \"" + authHierarchy.getTable().toLowerCase() + "\") || (tableName == \"__NULL__\")) && (URI == \"__NULL__\")"); - if ((authHierarchy.getColumn() != null) - && !AccessConstants.ALL - .equalsIgnoreCase(authHierarchy.getColumn())) { + if (authHierarchy.getTable() != null + && !AccessConstants.ALL.equalsIgnoreCase(authHierarchy.getTable())) { + if (!AccessConstants.SOME.equalsIgnoreCase(authHierarchy.getTable())) { + filters.append(" && ((tableName == \"" + authHierarchy.getTable().toLowerCase() + "\") || (tableName == \"__NULL__\")) && (URI == \"__NULL__\")"); + } + if (authHierarchy.getColumn() != null + && !AccessConstants.ALL.equalsIgnoreCase(authHierarchy.getColumn()) + && !AccessConstants.SOME.equalsIgnoreCase(authHierarchy.getColumn())) { filters.append(" && ((columnName == \"" + authHierarchy.getColumn().toLowerCase() + "\") || (columnName == \"__NULL__\")) && (URI == \"__NULL__\")"); } } @@ -974,7 +1015,7 @@ List getMSentryPrivilegesByAuth(Set roleNames, TSentry pm = openTransaction(); Query query = pm.newQuery(MSentryPrivilege.class); StringBuilder filters = new StringBuilder(); - if ((roleNames.size() == 0)||(roleNames == null)) { + if (roleNames == null || roleNames.isEmpty()) { filters.append(" !roles.isEmpty() "); } else { query.declareVariables("org.apache.sentry.provider.db.service.model.MSentryRole role"); @@ -985,7 +1026,7 @@ List getMSentryPrivilegesByAuth(Set roleNames, TSentry filters.append("roles.contains(role) " + "&& (" + Joiner.on(" || ").join(rolesFiler) + ") "); } - if ((authHierarchy.getServer() != null)) { + if (authHierarchy.getServer() != null) { filters.append("&& serverName == \"" + authHierarchy.getServer().toLowerCase() + "\""); if (authHierarchy.getDb() != null) { @@ -1007,9 +1048,7 @@ List getMSentryPrivilegesByAuth(Set roleNames, TSentry // if no server, then return empty resultset return new ArrayList(); } - FetchGroup grp = pm.getFetchGroup( - org.apache.sentry.provider.db.service.model.MSentryPrivilege.class, - "fetchRole"); + FetchGroup grp = pm.getFetchGroup(MSentryPrivilege.class, "fetchRole"); grp.addMember("roles"); pm.getFetchPlan().addGroup("fetchRole"); query.setFilter(filters.toString()); @@ -1092,13 +1131,13 @@ public Set getTSentryPrivileges(Set roleNames, TSentry if (authHierarchy.getServer() == null) { throw new SentryInvalidInputException("serverName 
cannot be null !!"); } - if ((authHierarchy.getTable() != null) && (authHierarchy.getDb() == null)) { + if (authHierarchy.getTable() != null && authHierarchy.getDb() == null) { throw new SentryInvalidInputException("dbName cannot be null when tableName is present !!"); } - if ((authHierarchy.getColumn() != null) && (authHierarchy.getTable() == null)) { + if (authHierarchy.getColumn() != null && authHierarchy.getTable() == null) { throw new SentryInvalidInputException("tableName cannot be null when columnName is present !!"); } - if ((authHierarchy.getUri() == null) && (authHierarchy.getDb() == null)) { + if (authHierarchy.getUri() == null && authHierarchy.getDb() == null) { throw new SentryInvalidInputException("One of uri or dbName must not be null !!"); } return convertToTSentryPrivileges(getMSentryPrivileges(roleNames, authHierarchy)); @@ -1126,7 +1165,7 @@ private Set getMSentryRolesByGroupName(String groupName) query.setUnique(true); sentryGroup = (MSentryGroup) query.execute(groupName); if (sentryGroup == null) { - throw new SentryNoSuchObjectException("Group " + groupName); + throw new SentryNoSuchObjectException("Group: " + groupName + " doesn't exist"); } else { pm.retrieve(sentryGroup); } @@ -1270,7 +1309,7 @@ static String toAuthorizable(MSentryPrivilege privilege) { if (!isNULL(privilege.getAction()) && !privilege.getAction().equalsIgnoreCase(AccessConstants.ALL)) { authorizable - .add(KV_JOINER.join(ProviderConstants.PRIVILEGE_NAME.toLowerCase(), + .add(KV_JOINER.join(PolicyConstants.PRIVILEGE_NAME.toLowerCase(), privilege.getAction())); } return AUTHORIZABLE_JOINER.join(authorizable); @@ -1278,7 +1317,9 @@ static String toAuthorizable(MSentryPrivilege privilege) { @VisibleForTesting static Set toTrimedLower(Set s) { - if (null == s) return new HashSet(); + if (null == s) { + return new HashSet(); + } Set result = Sets.newHashSet(); for (String v : s) { result.add(v.trim().toLowerCase()); @@ -1330,7 +1371,7 @@ private TSentryGroup convertToTSentryGroup(MSentryGroup mSentryGroup) { return group; } - private TSentryPrivilege convertToTSentryPrivilege(MSentryPrivilege mSentryPrivilege) { + protected TSentryPrivilege convertToTSentryPrivilege(MSentryPrivilege mSentryPrivilege) { TSentryPrivilege privilege = new TSentryPrivilege(); convertToTSentryPrivilege(mSentryPrivilege, privilege); return privilege; @@ -1573,7 +1614,7 @@ private void dropOrRenamePrivilegeForAllRoles(PersistenceManager pm, List mPrivileges = getMSentryPrivileges(tPrivilege, pm); if (mPrivileges != null && !mPrivileges.isEmpty()) { for (MSentryPrivilege mPrivilege : mPrivileges) { - roleSet.addAll(ImmutableSet.copyOf((mPrivilege.getRoles()))); + roleSet.addAll(ImmutableSet.copyOf(mPrivilege.getRoles())); } } @@ -1856,12 +1897,15 @@ public void run() { */ public void incPrivRemoval(int numDeletions) { if (privCleanerThread != null) { - lock.lock(); - currentNotifies += numDeletions; - if (currentNotifies > NOTIFY_THRESHOLD) { - cond.signal(); + try { + lock.lock(); + currentNotifies += numDeletions; + if (currentNotifies > NOTIFY_THRESHOLD) { + cond.signal(); + } + } finally { + lock.unlock(); } - lock.unlock(); } } @@ -1968,4 +2012,342 @@ private void removeOrphanedPrivileges() { } } } + + // get all mapping data for [group,role] + public Map> getGroupNameRoleNamesMap() { + boolean rollbackTransaction = true; + PersistenceManager pm = null; + try { + pm = openTransaction(); + Query query = pm.newQuery(MSentryGroup.class); + List mSentryGroups = (List) query.execute(); + Map> sentryGroupNameRoleNamesMap = 
Maps.newHashMap(); + if (mSentryGroups != null) { + // change the List<MSentryGroup> -> Map<groupName, Set<roleName>> + for (MSentryGroup mSentryGroup : mSentryGroups) { + String groupName = mSentryGroup.getGroupName(); + Set<String> roleNames = Sets.newHashSet(); + for (MSentryRole mSentryRole : mSentryGroup.getRoles()) { + roleNames.add(mSentryRole.getRoleName()); + } + if (roleNames.size() > 0) { + sentryGroupNameRoleNamesMap.put(groupName, roleNames); + } + } + } + commitTransaction(pm); + rollbackTransaction = false; + return sentryGroupNameRoleNamesMap; + } finally { + if (rollbackTransaction) { + rollbackTransaction(pm); + } + } + } + + // get all mapping data for [role,privilege] + public Map<String, Set<TSentryPrivilege>> getRoleNameTPrivilegesMap() throws Exception { + boolean rollbackTransaction = true; + PersistenceManager pm = null; + try { + pm = openTransaction(); + Query query = pm.newQuery(MSentryRole.class); + List<MSentryRole> mSentryRoles = (List<MSentryRole>) query.execute(); + Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap = Maps.newHashMap(); + if (mSentryRoles != null) { + // change the List<MSentryRole> -> Map<roleName, Set<TSentryPrivilege>> + for (MSentryRole mSentryRole : mSentryRoles) { + Set<TSentryPrivilege> privilegeSet = convertToTSentryPrivileges(mSentryRole + .getPrivileges()); + if (privilegeSet != null && !privilegeSet.isEmpty()) { + sentryRolePrivilegesMap.put(mSentryRole.getRoleName(), privilegeSet); + } + } + } + commitTransaction(pm); + rollbackTransaction = false; + return sentryRolePrivilegesMap; + } finally { + if (rollbackTransaction) { + rollbackTransaction(pm); + } + } + } + + // Get all existing role names; will return an empty set + // if no role names exist. + public Set<String> getAllRoleNames() { + + boolean rollbackTransaction = true; + PersistenceManager pm = null; + + try { + pm = openTransaction(); + + Set<String> existRoleNames = getAllRoleNames(pm); + + commitTransaction(pm); + rollbackTransaction = false; + + return existRoleNames; + } finally { + if (rollbackTransaction) { + rollbackTransaction(pm); + } + } + } + + // get all existing role names + private Set<String> getAllRoleNames(PersistenceManager pm) { + Query query = pm.newQuery(MSentryRole.class); + List<MSentryRole> mSentryRoles = (List<MSentryRole>) query.execute(); + Set<String> existRoleNames = Sets.newHashSet(); + if (mSentryRoles != null) { + for (MSentryRole mSentryRole : mSentryRoles) { + existRoleNames.add(mSentryRole.getRoleName()); + } + } + return existRoleNames; + } + + // get all existing groups + private Map<String, MSentryGroup> getGroupNameTGroupMap(PersistenceManager pm) { + Query query = pm.newQuery(MSentryGroup.class); + List<MSentryGroup> mSentryGroups = (List<MSentryGroup>) query.execute(); + Map<String, MSentryGroup> existGroupsMap = Maps.newHashMap(); + if (mSentryGroups != null) { + // change the List<MSentryGroup> -> Map<groupName, MSentryGroup> + for (MSentryGroup mSentryGroup : mSentryGroups) { + existGroupsMap.put(mSentryGroup.getGroupName(), mSentryGroup); + } + } + return existGroupsMap; + } + + // get all existing privileges + private List<MSentryPrivilege> getPrivilegesList(PersistenceManager pm) { + Query query = pm.newQuery(MSentryPrivilege.class); + List<MSentryPrivilege> resultList = (List<MSentryPrivilege>) query.execute(); + if (resultList == null) { + resultList = Lists.newArrayList(); + } + return resultList; + } + + @VisibleForTesting + protected Map<String, MSentryRole> getRolesMap() { + boolean rollbackTransaction = true; + PersistenceManager pm = null; + try { + pm = openTransaction(); + + Query query = pm.newQuery(MSentryRole.class); + List<MSentryRole> mSentryRoles = (List<MSentryRole>) query.execute(); + Map<String, MSentryRole> existRolesMap = Maps.newHashMap(); + if (mSentryRoles != null) { + // change the List<MSentryRole> -> Map<roleName, MSentryRole> + for (MSentryRole mSentryRole : mSentryRoles) { + existRolesMap.put(mSentryRole.getRoleName(), mSentryRole); + } + } + + commitTransaction(pm);
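These accessors back the new export path: getGroupNameRoleNamesMap and getRoleNameTPrivilegesMap produce exactly the two maps that the import javadoc below describes. A sketch of consuming the group map from an export tool, printing the group=role1,role2 line format used in that javadoc; the map literal stands in for a live SentryStore:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch of dumping getGroupNameRoleNamesMap() output as "group=role1,role2"
// lines. Group and role names are invented for the example.
public class ExportSketch {
  public static void main(String[] args) {
    Map<String, Set<String>> groupRoles = new HashMap<String, Set<String>>();
    groupRoles.put("group1", new HashSet<String>(Arrays.asList("role1", "role2")));
    groupRoles.put("group2", new HashSet<String>(Arrays.asList("role2", "role3")));

    for (Map.Entry<String, Set<String>> e : groupRoles.entrySet()) {
      StringBuilder line = new StringBuilder(e.getKey()).append("=");
      String sep = "";
      for (String role : e.getValue()) {
        line.append(sep).append(role);
        sep = ",";
      }
      System.out.println(line); // e.g. group1=role1,role2
    }
  }
}
```

+ rollbackTransaction =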
false; + return existRolesMap; + } finally { + if (rollbackTransaction) { + rollbackTransaction(pm); + } + } + } + + @VisibleForTesting + protected Map getGroupNameTGroupMap() { + boolean rollbackTransaction = true; + PersistenceManager pm = null; + try { + pm = openTransaction(); + Map resultMap = getGroupNameTGroupMap(pm); + commitTransaction(pm); + rollbackTransaction = false; + return resultMap; + } finally { + if (rollbackTransaction) { + rollbackTransaction(pm); + } + } + } + + @VisibleForTesting + protected List getPrivilegesList() { + boolean rollbackTransaction = true; + PersistenceManager pm = null; + try { + pm = openTransaction(); + List resultList = getPrivilegesList(pm); + commitTransaction(pm); + rollbackTransaction = false; + return resultList; + } finally { + if (rollbackTransaction) { + rollbackTransaction(pm); + } + } + } + + /** + * Import the sentry mapping data. + * + * @param tSentryMappingData + * Include 2 maps to save the mapping data, the following is the example of the data + * structure: + * for the following mapping data: + * group1=role1,role2 + * group2=role2,role3 + * role1=server=server1->db=db1 + * role2=server=server1->db=db1->table=tbl1,server=server1->db=db1->table=tbl2 + * role3=server=server1->url=hdfs://localhost/path + * + * The GroupRolesMap in TSentryMappingData will be saved as: + * { + * TSentryGroup(group1)={role1, role2}, + * TSentryGroup(group2)={role2, role3} + * } + * The RolePrivilegesMap in TSentryMappingData will be saved as: + * { + * role1={TSentryPrivilege(server=server1->db=db1)}, + * role2={TSentryPrivilege(server=server1->db=db1->table=tbl1), + * TSentryPrivilege(server=server1->db=db1->table=tbl2)}, + * role3={TSentryPrivilege(server=server1->url=hdfs://localhost/path)} + * } + * @param isOverwriteForRole + * The option for merging or overwriting the existing data during import, true for + * overwriting, false for merging + */ + public void importSentryMetaData(TSentryMappingData tSentryMappingData, boolean isOverwriteForRole) + throws Exception { + boolean rollbackTransaction = true; + PersistenceManager pm = null; + // change all role name in lowercase + TSentryMappingData mappingData = lowercaseRoleName(tSentryMappingData); + try { + pm = openTransaction(); + Set existRoleNames = getAllRoleNames(pm); + // + Map> importedRoleGroupsMap = covertToRoleNameTGroupsMap(mappingData + .getGroupRolesMap()); + Set importedRoleNames = importedRoleGroupsMap.keySet(); + // if import with overwrite role, drop the duplicated roles in current DB first. 
+  // convert the Map[group -> roles] to a Map[role -> groups]
+  private Map<String, Set<TSentryGroup>> convertToRoleNameTGroupsMap(
+      Map<String, Set<String>> groupRolesMap) {
+    Map<String, Set<TSentryGroup>> roleGroupsMap = Maps.newHashMap();
+    if (groupRolesMap != null) {
+      for (Map.Entry<String, Set<String>> entry : groupRolesMap.entrySet()) {
+        Set<String> roleNames = entry.getValue();
+        if (roleNames != null) {
+          for (String roleName : roleNames) {
+            Set<TSentryGroup> tSentryGroups = roleGroupsMap.get(roleName);
+            if (tSentryGroups == null) {
+              tSentryGroups = Sets.newHashSet();
+            }
+            tSentryGroups.add(new TSentryGroup(entry.getKey()));
+            roleGroupsMap.put(roleName, tSentryGroups);
+          }
+        }
+      }
+    }
+    return roleGroupsMap;
+  }
+
+  private void importSentryGroupRoleMapping(PersistenceManager pm, Set<String> existRoleNames,
+      Map<String, Set<TSentryGroup>> importedRoleGroupsMap) throws Exception {
+    if (importedRoleGroupsMap == null || importedRoleGroupsMap.isEmpty()) {
+      return;
+    }
+    for (Map.Entry<String, Set<TSentryGroup>> entry : importedRoleGroupsMap.entrySet()) {
+      if (!existRoleNames.contains(entry.getKey())) {
+        createSentryRoleCore(pm, entry.getKey());
+      }
+      alterSentryRoleAddGroupsCore(pm, entry.getKey(), entry.getValue());
+    }
+  }
+
+  // drop every existing role that duplicates an imported role
+  private void dropDuplicatedRoleForImport(PersistenceManager pm, Set<String> existRoleNames,
+      Set<String> importedRoleNames) throws Exception {
+    Set<String> duplicatedRoleNames = Sets.intersection(existRoleNames, importedRoleNames);
+    for (String droppedRoleName : duplicatedRoleNames) {
+      dropSentryRoleCore(pm, droppedRoleName);
+    }
+  }
+
+  // convert all role names to lowercase
+  private TSentryMappingData lowercaseRoleName(TSentryMappingData tSentryMappingData) {
+    Map<String, Set<String>> sentryGroupRolesMap = tSentryMappingData.getGroupRolesMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap = tSentryMappingData
+        .getRolePrivilegesMap();
+
+    Map<String, Set<String>> newSentryGroupRolesMap = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> newSentryRolePrivilegesMap = Maps.newHashMap();
+    // for the [group,role] mapping data
+    for (Map.Entry<String, Set<String>> entry : sentryGroupRolesMap.entrySet()) {
+      Collection<String> lowercaseRoles = Collections2.transform(entry.getValue(),
+          new Function<String, String>() {
+            @Override
+            public String apply(String input) {
+              return input.toLowerCase();
+            }
+          });
+      newSentryGroupRolesMap.put(entry.getKey(), Sets.newHashSet(lowercaseRoles));
+    }
+
+    // for the [role,privilege] mapping data
+    for (String roleName : sentryRolePrivilegesMap.keySet()) {
+      newSentryRolePrivilegesMap.put(roleName.toLowerCase(), sentryRolePrivilegesMap.get(roleName));
+    }
+
+    tSentryMappingData.setGroupRolesMap(newSentryGroupRolesMap);
+    tSentryMappingData.setRolePrivilegesMap(newSentryRolePrivilegesMap);
+    return tSentryMappingData;
+  }
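// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): a quick illustration of the
// inversion performed by convertToRoleNameTGroupsMap above.
//
//   Map<String, Set<String>> groupRoles = Maps.newHashMap();
//   groupRoles.put("group1", Sets.newHashSet("role1", "role2"));
//   groupRoles.put("group2", Sets.newHashSet("role2"));
//   Map<String, Set<TSentryGroup>> roleGroups = convertToRoleNameTGroupsMap(groupRoles);
//   // roleGroups.get("role1") -> {TSentryGroup(group1)}
//   // roleGroups.get("role2") -> {TSentryGroup(group1), TSentryGroup(group2)}
// ---------------------------------------------------------------------------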
+  // import the mapping data for [role,privilege]
+  private void importSentryRolePrivilegeMapping(PersistenceManager pm, Set<String> existRoleNames,
+      Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap) throws Exception {
+    if (sentryRolePrivilegesMap != null) {
+      for (Map.Entry<String, Set<TSentryPrivilege>> entry : sentryRolePrivilegesMap.entrySet()) {
+        // if the role name doesn't exist, create it.
+        if (!existRoleNames.contains(entry.getKey())) {
+          createSentryRoleCore(pm, entry.getKey());
+          existRoleNames.add(entry.getKey());
+        }
+        // get the privileges for the role
+        Set<TSentryPrivilege> tSentryPrivileges = entry.getValue();
+        for (TSentryPrivilege tSentryPrivilege : tSentryPrivileges) {
+          alterSentryRoleGrantPrivilegeCore(pm, entry.getKey(), tSentryPrivilege);
+        }
+      }
+    }
+  }
 }
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStoreSchemaInfo.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStoreSchemaInfo.java
index 983e792fa..fdadcb8ec 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStoreSchemaInfo.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStoreSchemaInfo.java
@@ -37,7 +37,7 @@ public class SentryStoreSchemaInfo {
   private final String sentrySchemaVersions[];
   private final String sentryScriptDir;

-  private static final String SENTRY_VERSION = "1.5.0";
+  private static final String SENTRY_VERSION = "1.6.0";

   public SentryStoreSchemaInfo(String sentryScriptDir, String dbType)
       throws SentryUserException {
@@ -47,9 +47,7 @@ public SentryStoreSchemaInfo(String sentryScriptDir, String dbType)
     List<String> upgradeOrderList = new ArrayList<String>();
     String upgradeListFile = getSentryStoreScriptDir() + File.separator
         + VERSION_UPGRADE_LIST + "." + dbType;
-    try {
-      BufferedReader bfReader = new BufferedReader(new FileReader(
-          upgradeListFile));
+    try (BufferedReader bfReader = new BufferedReader(new FileReader(upgradeListFile))) {
       String currSchemaVersion;
       while ((currSchemaVersion = bfReader.readLine()) != null) {
         upgradeOrderList.add(currSchemaVersion.trim());
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/ServiceManager.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/ServiceManager.java
index 0e3c0bb33..9f921d4d3 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/ServiceManager.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/ServiceManager.java
@@ -21,8 +21,6 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;

-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.imps.CuratorFrameworkState;
 import org.apache.curator.x.discovery.ServiceDiscovery;
 import org.apache.curator.x.discovery.ServiceDiscoveryBuilder;
 import org.apache.curator.x.discovery.ServiceInstance;
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/ServiceRegister.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/ServiceRegister.java
index 1e17f9aed..79dfe48a6 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/ServiceRegister.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/ServiceRegister.java
@@ -18,7 +18,6 @@
 package org.apache.sentry.provider.db.service.persistent;

-import org.apache.curator.framework.imps.CuratorFrameworkState;
 import org.apache.curator.x.discovery.ServiceDiscoveryBuilder;
 import org.apache.curator.x.discovery.ServiceInstance;
 import org.apache.curator.x.discovery.details.InstanceSerializer;
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/ConfServlet.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/ConfServlet.java
new file mode 100644
index 000000000..9e7fca83e
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/ConfServlet.java
@@ -0,0 +1,69 @@
+package org.apache.sentry.provider.db.service.thrift;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Writer;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Servlet that prints out all Sentry configuration.
+ */
+public class ConfServlet extends HttpServlet {
+  public static final String CONF_CONTEXT_ATTRIBUTE = "sentry.conf";
+  public static final String FORMAT_JSON = "json";
+  public static final String FORMAT_XML = "xml";
+  public static final String FORMAT_PARAM = "format";
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws ServletException, IOException {
+    String format = request.getParameter(FORMAT_PARAM);
+    if (format == null) {
+      format = FORMAT_XML;
+    }
+
+    if (FORMAT_XML.equals(format)) {
+      response.setContentType("text/xml; charset=utf-8");
+    } else if (FORMAT_JSON.equals(format)) {
+      response.setContentType("application/json; charset=utf-8");
+    }
+
+    Configuration conf = (Configuration)getServletContext().getAttribute(
+        CONF_CONTEXT_ATTRIBUTE);
+    assert conf != null;
+
+    Writer out = response.getWriter();
+    if (FORMAT_JSON.equals(format)) {
+      Configuration.dumpConfiguration(conf, out);
+    } else if (FORMAT_XML.equals(format)) {
+      conf.writeXml(out);
+    } else {
+      response.sendError(HttpServletResponse.SC_BAD_REQUEST, "Bad format: " + format);
+    }
+    out.close();
+  }
+}
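Editorial note (not part of the patch): a client-side sketch of fetching the dump that ConfServlet produces. The host, port, and the "/conf" mount path are assumptions about how the servlet would be registered on the Sentry web server; this diff does not specify them.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class ConfServletFetchExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical endpoint; format=json selects the JSON dump branch.
        URL url = new URL("http://sentry-host:51000/conf?format=json");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // dump of the effective Configuration
          }
        }
      }
    }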
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryAuthFilter.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryAuthFilter.java
index 311fbb533..388e3296c 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryAuthFilter.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryAuthFilter.java
@@ -51,13 +51,14 @@ public class SentryAuthFilter extends AuthenticationFilter {
   @Override
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
       HttpServletResponse response) throws IOException, ServletException {
-    super.doFilter(filterChain, request, response);
     String userName = request.getRemoteUser();
     LOG.debug("Authenticating user: " + userName + " from request.");
     if (!allowUsers.contains(userName)) {
       response.sendError(HttpServletResponse.SC_FORBIDDEN,
-          userName + " is unauthorized. status code: " + HttpServletResponse.SC_FORBIDDEN);
+          "Unauthorized user status code: " + HttpServletResponse.SC_FORBIDDEN);
+      throw new ServletException(userName + " is unauthorized. status code: " + HttpServletResponse.SC_FORBIDDEN);
     }
+    super.doFilter(filterChain, request, response);
   }

   /**
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryMetrics.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryMetrics.java
index 55bec0b0f..6eb00a1c4 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryMetrics.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryMetrics.java
@@ -18,7 +18,9 @@
 package org.apache.sentry.provider.db.service.thrift;

 import com.codahale.metrics.ConsoleReporter;
+import com.codahale.metrics.Counter;
 import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Histogram;
 import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.Metric;
 import com.codahale.metrics.MetricRegistry;
@@ -69,6 +71,27 @@ public class SentryMetrics {
   public final Timer listPrivilegesByAuthorizableTimer = SentryMetricsServletContextListener.METRIC_REGISTRY.timer(
     MetricRegistry.name(SentryPolicyStoreProcessor.class, "list-privileges-by-authorizable"));

+  /**
+   * Return a Timer with the given name.
+   */
+  public final Timer getTimer(String name) {
+    return SentryMetricsServletContextListener.METRIC_REGISTRY.timer(name);
+  }
+
+  /**
+   * Return a Histogram with the given name.
+   */
+  public final Histogram getHistogram(String name) {
+    return SentryMetricsServletContextListener.METRIC_REGISTRY.histogram(name);
+  }
+
+  /**
+   * Return a Counter with the given name.
+ */ + public final Counter getCounter(String name) { + return SentryMetricsServletContextListener.METRIC_REGISTRY.counter(name); + } + private SentryMetrics() { registerMetricSet("gc", new GarbageCollectorMetricSet(), SentryMetricsServletContextListener.METRIC_REGISTRY); registerMetricSet("buffers", new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer()), diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyServiceClient.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyServiceClient.java index 7a9f0df59..de50adb1c 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyServiceClient.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyServiceClient.java @@ -28,17 +28,17 @@ public interface SentryPolicyServiceClient { - public void createRole(String requestorUserName, String roleName) throws SentryUserException; + void createRole(String requestorUserName, String roleName) throws SentryUserException; - public void dropRole(String requestorUserName, String roleName) throws SentryUserException; + void dropRole(String requestorUserName, String roleName) throws SentryUserException; - public void dropRoleIfExists(String requestorUserName, String roleName) + void dropRoleIfExists(String requestorUserName, String roleName) throws SentryUserException; - public Set listRolesByGroupName(String requestorUserName, String groupName) + Set listRolesByGroupName(String requestorUserName, String groupName) throws SentryUserException; - public Set listAllPrivilegesByRoleName(String requestorUserName, String roleName) + Set listAllPrivilegesByRoleName(String requestorUserName, String roleName) throws SentryUserException; /** @@ -50,115 +50,121 @@ public Set listAllPrivilegesByRoleName(String requestorUserNam * @return Set of thrift sentry privilege objects * @throws SentryUserException */ - public Set listPrivilegesByRoleName(String requestorUserName, String roleName, + Set listPrivilegesByRoleName(String requestorUserName, String roleName, List authorizable) throws SentryUserException; - public Set listRoles(String requestorUserName) throws SentryUserException; + Set listRoles(String requestorUserName) throws SentryUserException; - public Set listUserRoles(String requestorUserName) throws SentryUserException; + Set listUserRoles(String requestorUserName) throws SentryUserException; - public TSentryPrivilege grantURIPrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantURIPrivilege(String requestorUserName, String roleName, String server, String uri) throws SentryUserException; - public TSentryPrivilege grantURIPrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantURIPrivilege(String requestorUserName, String roleName, String server, String uri, Boolean grantOption) throws SentryUserException; - public void grantServerPrivilege(String requestorUserName, String roleName, String server, + void grantServerPrivilege(String requestorUserName, String roleName, String server, String action) throws SentryUserException; - public TSentryPrivilege grantServerPrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantServerPrivilege(String requestorUserName, String roleName, + String server, Boolean grantOption) throws SentryUserException; + + TSentryPrivilege grantServerPrivilege(String 
requestorUserName, String roleName, String server, String action, Boolean grantOption) throws SentryUserException; - public TSentryPrivilege grantDatabasePrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantDatabasePrivilege(String requestorUserName, String roleName, String server, String db, String action) throws SentryUserException; - public TSentryPrivilege grantDatabasePrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantDatabasePrivilege(String requestorUserName, String roleName, String server, String db, String action, Boolean grantOption) throws SentryUserException; - public TSentryPrivilege grantTablePrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantTablePrivilege(String requestorUserName, String roleName, String server, String db, String table, String action) throws SentryUserException; - public TSentryPrivilege grantTablePrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantTablePrivilege(String requestorUserName, String roleName, String server, String db, String table, String action, Boolean grantOption) throws SentryUserException; - public TSentryPrivilege grantColumnPrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantColumnPrivilege(String requestorUserName, String roleName, String server, String db, String table, String columnName, String action) throws SentryUserException; - public TSentryPrivilege grantColumnPrivilege(String requestorUserName, String roleName, + TSentryPrivilege grantColumnPrivilege(String requestorUserName, String roleName, String server, String db, String table, String columnName, String action, Boolean grantOption) throws SentryUserException; - public Set grantColumnsPrivileges(String requestorUserName, String roleName, + Set grantColumnsPrivileges(String requestorUserName, String roleName, String server, String db, String table, List columnNames, String action) throws SentryUserException; - public Set grantColumnsPrivileges(String requestorUserName, String roleName, + Set grantColumnsPrivileges(String requestorUserName, String roleName, String server, String db, String table, List columnNames, String action, Boolean grantOption) throws SentryUserException; - public void revokeURIPrivilege(String requestorUserName, String roleName, String server, + void revokeURIPrivilege(String requestorUserName, String roleName, String server, String uri) throws SentryUserException; - public void revokeURIPrivilege(String requestorUserName, String roleName, String server, + void revokeURIPrivilege(String requestorUserName, String roleName, String server, String uri, Boolean grantOption) throws SentryUserException; - public void revokeServerPrivilege(String requestorUserName, String roleName, String server) - throws SentryUserException; + void revokeServerPrivilege(String requestorUserName, String roleName, String server, + String action) throws SentryUserException; - public void revokeServerPrivilege(String requestorUserName, String roleName, String server, - Boolean grantOption) throws SentryUserException; + void revokeServerPrivilege(String requestorUserName, String roleName, String server, + String action, Boolean grantOption) throws SentryUserException; - public void revokeDatabasePrivilege(String requestorUserName, String roleName, String server, + void revokeServerPrivilege(String requestorUserName, String roleName, String server, + boolean grantOption) throws SentryUserException; + + void revokeDatabasePrivilege(String requestorUserName, 
String roleName, String server, String db, String action) throws SentryUserException; - public void revokeDatabasePrivilege(String requestorUserName, String roleName, String server, + void revokeDatabasePrivilege(String requestorUserName, String roleName, String server, String db, String action, Boolean grantOption) throws SentryUserException; - public void revokeTablePrivilege(String requestorUserName, String roleName, String server, + void revokeTablePrivilege(String requestorUserName, String roleName, String server, String db, String table, String action) throws SentryUserException; - public void revokeTablePrivilege(String requestorUserName, String roleName, String server, + void revokeTablePrivilege(String requestorUserName, String roleName, String server, String db, String table, String action, Boolean grantOption) throws SentryUserException; - public void revokeColumnPrivilege(String requestorUserName, String roleName, String server, + void revokeColumnPrivilege(String requestorUserName, String roleName, String server, String db, String table, String columnName, String action) throws SentryUserException; - public void revokeColumnPrivilege(String requestorUserName, String roleName, String server, + void revokeColumnPrivilege(String requestorUserName, String roleName, String server, String db, String table, String columnName, String action, Boolean grantOption) throws SentryUserException; - public void revokeColumnsPrivilege(String requestorUserName, String roleName, String server, + void revokeColumnsPrivilege(String requestorUserName, String roleName, String server, String db, String table, List columns, String action) throws SentryUserException; - public void revokeColumnsPrivilege(String requestorUserName, String roleName, String server, + void revokeColumnsPrivilege(String requestorUserName, String roleName, String server, String db, String table, List columns, String action, Boolean grantOption) throws SentryUserException; - public Set listPrivilegesForProvider(Set groups, ActiveRoleSet roleSet, + Set listPrivilegesForProvider(Set groups, ActiveRoleSet roleSet, Authorizable... 
authorizable) throws SentryUserException; - public void grantRoleToGroup(String requestorUserName, String groupName, String roleName) + void grantRoleToGroup(String requestorUserName, String groupName, String roleName) throws SentryUserException; - public void revokeRoleFromGroup(String requestorUserName, String groupName, String roleName) + void revokeRoleFromGroup(String requestorUserName, String groupName, String roleName) throws SentryUserException; - public void grantRoleToGroups(String requestorUserName, String roleName, Set groups) + void grantRoleToGroups(String requestorUserName, String roleName, Set groups) throws SentryUserException; - public void revokeRoleFromGroups(String requestorUserName, String roleName, Set groups) + void revokeRoleFromGroups(String requestorUserName, String roleName, Set groups) throws SentryUserException; - public void dropPrivileges(String requestorUserName, + void dropPrivileges(String requestorUserName, List authorizableObjects) throws SentryUserException; - public void renamePrivileges(String requestorUserName, + void renamePrivileges(String requestorUserName, List oldAuthorizables, List newAuthorizables) throws SentryUserException; - public Map listPrivilegsbyAuthorizable( + Map listPrivilegsbyAuthorizable( String requestorUserName, Set> authorizables, Set groups, ActiveRoleSet roleSet) throws SentryUserException; @@ -172,7 +178,15 @@ public Map listPrivilegsbyAuthorizable * @return The value of the propertyName * @throws SentryUserException */ - public String getConfigValue(String propertyName, String defaultValue) throws SentryUserException; + String getConfigValue(String propertyName, String defaultValue) throws SentryUserException; + + void close(); - public void close(); + // Import the sentry mapping data with map structure + void importPolicy(Map>> policyFileMappingData, + String requestorUserName, boolean isOverwriteRole) throws SentryUserException; + + // export the sentry mapping data with map structure + Map>> exportPolicy(String requestorUserName) + throws SentryUserException; } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyServiceClientDefaultImpl.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyServiceClientDefaultImpl.java index 44681ca5d..edc566128 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyServiceClientDefaultImpl.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyServiceClientDefaultImpl.java @@ -27,6 +27,7 @@ import javax.security.auth.callback.CallbackHandler; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SaslRpcServer; @@ -38,6 +39,9 @@ import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.core.model.db.AccessConstants; import org.apache.sentry.core.model.db.DBModelAuthorizable; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.apache.sentry.service.thrift.SentryServiceUtil; +import org.apache.sentry.service.thrift.ServiceConstants; import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; import org.apache.sentry.service.thrift.ServiceConstants.PrivilegeScope; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; @@ -58,8 +62,14 @@ import 
com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import com.google.common.collect.Sets; +/* + A Sentry Client in which all the operations are synchronized for thread safety + Note: When using this client, if there is an exception in RPC, socket can get into an inconsistent state. + So it is important to recreate the client, which uses a new socket. + */ public class SentryPolicyServiceClientDefaultImpl implements SentryPolicyServiceClient { private final Configuration conf; @@ -93,7 +103,7 @@ public UgiSaslClientTransport(String mechanism, String authorizationId, // open the SASL transport with using the current UserGroupInformation // This is needed to get the current login context stored @Override - public void open() throws TTransportException { + public synchronized void open() throws TTransportException { if (ugi == null) { baseOpen(); } else { @@ -158,9 +168,11 @@ public SentryPolicyServiceClientDefaultImpl(Configuration conf) throws IOExcepti throw new IOException("Transport exception while opening transport: " + e.getMessage(), e); } LOGGER.debug("Successfully opened transport: " + transport + " to " + serverAddress); + long maxMessageSize = conf.getLong(ServiceConstants.ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE, + ServiceConstants.ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE_DEFAULT); TMultiplexedProtocol protocol = new TMultiplexedProtocol( - new TBinaryProtocol(transport), - SentryPolicyStoreProcessor.SENTRY_POLICY_SERVICE_NAME); + new TBinaryProtocol(transport, maxMessageSize, maxMessageSize, true, true), + SentryPolicyStoreProcessor.SENTRY_POLICY_SERVICE_NAME); client = new SentryPolicyService.Client(protocol); LOGGER.debug("Successfully created client"); } @@ -179,19 +191,19 @@ public synchronized void createRole(String requestorUserName, String roleName) } } - public void dropRole(String requestorUserName, + public synchronized void dropRole(String requestorUserName, String roleName) throws SentryUserException { dropRole(requestorUserName, roleName, false); } - public void dropRoleIfExists(String requestorUserName, + public synchronized void dropRoleIfExists(String requestorUserName, String roleName) throws SentryUserException { dropRole(requestorUserName, roleName, true); } - private void dropRole(String requestorUserName, + private synchronized void dropRole(String requestorUserName, String roleName, boolean ifExists) throws SentryUserException { TDropSentryRoleRequest request = new TDropSentryRoleRequest(); @@ -235,7 +247,7 @@ public synchronized Set listRolesByGroupName( } } - public Set listAllPrivilegesByRoleName(String requestorUserName, String roleName) + public synchronized Set listAllPrivilegesByRoleName(String requestorUserName, String roleName) throws SentryUserException { return listPrivilegesByRoleName(requestorUserName, roleName, null); } @@ -248,7 +260,7 @@ public Set listAllPrivilegesByRoleName(String requestorUserNam * @return Set of thrift sentry privilege objects * @throws SentryUserException */ - public Set listPrivilegesByRoleName(String requestorUserName, + public synchronized Set listPrivilegesByRoleName(String requestorUserName, String roleName, List authorizable) throws SentryUserException { TListSentryPrivilegesRequest request = new TListSentryPrivilegesRequest(); @@ -269,59 +281,70 @@ public Set listPrivilegesByRoleName(String requestorUserName, } } - public Set listRoles(String requestorUserName) + 
public synchronized Set listRoles(String requestorUserName) throws SentryUserException { return listRolesByGroupName(requestorUserName, null); } - public Set listUserRoles(String requestorUserName) + public synchronized Set listUserRoles(String requestorUserName) throws SentryUserException { return listRolesByGroupName(requestorUserName, AccessConstants.ALL); } - public TSentryPrivilege grantURIPrivilege(String requestorUserName, + public synchronized TSentryPrivilege grantURIPrivilege(String requestorUserName, String roleName, String server, String uri) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.URI, server, uri, null, null, null, AccessConstants.ALL); } - public TSentryPrivilege grantURIPrivilege(String requestorUserName, + public synchronized TSentryPrivilege grantURIPrivilege(String requestorUserName, String roleName, String server, String uri, Boolean grantOption) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.URI, server, uri, null, null, null, AccessConstants.ALL, grantOption); } - public void grantServerPrivilege(String requestorUserName, + public synchronized void grantServerPrivilege(String requestorUserName, String roleName, String server, String action) throws SentryUserException { grantPrivilege(requestorUserName, roleName, PrivilegeScope.SERVER, server, null, null, null, null, action); } - public TSentryPrivilege grantServerPrivilege(String requestorUserName, + @Deprecated + /*** + * Should use grantServerPrivilege(String requestorUserName, + * String roleName, String server, String action, Boolean grantOption) + */ + public synchronized TSentryPrivilege grantServerPrivilege(String requestorUserName, + String roleName, String server, Boolean grantOption) throws SentryUserException { + return grantServerPrivilege(requestorUserName, roleName, server, + AccessConstants.ALL, grantOption); + } + + public synchronized TSentryPrivilege grantServerPrivilege(String requestorUserName, String roleName, String server, String action, Boolean grantOption) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.SERVER, server, null, null, null, null, action, grantOption); } - public TSentryPrivilege grantDatabasePrivilege(String requestorUserName, + public synchronized TSentryPrivilege grantDatabasePrivilege(String requestorUserName, String roleName, String server, String db, String action) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.DATABASE, server, null, db, null, null, action); } - public TSentryPrivilege grantDatabasePrivilege(String requestorUserName, + public synchronized TSentryPrivilege grantDatabasePrivilege(String requestorUserName, String roleName, String server, String db, String action, Boolean grantOption) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.DATABASE, server, null, db, null, null, action, grantOption); } - public TSentryPrivilege grantTablePrivilege(String requestorUserName, + public synchronized TSentryPrivilege grantTablePrivilege(String requestorUserName, String roleName, String server, String db, String table, String action) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.TABLE, server, @@ -329,14 +352,14 @@ public TSentryPrivilege grantTablePrivilege(String requestorUserName, db, table, null, action); } - public TSentryPrivilege grantTablePrivilege(String requestorUserName, + 
public synchronized TSentryPrivilege grantTablePrivilege(String requestorUserName, String roleName, String server, String db, String table, String action, Boolean grantOption) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.TABLE, server, null, db, table, null, action, grantOption); } - public TSentryPrivilege grantColumnPrivilege(String requestorUserName, + public synchronized TSentryPrivilege grantColumnPrivilege(String requestorUserName, String roleName, String server, String db, String table, String columnName, String action) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.COLUMN, server, @@ -344,14 +367,14 @@ public TSentryPrivilege grantColumnPrivilege(String requestorUserName, db, table, columnName, action); } - public TSentryPrivilege grantColumnPrivilege(String requestorUserName, + public synchronized TSentryPrivilege grantColumnPrivilege(String requestorUserName, String roleName, String server, String db, String table, String columnName, String action, Boolean grantOption) throws SentryUserException { return grantPrivilege(requestorUserName, roleName, PrivilegeScope.COLUMN, server, null, db, table, columnName, action, grantOption); } - public Set grantColumnsPrivileges(String requestorUserName, + public synchronized Set grantColumnsPrivileges(String requestorUserName, String roleName, String server, String db, String table, List columnNames, String action) throws SentryUserException { return grantPrivileges(requestorUserName, roleName, PrivilegeScope.COLUMN, server, @@ -359,7 +382,7 @@ public Set grantColumnsPrivileges(String requestorUserName, db, table, columnNames, action); } - public Set grantColumnsPrivileges(String requestorUserName, + public synchronized Set grantColumnsPrivileges(String requestorUserName, String roleName, String server, String db, String table, List columnNames, String action, Boolean grantOption) throws SentryUserException { return grantPrivileges(requestorUserName, roleName, PrivilegeScope.COLUMN, @@ -409,7 +432,7 @@ private TSentryPrivilege grantPrivilege(String requestorUserName, request.setProtocol_version(ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT); request.setRequestorUserName(requestorUserName); request.setRoleName(roleName); - Set privileges = convertColumnPrivilege(requestorUserName, scope, + Set privileges = convertColumnPrivilege(scope, serverName, uri, db, table, column, action, grantOption); request.setPrivileges(privileges); try { @@ -442,7 +465,7 @@ private Set grantPrivileges(String requestorUserName, request.setProtocol_version(ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT); request.setRequestorUserName(requestorUserName); request.setRoleName(roleName); - Set privileges = convertColumnPrivileges(requestorUserName, scope, + Set privileges = convertColumnPrivileges(scope, serverName, uri, db, table, columns, action, grantOption); request.setPrivileges(privileges); try { @@ -454,49 +477,56 @@ private Set grantPrivileges(String requestorUserName, } } - public void revokeURIPrivilege(String requestorUserName, + public synchronized void revokeURIPrivilege(String requestorUserName, String roleName, String server, String uri) throws SentryUserException { revokePrivilege(requestorUserName, roleName, PrivilegeScope.URI, server, uri, null, null, null, AccessConstants.ALL); } - public void revokeURIPrivilege(String requestorUserName, + public synchronized void revokeURIPrivilege(String requestorUserName, String roleName, String server, String uri, 
Boolean grantOption) throws SentryUserException { revokePrivilege(requestorUserName, roleName, PrivilegeScope.URI, server, uri, null, null, null, AccessConstants.ALL, grantOption); } - public void revokeServerPrivilege(String requestorUserName, - String roleName, String server) + public synchronized void revokeServerPrivilege(String requestorUserName, + String roleName, String server, String action) + throws SentryUserException { + revokePrivilege(requestorUserName, roleName, + PrivilegeScope.SERVER, server, null, null, null, null, action); + } + + public synchronized void revokeServerPrivilege(String requestorUserName, + String roleName, String server, String action, Boolean grantOption) throws SentryUserException { revokePrivilege(requestorUserName, roleName, - PrivilegeScope.SERVER, server, null, null, null, null, AccessConstants.ALL); + PrivilegeScope.SERVER, server, null, null, null, null, action, grantOption); } - public void revokeServerPrivilege(String requestorUserName, - String roleName, String server, Boolean grantOption) + public synchronized void revokeServerPrivilege(String requestorUserName, + String roleName, String server, boolean grantOption) throws SentryUserException { revokePrivilege(requestorUserName, roleName, - PrivilegeScope.SERVER, server, null, null, null, null, AccessConstants.ALL, grantOption); + PrivilegeScope.SERVER, server, null, null, null, null, AccessConstants.ALL, grantOption); } - public void revokeDatabasePrivilege(String requestorUserName, + public synchronized void revokeDatabasePrivilege(String requestorUserName, String roleName, String server, String db, String action) throws SentryUserException { revokePrivilege(requestorUserName, roleName, PrivilegeScope.DATABASE, server, null, db, null, null, action); } - public void revokeDatabasePrivilege(String requestorUserName, + public synchronized void revokeDatabasePrivilege(String requestorUserName, String roleName, String server, String db, String action, Boolean grantOption) throws SentryUserException { revokePrivilege(requestorUserName, roleName, PrivilegeScope.DATABASE, server, null, db, null, null, action, grantOption); } - public void revokeTablePrivilege(String requestorUserName, + public synchronized void revokeTablePrivilege(String requestorUserName, String roleName, String server, String db, String table, String action) throws SentryUserException { revokePrivilege(requestorUserName, roleName, @@ -504,7 +534,7 @@ public void revokeTablePrivilege(String requestorUserName, db, table, null, action); } - public void revokeTablePrivilege(String requestorUserName, + public synchronized void revokeTablePrivilege(String requestorUserName, String roleName, String server, String db, String table, String action, Boolean grantOption) throws SentryUserException { revokePrivilege(requestorUserName, roleName, @@ -512,39 +542,39 @@ public void revokeTablePrivilege(String requestorUserName, db, table, null, action, grantOption); } - public void revokeColumnPrivilege(String requestorUserName, String roleName, + public synchronized void revokeColumnPrivilege(String requestorUserName, String roleName, String server, String db, String table, String columnName, String action) throws SentryUserException { ImmutableList.Builder listBuilder = ImmutableList.builder(); listBuilder.add(columnName); revokePrivilege(requestorUserName, roleName, - PrivilegeScope.TABLE, server, null, + PrivilegeScope.COLUMN, server, null, db, table, listBuilder.build(), action); } - public void revokeColumnPrivilege(String requestorUserName, 
String roleName, + public synchronized void revokeColumnPrivilege(String requestorUserName, String roleName, String server, String db, String table, String columnName, String action, Boolean grantOption) throws SentryUserException { ImmutableList.Builder listBuilder = ImmutableList.builder(); listBuilder.add(columnName); revokePrivilege(requestorUserName, roleName, - PrivilegeScope.TABLE, server, null, + PrivilegeScope.COLUMN, server, null, db, table, listBuilder.build(), action, grantOption); } - public void revokeColumnsPrivilege(String requestorUserName, String roleName, + public synchronized void revokeColumnsPrivilege(String requestorUserName, String roleName, String server, String db, String table, List columns, String action) throws SentryUserException { revokePrivilege(requestorUserName, roleName, - PrivilegeScope.TABLE, server, null, + PrivilegeScope.COLUMN, server, null, db, table, columns, action); } - public void revokeColumnsPrivilege(String requestorUserName, String roleName, + public synchronized void revokeColumnsPrivilege(String requestorUserName, String roleName, String server, String db, String table, List columns, String action, Boolean grantOption) throws SentryUserException { revokePrivilege(requestorUserName, roleName, - PrivilegeScope.TABLE, server, null, + PrivilegeScope.COLUMN, server, null, db, table, columns, action, grantOption); } @@ -563,7 +593,7 @@ private void revokePrivilege(String requestorUserName, String roleName, request.setProtocol_version(ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT); request.setRequestorUserName(requestorUserName); request.setRoleName(roleName); - Set privileges = convertColumnPrivileges(requestorUserName, scope, + Set privileges = convertColumnPrivileges(scope, serverName, uri, db, table, columns, action, grantOption); request.setPrivileges(privileges); try { @@ -574,7 +604,7 @@ private void revokePrivilege(String requestorUserName, String roleName, } } - private Set convertColumnPrivileges(String requestorUserName, + private Set convertColumnPrivileges( PrivilegeScope scope, String serverName, String uri, String db, String table, List columns, String action, Boolean grantOption) { ImmutableSet.Builder setBuilder = ImmutableSet.builder(); @@ -608,7 +638,7 @@ private Set convertColumnPrivileges(String requestorUserName, return setBuilder.build(); } - private Set convertColumnPrivilege(String requestorUserName, + private Set convertColumnPrivilege( PrivilegeScope scope, String serverName, String uri, String db, String table, String column, String action, Boolean grantOption) { ImmutableSet.Builder setBuilder = ImmutableSet.builder(); @@ -637,13 +667,13 @@ private TSentryGrantOption convertTSentryGrantOption(Boolean grantOption) { return TSentryGrantOption.FALSE; } - public Set listPrivilegesForProvider(Set groups, ActiveRoleSet roleSet, Authorizable... authorizable) + public synchronized Set listPrivilegesForProvider(Set groups, ActiveRoleSet roleSet, Authorizable... authorizable) throws SentryUserException { TSentryActiveRoleSet thriftRoleSet = new TSentryActiveRoleSet(roleSet.isAll(), roleSet.getRoles()); TListSentryPrivilegesForProviderRequest request = new TListSentryPrivilegesForProviderRequest(ThriftConstants. 
TSENTRY_SERVICE_VERSION_CURRENT, groups, thriftRoleSet);
-    if ((authorizable != null)&&(authorizable.length > 0)) {
+    if (authorizable != null && authorizable.length > 0) {
       TSentryAuthorizable tSentryAuthorizable = setupSentryAuthorizable(Lists
           .newArrayList(authorizable));
       request.setAuthorizableHierarchy(tSentryAuthorizable);
@@ -784,7 +814,7 @@ public synchronized Map listPrivilegsbyAuthorizable
    * @return The value of the propertyName
    * @throws SentryUserException
    */
-  public String getConfigValue(String propertyName, String defaultValue)
+  public synchronized String getConfigValue(String propertyName, String defaultValue)
       throws SentryUserException {
     TSentryConfigValueRequest request = new TSentryConfigValueRequest(
         ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT, propertyName);
@@ -800,9 +830,116 @@ public String getConfigValue(String propertyName, String defaultValue)
-  public void close() {
+  public synchronized void close() {
     if (transport != null) {
       transport.close();
     }
   }
+
+  /**
+   * Import the sentry mapping data: convert the mapping data from the map structure to
+   * TSentryMappingData, and call the import API.
+   *
+   * @param policyFileMappingData
+   *        Contains the two maps that hold the mapping data. For example, given the
+   *        following mapping data:
+   *        group1=role1,role2
+   *        group2=role2,role3
+   *        role1=server=server1->db=db1
+   *        role2=server=server1->db=db1->table=tbl1,server=server1->db=db1->table=tbl2
+   *        role3=server=server1->url=hdfs://localhost/path
+   *
+   *        the policyFileMappingData will be passed in as:
+   *        {
+   *        groups={group1=[role1, role2], group2=[role2, role3]},
+   *        roles={role1=[server=server1->db=db1],
+   *        role2=[server=server1->db=db1->table=tbl1,server=server1->db=db1->table=tbl2],
+   *        role3=[server=server1->url=hdfs://localhost/path]
+   *        }
+   *        }
+   * @param requestorUserName
+   *        The name of the requesting user
+   * @param isOverwriteRole
+   *        True overwrites duplicated roles during import; false merges with them
+   */
+  public synchronized void importPolicy(Map<String, Map<String, Set<String>>> policyFileMappingData,
+      String requestorUserName, boolean isOverwriteRole)
+      throws SentryUserException {
+    try {
+      TSentryMappingData tSentryMappingData = new TSentryMappingData();
+      // convert the mapping data for [group,role] from the map structure to
+      // TSentryMappingData.GroupRolesMap
+      tSentryMappingData.setGroupRolesMap(policyFileMappingData.get(PolicyFileConstants.GROUPS));
+      // convert the mapping data for [role,privilege] from the map structure to
+      // TSentryMappingData.RolePrivilegesMap
+      tSentryMappingData
+          .setRolePrivilegesMap(convertRolePrivilegesMapForSentryDB(policyFileMappingData
+              .get(PolicyFileConstants.ROLES)));
+      TSentryImportMappingDataRequest request = new TSentryImportMappingDataRequest(
+          ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT, requestorUserName, isOverwriteRole,
+          tSentryMappingData);
+      TSentryImportMappingDataResponse response = client.import_sentry_mapping_data(request);
+      Status.throwIfNotOk(response.getStatus());
+    } catch (TException e) {
+      throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e);
+    }
+  }
+
+  // convert the mapping data for [role,privilege] from the map structure to
+  // TSentryMappingData.RolePrivilegesMap
+  private Map<String, Set<TSentryPrivilege>> convertRolePrivilegesMapForSentryDB(
+      Map<String, Set<String>> rolePrivilegesMap) {
+    Map<String, Set<TSentryPrivilege>> rolePrivilegesMapResult = Maps.newHashMap();
+    if (rolePrivilegesMap != null) {
+      for (Map.Entry<String, Set<String>> entry : rolePrivilegesMap.entrySet()) {
+        Set<TSentryPrivilege> tempTSentryPrivileges = Sets.newHashSet();
+        Set<String> tempPrivileges = entry.getValue();
+        for (String tempPrivilege : tempPrivileges) {
tempTSentryPrivileges.add(SentryServiceUtil.convertToTSentryPrivilege(tempPrivilege)); + } + rolePrivilegesMapResult.put(entry.getKey(), tempTSentryPrivileges); + } + } + return rolePrivilegesMapResult; + } + + // export the sentry mapping data with map structure + public synchronized Map>> exportPolicy(String requestorUserName) + throws SentryUserException { + TSentryExportMappingDataRequest request = new TSentryExportMappingDataRequest( + ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT, requestorUserName); + try { + TSentryExportMappingDataResponse response = client.export_sentry_mapping_data(request); + Status.throwIfNotOk(response.getStatus()); + TSentryMappingData tSentryMappingData = response.getMappingData(); + Map>> resultMap = Maps.newHashMap(); + resultMap.put(PolicyFileConstants.GROUPS, tSentryMappingData.getGroupRolesMap()); + resultMap.put(PolicyFileConstants.ROLES, + convertRolePrivilegesMapForPolicyFile(tSentryMappingData.getRolePrivilegesMap())); + return resultMap; + } catch (TException e) { + throw new SentryUserException(THRIFT_EXCEPTION_MESSAGE, e); + } + } + + // convert the mapping data for [roleName,privilege] from TSentryMappingData.RolePrivilegesMap to + // map structure + private Map> convertRolePrivilegesMapForPolicyFile( + Map> rolePrivilegesMap) { + Map> rolePrivilegesMapForFile = Maps.newHashMap(); + if (rolePrivilegesMap != null) { + for (Map.Entry> entry : rolePrivilegesMap.entrySet()) { + Set tempSentryPrivileges = entry.getValue(); + Set tempStrPrivileges = Sets.newHashSet(); + for (TSentryPrivilege tSentryPrivilege : tempSentryPrivileges) { + // convert TSentryPrivilege to privilege in string + String privilegeStr = SentryServiceUtil.convertTSentryPrivilegeToStr(tSentryPrivilege); + if (!StringUtils.isEmpty(privilegeStr)) { + tempStrPrivileges.add(privilegeStr); + } + } + rolePrivilegesMapForFile.put(entry.getKey(), tempStrPrivileges); + } + } + return rolePrivilegesMapForFile; + } } \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java index b4c49da1d..8881d8278 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java @@ -18,26 +18,16 @@ package org.apache.sentry.provider.db.service.thrift; -import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; -import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.regex.Pattern; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.codahale.metrics.Timer; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; import org.apache.sentry.SentryUserException; import org.apache.sentry.core.model.db.AccessConstants; import org.apache.sentry.provider.common.GroupMappingService; @@ -47,6 
+37,7 @@ import org.apache.sentry.provider.db.SentryNoSuchObjectException; import org.apache.sentry.provider.db.SentryPolicyStorePlugin; import org.apache.sentry.provider.db.SentryPolicyStorePlugin.SentryPluginException; +import org.apache.sentry.provider.db.SentryThriftAPIMismatchException; import org.apache.sentry.provider.db.log.entity.JsonLogEntity; import org.apache.sentry.provider.db.log.entity.JsonLogEntityFactory; import org.apache.sentry.provider.db.log.util.Constants; @@ -55,10 +46,9 @@ import org.apache.sentry.provider.db.service.persistent.SentryStore; import org.apache.sentry.provider.db.service.persistent.ServiceRegister; import org.apache.sentry.provider.db.service.thrift.PolicyStoreConstants.PolicyStoreServerConfig; +import org.apache.sentry.service.thrift.ServiceConstants; import org.apache.sentry.service.thrift.ServiceConstants.ConfUtilties; -import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; -import org.apache.sentry.service.thrift.ProcessorFactory; import org.apache.sentry.service.thrift.ServiceConstants.ThriftConstants; import org.apache.sentry.service.thrift.Status; import org.apache.sentry.service.thrift.TSentryResponseStatus; @@ -66,6 +56,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.codahale.metrics.Timer; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; @@ -101,9 +92,9 @@ public SentryPolicyStoreProcessor(String name, Configuration conf) throws Except this.notificationHandlerInvoker = new NotificationHandlerInvoker(conf, createHandlers(conf)); isReady = false; - if(conf.getBoolean(ServerConfig.SENTRY_HA_ENABLED, - ServerConfig.SENTRY_HA_ENABLED_DEFAULT)){ - haContext = HAContext.getHAContext(conf); + if (conf.getBoolean(ServerConfig.SENTRY_HA_ENABLED, + ServerConfig.SENTRY_HA_ENABLED_DEFAULT)) { + haContext = HAContext.getHAServerContext(conf); sentryStore = new SentryStore(conf); ServiceRegister reg = new ServiceRegister(haContext); reg.regService(conf.get(ServerConfig.RPC_ADDRESS), @@ -139,7 +130,7 @@ private void initMetrics() { sentryMetrics.addSentryStoreGauges(sentryStore); String sentryReporting = conf.get(ServerConfig.SENTRY_REPORTER); - if( sentryReporting != null) { + if (sentryReporting != null) { SentryMetrics.Reporting reporting; try { reporting = SentryMetrics.Reporting.valueOf(sentryReporting.toUpperCase()); @@ -160,6 +151,7 @@ public void stop() { try { haContext.getCuratorFramework().close(); } catch (Exception e) { + LOGGER.warn("Error in stopping processor", e); } } } @@ -215,7 +207,8 @@ private boolean inAdminGroups(Set requestorGroups) { requestorGroups = toTrimedLower(requestorGroups); if (Sets.intersection(adminGroups, requestorGroups).isEmpty()) { return false; - } else return true; + } + return true; } private void authorize(String requestorUser, Set requestorGroups) throws SentryAccessDeniedException { @@ -233,6 +226,7 @@ public TCreateSentryRoleResponse create_sentry_role( final Timer.Context timerContext = sentryMetrics.createRoleTimer.time(); TCreateSentryRoleResponse response = new TCreateSentryRoleResponse(); try { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(request.getRequestorUserName())); CommitContext commitContext = sentryStore.createSentryRole(request.getRoleName()); @@ -246,6 +240,9 @@ public TCreateSentryRoleResponse create_sentry_role( } catch 
(SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); LOGGER.error(msg, e); @@ -254,8 +251,14 @@ public TCreateSentryRoleResponse create_sentry_role( timerContext.stop(); } - AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance().createJsonLogEntity( - request, response, conf).toJsonFormatLog()); + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, response, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. + String msg = "Error creating audit log for create role: " + e.getMessage(); + LOGGER.error(msg, e); + } return response; } @@ -266,6 +269,7 @@ public TCreateSentryRoleResponse create_sentry_role( TAlterSentryRoleGrantPrivilegeResponse response = new TAlterSentryRoleGrantPrivilegeResponse(); try { + validateClientVersion(request.getProtocol_version()); // There should only one field be set if ( !(request.isSetPrivileges()^request.isSetPrivilege()) ) { throw new SentryUserException("SENTRY API version is not right!"); @@ -288,7 +292,7 @@ public TCreateSentryRoleResponse create_sentry_role( plugin.onAlterSentryRoleGrantPrivilege(request); } } catch (SentryNoSuchObjectException e) { - String msg = "Role: " + request.getRoleName() + " doesn't exist."; + String msg = "Role: " + request.getRoleName() + " doesn't exist"; LOGGER.error(msg, e); response.setStatus(Status.NoSuchObject(msg, e)); } catch (SentryInvalidInputException e) { @@ -298,6 +302,9 @@ public TCreateSentryRoleResponse create_sentry_role( } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); LOGGER.error(msg, e); @@ -306,10 +313,16 @@ public TCreateSentryRoleResponse create_sentry_role( timerContext.stop(); } - Set jsonLogEntitys = JsonLogEntityFactory.getInstance().createJsonLogEntitys( - request, response, conf); - for (JsonLogEntity jsonLogEntity : jsonLogEntitys) { - AUDIT_LOGGER.info(jsonLogEntity.toJsonFormatLog()); + try { + Set jsonLogEntitys = JsonLogEntityFactory.getInstance().createJsonLogEntitys( + request, response, conf); + for (JsonLogEntity jsonLogEntity : jsonLogEntitys) { + AUDIT_LOGGER.info(jsonLogEntity.toJsonFormatLog()); + } + } catch (Exception e) { + // if any exception, log the exception. 
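+          // deliberately swallowed: a failure to write the audit log must not fail the RPC itself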
+ String msg = "Error creating audit log for grant privilege to role: " + e.getMessage(); + LOGGER.error(msg, e); } return response; } @@ -320,6 +333,7 @@ public TCreateSentryRoleResponse create_sentry_role( final Timer.Context timerContext = sentryMetrics.revokeTimer.time(); TAlterSentryRoleRevokePrivilegeResponse response = new TAlterSentryRoleRevokePrivilegeResponse(); try { + validateClientVersion(request.getProtocol_version()); // There should only one field be set if ( !(request.isSetPrivileges()^request.isSetPrivilege()) ) { throw new SentryUserException("SENTRY API version is not right!"); @@ -363,6 +377,9 @@ public TCreateSentryRoleResponse create_sentry_role( } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); LOGGER.error(msg, e); @@ -371,10 +388,16 @@ public TCreateSentryRoleResponse create_sentry_role( timerContext.stop(); } - Set jsonLogEntitys = JsonLogEntityFactory.getInstance().createJsonLogEntitys( - request, response, conf); - for (JsonLogEntity jsonLogEntity : jsonLogEntitys) { - AUDIT_LOGGER.info(jsonLogEntity.toJsonFormatLog()); + try { + Set jsonLogEntitys = JsonLogEntityFactory.getInstance().createJsonLogEntitys( + request, response, conf); + for (JsonLogEntity jsonLogEntity : jsonLogEntitys) { + AUDIT_LOGGER.info(jsonLogEntity.toJsonFormatLog()); + } + } catch (Exception e) { + // if any exception, log the exception. + String msg = "Error creating audit log for revoke privilege from role: " + e.getMessage(); + LOGGER.error(msg, e); } return response; } @@ -386,6 +409,7 @@ public TDropSentryRoleResponse drop_sentry_role( TDropSentryRoleResponse response = new TDropSentryRoleResponse(); TSentryResponseStatus status; try { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(request.getRequestorUserName())); CommitContext commitContext = sentryStore.dropSentryRole(request.getRoleName()); @@ -396,12 +420,15 @@ public TDropSentryRoleResponse drop_sentry_role( plugin.onDropSentryRole(request); } } catch (SentryNoSuchObjectException e) { - String msg = "Role :" + request + " does not exist."; + String msg = "Role :" + request + " doesn't exist"; LOGGER.error(msg, e); response.setStatus(Status.NoSuchObject(msg, e)); } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); LOGGER.error(msg, e); @@ -410,8 +437,14 @@ public TDropSentryRoleResponse drop_sentry_role( timerContext.stop(); } - AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance().createJsonLogEntity( - request, response, conf).toJsonFormatLog()); + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, response, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. 
+ String msg = "Error creating audit log for drop role: " + e.getMessage(); + LOGGER.error(msg, e); + } return response; } @@ -421,6 +454,7 @@ public TAlterSentryRoleAddGroupsResponse alter_sentry_role_add_groups( final Timer.Context timerContext = sentryMetrics.grantRoleTimer.time(); TAlterSentryRoleAddGroupsResponse response = new TAlterSentryRoleAddGroupsResponse(); try { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(request.getRequestorUserName())); CommitContext commitContext = sentryStore.alterSentryRoleAddGroups(request.getRequestorUserName(), @@ -432,12 +466,15 @@ public TAlterSentryRoleAddGroupsResponse alter_sentry_role_add_groups( plugin.onAlterSentryRoleAddGroups(request); } } catch (SentryNoSuchObjectException e) { - String msg = "Role: " + request + " does not exist."; + String msg = "Role: " + request + " doesn't exist"; LOGGER.error(msg, e); response.setStatus(Status.NoSuchObject(msg, e)); } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); LOGGER.error(msg, e); @@ -446,8 +483,14 @@ public TAlterSentryRoleAddGroupsResponse alter_sentry_role_add_groups( timerContext.stop(); } - AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance().createJsonLogEntity( - request, response, conf).toJsonFormatLog()); + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, response, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. + String msg = "Error creating audit log for add role to group: " + e.getMessage(); + LOGGER.error(msg, e); + } return response; } @@ -457,6 +500,7 @@ public TAlterSentryRoleDeleteGroupsResponse alter_sentry_role_delete_groups( final Timer.Context timerContext = sentryMetrics.revokeRoleTimer.time(); TAlterSentryRoleDeleteGroupsResponse response = new TAlterSentryRoleDeleteGroupsResponse(); try { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), getRequestorGroups(request.getRequestorUserName())); CommitContext commitContext = sentryStore.alterSentryRoleDeleteGroups(request.getRoleName(), @@ -474,6 +518,9 @@ public TAlterSentryRoleDeleteGroupsResponse alter_sentry_role_delete_groups( } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error adding groups to role: " + request; LOGGER.error(msg, e); @@ -482,8 +529,14 @@ public TAlterSentryRoleDeleteGroupsResponse alter_sentry_role_delete_groups( timerContext.stop(); } - AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance().createJsonLogEntity( - request, response, conf).toJsonFormatLog()); + try { + AUDIT_LOGGER.info(JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, response, conf).toJsonFormatLog()); + } catch (Exception e) { + // if any exception, log the exception. 
+ String msg = "Error creating audit log for delete role from group: " + e.getMessage(); + LOGGER.error(msg, e); + } return response; } @@ -497,6 +550,7 @@ public TListSentryRolesResponse list_sentry_roles_by_group( String subject = request.getRequestorUserName(); boolean checkAllGroups = false; try { + validateClientVersion(request.getProtocol_version()); Set groups = getRequestorGroups(subject); // Don't check admin permissions for listing requestor's own roles if (AccessConstants.ALL.equalsIgnoreCase(request.getGroupName())) { @@ -517,12 +571,15 @@ public TListSentryRolesResponse list_sentry_roles_by_group( response.setStatus(Status.OK()); } catch (SentryNoSuchObjectException e) { response.setRoles(roleSet); - String msg = "Role: " + request + " couldn't be retrieved."; + String msg = "Request: " + request + " couldn't be completed, message: " + e.getMessage(); LOGGER.error(msg, e); response.setStatus(Status.NoSuchObject(msg, e)); } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); LOGGER.error(msg, e); @@ -542,6 +599,7 @@ public TListSentryPrivilegesResponse list_sentry_privileges_by_role( Set privilegeSet = new HashSet(); String subject = request.getRequestorUserName(); try { + validateClientVersion(request.getProtocol_version()); Set groups = getRequestorGroups(subject); Boolean admin = inAdminGroups(groups); if(!admin) { @@ -566,6 +624,9 @@ public TListSentryPrivilegesResponse list_sentry_privileges_by_role( } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); LOGGER.error(msg, e); @@ -587,24 +648,27 @@ public TListSentryPrivilegesForProviderResponse list_sentry_privileges_for_provi TListSentryPrivilegesForProviderResponse response = new TListSentryPrivilegesForProviderResponse(); response.setPrivileges(new HashSet()); try { + validateClientVersion(request.getProtocol_version()); Set privilegesForProvider = sentryStore.listSentryPrivilegesForProvider( request.getGroups(), request.getRoleSet(), request.getAuthorizableHierarchy()); response.setPrivileges(privilegesForProvider); - if (((privilegesForProvider == null)||(privilegesForProvider.size() == 0))&&(request.getAuthorizableHierarchy() != null)) { - if (sentryStore.hasAnyServerPrivileges( - request.getGroups(), request.getRoleSet(), request.getAuthorizableHierarchy().getServer())) { - - // REQUIRED for ensuring 'default' Db is accessible by any user - // with privileges to atleast 1 object with the specific server as root - - // Need some way to specify that even though user has no privilege - // For the specific AuthorizableHierarchy.. 
he has privilege on - atleast 1 object in the server hierarchy - HashSet<String> serverPriv = Sets.newHashSet("server=+"); - response.setPrivileges(serverPriv); - } + if ((privilegesForProvider == null || privilegesForProvider.size() == 0) && request.getAuthorizableHierarchy() != null + && sentryStore.hasAnyServerPrivileges( + request.getGroups(), request.getRoleSet(), request.getAuthorizableHierarchy().getServer())) { + + // REQUIRED for ensuring 'default' Db is accessible by any user + // with privileges to at least 1 object with the specific server as root + + // Need some way to specify that even though the user has no privilege + // for the specific AuthorizableHierarchy, the user has privilege on + // at least 1 object in the server hierarchy + HashSet<String> serverPriv = Sets.newHashSet("server=+"); + response.setPrivileges(serverPriv); } response.setStatus(Status.OK()); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); LOGGER.error(msg, e); @@ -660,6 +724,7 @@ public TDropPrivilegesResponse drop_sentry_privilege( final Timer.Context timerContext = sentryMetrics.dropPrivilegeTimer.time(); TDropPrivilegesResponse response = new TDropPrivilegesResponse(); try { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), adminGroups); sentryStore.dropPrivilege(request.getAuthorizable()); for (SentryPolicyStorePlugin plugin : sentryPlugins) { @@ -669,6 +734,9 @@ public TDropPrivilegesResponse drop_sentry_privilege( } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); @@ -686,6 +754,7 @@ public TRenamePrivilegesResponse rename_sentry_privilege( final Timer.Context timerContext = sentryMetrics.renamePrivilegeTimer.time(); TRenamePrivilegesResponse response = new TRenamePrivilegesResponse(); try { + validateClientVersion(request.getProtocol_version()); authorize(request.getRequestorUserName(), adminGroups); sentryStore.renamePrivilege(request.getOldAuthorizable(), request.getNewAuthorizable()); @@ -696,6 +765,9 @@ public TRenamePrivilegesResponse rename_sentry_privilege( } catch (SentryAccessDeniedException e) { LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); @@ -717,6 +789,7 @@ public TListSentryPrivilegesByAuthResponse list_sentry_privileges_by_authorizabl Set<String> requestedGroups = request.getGroups(); TSentryActiveRoleSet requestedRoleSet = request.getRoleSet(); try { + validateClientVersion(request.getProtocol_version()); Set<String> memberGroups = getRequestorGroups(subject); if(!inAdminGroups(memberGroups)) { // disallow non-admin to lookup groups that they are not part of @@ -757,6 +830,9 @@ public TListSentryPrivilegesByAuthResponse list_sentry_privileges_by_authorizabl } catch (SentryAccessDeniedException e) {
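
Restating the corrected fallback condition above as a named predicate may make the intent clearer: the "server=+" pseudo-privilege is returned only when no privilege matched the requested hierarchy, a hierarchy was actually requested, and the requestor's groups hold at least one privilege somewhere under that server. A sketch under those assumptions (method name hypothetical):

    private boolean shouldExposeServerMarker(Set<String> privs,
        TListSentryPrivilegesForProviderRequest request) throws Exception {
      return (privs == null || privs.isEmpty())           // nothing matched the hierarchy
          && request.getAuthorizableHierarchy() != null   // a hierarchy was requested
          && sentryStore.hasAnyServerPrivileges(          // but the server holds something
              request.getGroups(), request.getRoleSet(),
              request.getAuthorizableHierarchy().getServer());
    }
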
LOGGER.error(e.getMessage(), e); response.setStatus(Status.AccessDenied(e.getMessage(), e)); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); } catch (Exception e) { String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); @@ -786,6 +862,13 @@ public TSentryConfigValueResponse get_sentry_config_value( TSentryConfigValueResponse response = new TSentryConfigValueResponse(); String attr = request.getPropertyName(); + try { + validateClientVersion(request.getProtocol_version()); + } catch (SentryThriftAPIMismatchException e) { + LOGGER.error(e.getMessage(), e); + response.setStatus(Status.THRIFT_VERSION_MISMATCH(e.getMessage(), e)); + return response; + } // Only allow config parameters like... if (!Pattern.matches(requirePattern, attr) || Pattern.matches(excludePattern, attr)) { @@ -801,4 +883,68 @@ public TSentryConfigValueResponse get_sentry_config_value( response.setStatus(Status.OK()); return response; } + + @VisibleForTesting + static void validateClientVersion(int protocol_version) throws SentryThriftAPIMismatchException { + if (ServiceConstants.ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT != protocol_version) { + String msg = "Sentry thrift API protocol version mismatch: Client thrift version " + + "is: " + protocol_version + ", server thrift version " + + "is " + ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT; + throw new SentryThriftAPIMismatchException(msg); + } + } + + // get the sentry mapping data and return the data in a map structure + @Override + public TSentryExportMappingDataResponse export_sentry_mapping_data( + TSentryExportMappingDataRequest request) throws TException { + TSentryExportMappingDataResponse response = new TSentryExportMappingDataResponse(); + try { + String requestor = request.getRequestorUserName(); + Set<String> memberGroups = getRequestorGroups(requestor); + if (!inAdminGroups(memberGroups)) { + // disallow non-admin to export the sentry metadata + throw new SentryAccessDeniedException("Access denied to " + requestor + + " for exporting the sentry metadata."); + } + TSentryMappingData tSentryMappingData = new TSentryMappingData(); + tSentryMappingData.setGroupRolesMap(sentryStore.getGroupNameRoleNamesMap()); + tSentryMappingData.setRolePrivilegesMap(sentryStore.getRoleNameTPrivilegesMap()); + response.setMappingData(tSentryMappingData); + response.setStatus(Status.OK()); + } catch (Exception e) { + String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); + LOGGER.error(msg, e); + response.setMappingData(new TSentryMappingData()); + response.setStatus(Status.RuntimeError(msg, e)); + } + return response; + } + + // import the sentry mapping data + @Override + public TSentryImportMappingDataResponse import_sentry_mapping_data( + TSentryImportMappingDataRequest request) throws TException { + TSentryImportMappingDataResponse response = new TSentryImportMappingDataResponse(); + try { + String requestor = request.getRequestorUserName(); + Set<String> memberGroups = getRequestorGroups(requestor); + if (!inAdminGroups(memberGroups)) { + // disallow non-admin to import the sentry metadata + throw new SentryAccessDeniedException("Access denied to " + requestor + + " for importing the sentry metadata."); + } + sentryStore.importSentryMetaData(request.getMappingData(), request.isOverwriteRole()); + response.setStatus(Status.OK()); + } catch (SentryInvalidInputException e) { + String msg = "Invalid input privilege object"; +
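
Since validateClientVersion is @VisibleForTesting, the guard is easy to exercise directly; a minimal JUnit-style sketch (assuming the enclosing processor class is named SentryPolicyStoreProcessor, which this hunk does not itself confirm):

    @Test
    public void testValidateClientVersion() throws Exception {
      // The current protocol version must pass silently.
      SentryPolicyStoreProcessor.validateClientVersion(
          ServiceConstants.ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT);
      try {
        // Any other version must be rejected.
        SentryPolicyStoreProcessor.validateClientVersion(-1);
        Assert.fail("expected SentryThriftAPIMismatchException");
      } catch (SentryThriftAPIMismatchException e) {
        // expected
      }
    }
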
LOGGER.error(msg, e); + response.setStatus(Status.InvalidInput(msg, e)); + } catch (Exception e) { + String msg = "Unknown error for request: " + request + ", message: " + e.getMessage(); + LOGGER.error(msg, e); + response.setStatus(Status.RuntimeError(msg, e)); + } + return response; + } } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryProcessorWrapper.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryProcessorWrapper.java index 6f3508de6..a5f11a98f 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryProcessorWrapper.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryProcessorWrapper.java @@ -18,71 +18,20 @@ package org.apache.sentry.provider.db.service.thrift; -import java.net.Socket; - -import org.apache.sentry.provider.db.log.util.CommandUtil; import org.apache.thrift.TException; import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TSaslClientTransport; -import org.apache.thrift.transport.TSaslServerTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Preconditions; public class SentryProcessorWrapper extends SentryPolicyService.Processor { - private static final Logger LOGGER = LoggerFactory.getLogger(SentryProcessorWrapper.class); - public SentryProcessorWrapper(I iface) { super(iface); } @Override public boolean process(TProtocol in, TProtocol out) throws TException { - setIpAddress(in); - setImpersonator(in); + ThriftUtil.setIpAddress(in); + ThriftUtil.setImpersonator(in); return super.process(in, out); } - - private void setImpersonator(final TProtocol in) { - TTransport transport = in.getTransport(); - if (transport instanceof TSaslServerTransport) { - String impersonator = ((TSaslServerTransport) transport).getSaslServer().getAuthorizationID(); - CommandUtil.setImpersonator(impersonator); - } - } - - private void setIpAddress(final TProtocol in) { - TTransport transport = in.getTransport(); - TSocket tSocket = getUnderlyingSocketFromTransport(transport); - if (tSocket != null) { - setIpAddress(tSocket.getSocket()); - } else { - LOGGER.warn("Unknown Transport, cannot determine ipAddress"); - } - } - - private void setIpAddress(Socket socket) { - CommandUtil.setIpAddress(socket.getInetAddress().toString()); - } - - /** - * Returns the underlying TSocket from the transport, or null of the transport type is - * unknown. 
- */ - private TSocket getUnderlyingSocketFromTransport(TTransport transport) { - Preconditions.checkNotNull(transport); - if (transport instanceof TSaslServerTransport) { - return (TSocket) ((TSaslServerTransport) transport).getUnderlyingTransport(); - } else if (transport instanceof TSaslClientTransport) { - return (TSocket) ((TSaslClientTransport) transport).getUnderlyingTransport(); - } else if (transport instanceof TSocket) { - return (TSocket) transport; - } - return null; - } } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryWebServer.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryWebServer.java index 43f28ea91..1bdea2c55 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryWebServer.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryWebServer.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.EnumSet; +import java.net.URL; import java.util.EventListener; import java.util.HashMap; import java.util.Map; @@ -33,10 +34,15 @@ import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; import org.eclipse.jetty.server.DispatcherType; +import org.eclipse.jetty.server.Handler; +import org.eclipse.jetty.server.handler.ContextHandler; +import org.eclipse.jetty.server.handler.ContextHandlerCollection; +import org.eclipse.jetty.server.handler.ResourceHandler; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.servlet.FilterHolder; import org.eclipse.jetty.servlet.ServletContextHandler; import org.eclipse.jetty.servlet.ServletHolder; +import org.eclipse.jetty.util.resource.Resource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,6 +51,8 @@ public class SentryWebServer { private static final Logger LOGGER = LoggerFactory.getLogger(SentryWebServer.class); + private static final String RESOURCE_DIR = "/webapp"; + private static final String WELCOME_PAGE = "SentryService.html"; Server server; int port; @@ -53,13 +61,33 @@ public SentryWebServer(List listeners, int port, Configuration co this.port = port; server = new Server(port); ServletContextHandler servletContextHandler = new ServletContextHandler(); - ServletHolder servletHolder = new ServletHolder(AdminServlet.class); + ServletHolder servletHolder = new ServletHolder(AdminServlet.class); servletContextHandler.addServlet(servletHolder, "/*"); for(EventListener listener:listeners) { servletContextHandler.addEventListener(listener); } + ServletHolder confServletHolder = new ServletHolder(ConfServlet.class); + servletContextHandler.addServlet(confServletHolder, "/conf"); + servletContextHandler.getServletContext() + .setAttribute(ConfServlet.CONF_CONTEXT_ATTRIBUTE, conf); + + ResourceHandler resourceHandler = new ResourceHandler(); + resourceHandler.setDirectoriesListed(true); + URL url = this.getClass().getResource(RESOURCE_DIR); + try { + resourceHandler.setBaseResource(Resource.newResource(url.toString())); + } catch (IOException e) { + LOGGER.error("Got exception while setBaseResource for Sentry Service web UI", e); + } + resourceHandler.setWelcomeFiles(new String[]{WELCOME_PAGE}); + ContextHandler contextHandler= new ContextHandler(); + contextHandler.setHandler(resourceHandler); + + ContextHandlerCollection contextHandlerCollection = new ContextHandlerCollection(); + 
contextHandlerCollection.setHandlers(new Handler[]{contextHandler, servletContextHandler}); + String authMethod = conf.get(ServerConfig.SENTRY_WEB_SECURITY_TYPE); if (!ServerConfig.SENTRY_WEB_SECURITY_TYPE_NONE.equals(authMethod)) { /** @@ -71,7 +99,8 @@ public SentryWebServer(List listeners, int port, Configuration co FilterHolder filterHolder = servletContextHandler.addFilter(SentryAuthFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST)); filterHolder.setInitParameters(loadWebAuthenticationConf(conf)); } - server.setHandler(servletContextHandler); + + server.setHandler(contextHandlerCollection); } public void start() throws Exception{ @@ -116,7 +145,7 @@ private static void validateConf(Configuration conf) { Preconditions.checkArgument(keytabFile.length() != 0, "Keytab File is not right."); try { UserGroupInformation.setConfiguration(conf); - String hostPrincipal = SecurityUtil.getServerPrincipal(principal, "0.0.0.0"); + String hostPrincipal = SecurityUtil.getServerPrincipal(principal, ServerConfig.RPC_ADDRESS_DEFAULT); UserGroupInformation.loginUserFromKeytab(hostPrincipal, keytabFile); } catch (IOException ex) { throw new IllegalArgumentException("Can't use Kerberos authentication, principal [" @@ -126,4 +155,4 @@ private static void validateConf(Configuration conf) { + principal + "] keytab [" + keytabFile + "]"); } } -} \ No newline at end of file +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/ThriftUtil.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/ThriftUtil.java new file mode 100644 index 000000000..a5d7ca911 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/ThriftUtil.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
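
One note on the principal change above: SecurityUtil.getServerPrincipal() substitutes the _HOST token in a Kerberos principal with the supplied address, and the patch swaps the hard-coded "0.0.0.0" literal for the ServerConfig.RPC_ADDRESS_DEFAULT constant so the login principal tracks the server's default RPC address. Hypothetical illustration:

    // _HOST is replaced with the given hostname.
    String principal = "sentry/_HOST@EXAMPLE.COM";            // example value
    String resolved = SecurityUtil.getServerPrincipal(
        principal, "sentry-host.example.com");                // hypothetical host
    // resolved == "sentry/sentry-host.example.com@EXAMPLE.COM"
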
+ */ + +package org.apache.sentry.provider.db.service.thrift; + +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TSaslClientTransport; +import org.apache.thrift.transport.TSaslServerTransport; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; + +public class ThriftUtil { + + private static final Logger LOGGER = LoggerFactory.getLogger(ThriftUtil.class); + + public static void setImpersonator(final TProtocol in) { + try { + TTransport transport = in.getTransport(); + if (transport instanceof TSaslServerTransport) { + String impersonator = ((TSaslServerTransport) transport).getSaslServer() + .getAuthorizationID(); + setImpersonator(impersonator); + } + } catch (Exception e) { + // If an exception occurs while getting the impersonator, just log it. + LOGGER.warn("Error getting the impersonator: " + e.getMessage()); + } + } + + public static void setIpAddress(final TProtocol in) { + try { + TTransport transport = in.getTransport(); + TSocket tSocket = getUnderlyingSocketFromTransport(transport); + if (tSocket != null) { + setIpAddress(tSocket.getSocket().getInetAddress().toString()); + } else { + LOGGER.warn("Unknown Transport, cannot determine ipAddress"); + } + } catch (Exception e) { + // If an exception occurs while getting the client's IP address, just log it. + LOGGER.warn("Error getting the client's IP address: " + e.getMessage()); + } + } + + /** + * Returns the underlying TSocket from the transport, or null if the transport type is unknown. + */ + private static TSocket getUnderlyingSocketFromTransport(TTransport transport) { + Preconditions.checkNotNull(transport); + if (transport instanceof TSaslServerTransport) { + return (TSocket) ((TSaslServerTransport) transport).getUnderlyingTransport(); + } else if (transport instanceof TSaslClientTransport) { + return (TSocket) ((TSaslClientTransport) transport).getUnderlyingTransport(); + } else if (transport instanceof TSocket) { + return (TSocket) transport; + } + return null; + } + + private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>() { + @Override + protected synchronized String initialValue() { + return ""; + } + }; + + public static void setIpAddress(String ipAddress) { + threadLocalIpAddress.set(ipAddress); + } + + public static String getIpAddress() { + return threadLocalIpAddress.get(); + } + + private static ThreadLocal<String> threadLocalImpersonator = new ThreadLocal<String>() { + @Override + protected synchronized String initialValue() { + return ""; + } + }; + + public static void setImpersonator(String impersonator) { + threadLocalImpersonator.set(impersonator); + } + + public static String getImpersonator() { + return threadLocalImpersonator.get(); + } +} \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentrySchemaHelper.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentrySchemaHelper.java index e3e04f1b1..e5768c6d4 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentrySchemaHelper.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentrySchemaHelper.java @@ -34,58 +34,58 @@ public enum CommandType { COMMENT } - static final String DEFAUTL_DELIMITER = ";"; + String DEFAUTL_DELIMITER = ";"; /***
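
The two thread-locals above exist so that the processor wrapper can record per-connection facts at the start of process() and audit code can read them back later on the same handler thread. Usage sketch with hypothetical values:

    // In SentryProcessorWrapper.process(), per request:
    ThriftUtil.setIpAddress("/10.0.0.15");   // hypothetical client address
    ThriftUtil.setImpersonator("hive");      // hypothetical SASL authorization id

    // Later, on the same handler thread (e.g. while building an audit record):
    String ip = ThriftUtil.getIpAddress();        // "/10.0.0.15"
    String who = ThriftUtil.getImpersonator();    // "hive"
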
* Find the type of given command * @param dbCommand * @return */ - public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException; + boolean isPartialCommand(String dbCommand) throws IllegalArgumentException; /** Parse the DB specific nesting format and extract the inner script name if any * @param dbCommand command from parent script * @return * @throws IllegalFormatException */ - public String getScriptName(String dbCommand) throws IllegalArgumentException; + String getScriptName(String dbCommand) throws IllegalArgumentException; /*** * Find if the given command is a nested script execution * @param dbCommand * @return */ - public boolean isNestedScript(String dbCommand); + boolean isNestedScript(String dbCommand); /*** * Find if the given command is should be passed to DB * @param dbCommand * @return */ - public boolean isNonExecCommand(String dbCommand); + boolean isNonExecCommand(String dbCommand); /*** * Get the SQL statement delimiter * @return */ - public String getDelimiter(); + String getDelimiter(); /*** * Clear any client specific tags * @return */ - public String cleanseCommand(String dbCommand); + String cleanseCommand(String dbCommand); /*** * Does the DB required table/column names quoted * @return */ - public boolean needsQuotedIdentifier(); + boolean needsQuotedIdentifier(); /*** * Set DB specific options if any * @param dbOps */ - public void setDbOpts(String dbOps); + void setDbOpts(String dbOps); } @@ -112,7 +112,7 @@ public boolean isPartialCommand(String dbCommand) throws IllegalArgumentExceptio @Override public boolean isNonExecCommand(String dbCommand) { - return (dbCommand.startsWith("--") || dbCommand.startsWith("#")); + return dbCommand.startsWith("--") || dbCommand.startsWith("#"); } @Override @@ -214,7 +214,7 @@ public String getDelimiter() { @Override public boolean isNonExecCommand(String dbCommand) { return super.isNonExecCommand(dbCommand) || - (dbCommand.startsWith("/*") && dbCommand.endsWith("*/")) || + dbCommand.startsWith("/*") && dbCommand.endsWith("*/") || dbCommand.startsWith(DELIMITER_TOKEN); } @@ -255,10 +255,9 @@ public boolean needsQuotedIdentifier() { @Override public boolean isNonExecCommand(String dbCommand) { // Skip "standard_conforming_strings" command which is not supported in older postgres - if (POSTGRES_SKIP_STANDARD_STRING.equalsIgnoreCase(getDbOpts())) { - if (dbCommand.startsWith(POSTGRES_STRING_COMMAND_FILTER) || dbCommand.startsWith(POSTGRES_STRING_CLIENT_ENCODING)) { - return true; - } + if (POSTGRES_SKIP_STANDARD_STRING.equalsIgnoreCase(getDbOpts()) + && (dbCommand.startsWith(POSTGRES_STRING_COMMAND_FILTER) || dbCommand.startsWith(POSTGRES_STRING_CLIENT_ENCODING))) { + return true; } return super.isNonExecCommand(dbCommand); } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentrySchemaTool.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentrySchemaTool.java index 69086d202..d974d7b90 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentrySchemaTool.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentrySchemaTool.java @@ -49,6 +49,7 @@ import org.apache.sentry.Command; import org.apache.sentry.SentryUserException; import org.apache.sentry.provider.db.service.persistent.SentryStoreSchemaInfo; +import org.apache.sentry.provider.db.service.thrift.SentryConfigurationException; import 
org.apache.sentry.provider.db.tools.SentrySchemaHelper.NestedScriptParser; import org.apache.sentry.service.thrift.SentryService; import org.apache.sentry.service.thrift.ServiceConstants; @@ -68,12 +69,12 @@ public class SentrySchemaTool { private final SentryStoreSchemaInfo SentryStoreSchemaInfo; public SentrySchemaTool(Configuration sentryConf, String dbType) - throws SentryUserException { + throws SentryUserException, IOException { this(System.getenv("SENTRY_HOME") + SENTRY_SCRIP_DIR, sentryConf, dbType); } public SentrySchemaTool(String sentryScripPath, Configuration sentryConf, - String dbType) throws SentryUserException { + String dbType) throws SentryUserException, IOException { if (sentryScripPath == null || sentryScripPath.isEmpty()) { throw new SentryUserException("No Sentry script dir provided"); } @@ -83,8 +84,16 @@ public SentrySchemaTool(String sentryScripPath, Configuration sentryConf, dbType); userName = sentryConf.get(ServiceConstants.ServerConfig.SENTRY_STORE_JDBC_USER, ServiceConstants.ServerConfig.SENTRY_STORE_JDBC_USER_DEFAULT); - passWord = sentryConf.get(ServiceConstants.ServerConfig.SENTRY_STORE_JDBC_PASS, - ServiceConstants.ServerConfig.SENTRY_STORE_JDBC_PASS_DEFAULT); + // The password is read from the credential provider configured via the property + // "hadoop.security.credential.provider.path" in sentry-site.xml; if none is configured, + // it falls back to reading the password directly from sentry-site.xml + char[] passTmp = sentryConf.getPassword(ServiceConstants.ServerConfig.SENTRY_STORE_JDBC_PASS); + if(passTmp != null) { + passWord = new String(passTmp); + } else { + throw new SentryConfigurationException("Error reading " + ServiceConstants.ServerConfig.SENTRY_STORE_JDBC_PASS); + } + try { connectionURL = getValidConfVar(ServiceConstants.ServerConfig.SENTRY_STORE_JDBC_URL); if(dbType.equalsIgnoreCase(SentrySchemaHelper.DB_DERBY)) { @@ -195,7 +204,7 @@ private Connection getConnectionToMetastore(boolean printInfo) System.out.println("Sentry store Connection Driver :\t " + driver); System.out.println("Sentry store connection User:\t " + userName); } - if ((userName == null) || userName.isEmpty()) { + if (userName == null || userName.isEmpty()) { throw new SentryUserException("UserName empty "); } try { @@ -510,11 +519,11 @@ public void run(String[] args) throws Exception { if (line.hasOption("dbType")) { dbType = line.getOptionValue("dbType"); - if ((!dbType.equalsIgnoreCase(SentrySchemaHelper.DB_DERBY) + if (!dbType.equalsIgnoreCase(SentrySchemaHelper.DB_DERBY) && !dbType.equalsIgnoreCase(SentrySchemaHelper.DB_MYSQL) && !dbType.equalsIgnoreCase(SentrySchemaHelper.DB_POSTGRACE) && !dbType.equalsIgnoreCase(SentrySchemaHelper.DB_ORACLE) - && !dbType.equalsIgnoreCase(SentrySchemaHelper.DB_DB2))) { + && !dbType.equalsIgnoreCase(SentrySchemaHelper.DB_DB2)) { System.err.println("Unsupported dbType " + dbType); printAndExit(cmdLineOptions); } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentryShellCommon.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentryShellCommon.java new file mode 100644 index 000000000..6ddc1defe --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentryShellCommon.java @@ -0,0 +1,247 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
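
The schema tool above now goes through Configuration.getPassword(), which consults any credential provider configured via hadoop.security.credential.provider.path before falling back to the plain value in sentry-site.xml. A sketch of that lookup (the file paths are hypothetical):

    Configuration conf = new Configuration();
    conf.addResource(new Path("/etc/sentry/conf/sentry-site.xml"));   // hypothetical path
    // Optional: point at a credential store holding the JDBC password.
    conf.set("hadoop.security.credential.provider.path",
        "jceks://file/etc/sentry/conf/sentry.jceks");                 // hypothetical store
    char[] pass = conf.getPassword(ServiceConstants.ServerConfig.SENTRY_STORE_JDBC_PASS);
    String passWord = (pass != null) ? new String(pass) : null;       // null means misconfigured
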
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +package org.apache.sentry.provider.db.tools; + +import com.google.common.annotations.VisibleForTesting; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.OptionGroup; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.cli.Parser; +import org.apache.commons.lang.StringUtils; + +/** + * SentryShellCommon provides the command line argument parsing for the sentry admin tools. + * For the hive model and the generic model, a child class should be implemented as the sentry admin tool. + */ +public abstract class SentryShellCommon { + + protected String roleName; + protected String groupName; + protected String privilegeStr; + protected String confPath; + // flag for the command + protected boolean isCreateRole = false; + protected boolean isDropRole = false; + protected boolean isAddRoleGroup = false; + protected boolean isDeleteRoleGroup = false; + protected boolean isGrantPrivilegeRole = false; + protected boolean isRevokePrivilegeRole = false; + protected boolean isListRole = false; + protected boolean isListPrivilege = false; + protected boolean isPrintHelp = false; + // flag for the parameter check + protected boolean roleNameRequired = false; + protected boolean groupNameRequired = false; + protected boolean privilegeStrRequired = false; + + public final static String OPTION_DESC_HELP = "Shell usage"; + public final static String OPTION_DESC_CONF = "sentry-site file path"; + public final static String OPTION_DESC_ROLE_NAME = "Role name"; + public final static String OPTION_DESC_GROUP_NAME = "Group name"; + public final static String OPTION_DESC_PRIVILEGE = "Privilege string"; + public final static String PREFIX_MESSAGE_MISSING_OPTION = "Missing required option: "; + + public final static String GROUP_SPLIT_CHAR = ","; + + /** + * parse arguments + * + * <pre>
+   *   -conf,--sentry_conf <filepath>                   sentry config file path
+   *   -cr,--create_role            -r <roleName>       create role
+   *   -dr,--drop_role              -r <roleName>       drop role
+   *   -arg,--add_role_group        -r <roleName>  -g <groupName>  add role to group
+   *   -drg,--delete_role_group     -r <roleName>  -g <groupName>  delete role from group
+   *   -gpr,--grant_privilege_role  -r <roleName>  -p <privilege>  grant privilege to role
+   *   -rpr,--revoke_privilege_role -r <roleName>  -p <privilege>  revoke privilege from role
+   *   -lr,--list_role              -g <groupName>      list roles for group
+   *   -lp,--list_privilege         -r <roleName>       list privilege for role
+   *   -t,--type <typeName>                             the shell for hive model or generic model
+   * </pre>
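
For illustration, driving the shell programmatically through the executeShell entry point (defined later in this class) with the options from the table above (role, privilege, and path values hypothetical):

    // Create a role, then grant it a table-level privilege.
    new SentryShellHive().executeShell(new String[] {
        "-cr", "-r", "analyst_role",
        "-conf", "/etc/sentry/conf/sentry-site.xml"});
    new SentryShellHive().executeShell(new String[] {
        "-gpr", "-r", "analyst_role",
        "-p", "server=server1->db=db1->table=tab1->action=select",
        "-conf", "/etc/sentry/conf/sentry-site.xml"});
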
+ * + * @param args + */ + protected boolean parseArgs(String[] args) { + Options simpleShellOptions = new Options(); + + Option crOpt = new Option("cr", "create_role", false, "Create role"); + crOpt.setRequired(false); + + Option drOpt = new Option("dr", "drop_role", false, "Drop role"); + drOpt.setRequired(false); + + Option argOpt = new Option("arg", "add_role_group", false, "Add role to group"); + argOpt.setRequired(false); + + Option drgOpt = new Option("drg", "delete_role_group", false, "Delete role from group"); + drgOpt.setRequired(false); + + Option gprOpt = new Option("gpr", "grant_privilege_role", false, "Grant privilege to role"); + gprOpt.setRequired(false); + + Option rprOpt = new Option("rpr", "revoke_privilege_role", false, "Revoke privilege from role"); + rprOpt.setRequired(false); + + Option lrOpt = new Option("lr", "list_role", false, "List role"); + lrOpt.setRequired(false); + + Option lpOpt = new Option("lp", "list_privilege", false, "List privilege"); + lpOpt.setRequired(false); + + // required args group + OptionGroup simpleShellOptGroup = new OptionGroup(); + simpleShellOptGroup.addOption(crOpt); + simpleShellOptGroup.addOption(drOpt); + simpleShellOptGroup.addOption(argOpt); + simpleShellOptGroup.addOption(drgOpt); + simpleShellOptGroup.addOption(gprOpt); + simpleShellOptGroup.addOption(rprOpt); + simpleShellOptGroup.addOption(lrOpt); + simpleShellOptGroup.addOption(lpOpt); + simpleShellOptGroup.setRequired(true); + simpleShellOptions.addOptionGroup(simpleShellOptGroup); + + // optional args + Option pOpt = new Option("p", "privilege", true, OPTION_DESC_PRIVILEGE); + pOpt.setRequired(false); + simpleShellOptions.addOption(pOpt); + + Option gOpt = new Option("g", "groupname", true, OPTION_DESC_GROUP_NAME); + gOpt.setRequired(false); + simpleShellOptions.addOption(gOpt); + + Option rOpt = new Option("r", "rolename", true, OPTION_DESC_ROLE_NAME); + rOpt.setRequired(false); + simpleShellOptions.addOption(rOpt); + + // this argument should be parsed in the bin/sentryShell + Option tOpt = new Option("t", "type", true, "[hive|solr|sqoop|.....]"); + tOpt.setRequired(false); + simpleShellOptions.addOption(tOpt); + + // file path of sentry-site + Option sentrySitePathOpt = new Option("conf", "sentry_conf", true, OPTION_DESC_CONF); + sentrySitePathOpt.setRequired(true); + simpleShellOptions.addOption(sentrySitePathOpt); + + // help option + Option helpOpt = new Option("h", "help", false, OPTION_DESC_HELP); + helpOpt.setRequired(false); + simpleShellOptions.addOption(helpOpt); + + // this Options is parsed first for help option + Options helpOptions = new Options(); + helpOptions.addOption(helpOpt); + + try { + Parser parser = new GnuParser(); + + // parse help option first + CommandLine cmd = parser.parse(helpOptions, args, true); + for (Option opt : cmd.getOptions()) { + if (opt.getOpt().equals("h")) { + // get the help option, print the usage and exit + usage(simpleShellOptions); + return false; + } + } + + // without help option + cmd = parser.parse(simpleShellOptions, args); + + for (Option opt : cmd.getOptions()) { + if (opt.getOpt().equals("p")) { + privilegeStr = opt.getValue(); + } else if (opt.getOpt().equals("g")) { + groupName = opt.getValue(); + } else if (opt.getOpt().equals("r")) { + roleName = opt.getValue(); + } else if (opt.getOpt().equals("cr")) { + isCreateRole = true; + roleNameRequired = true; + } else if (opt.getOpt().equals("dr")) { + isDropRole = true; + roleNameRequired = true; + } else if (opt.getOpt().equals("arg")) { + isAddRoleGroup = true; + 
roleNameRequired = true; + groupNameRequired = true; + } else if (opt.getOpt().equals("drg")) { + isDeleteRoleGroup = true; + roleNameRequired = true; + groupNameRequired = true; + } else if (opt.getOpt().equals("gpr")) { + isGrantPrivilegeRole = true; + roleNameRequired = true; + privilegeStrRequired = true; + } else if (opt.getOpt().equals("rpr")) { + isRevokePrivilegeRole = true; + roleNameRequired = true; + privilegeStrRequired = true; + } else if (opt.getOpt().equals("lr")) { + isListRole = true; + } else if (opt.getOpt().equals("lp")) { + isListPrivilege = true; + roleNameRequired = true; + } else if (opt.getOpt().equals("conf")) { + confPath = opt.getValue(); + } + } + checkRequiredParameter(roleNameRequired, roleName, OPTION_DESC_ROLE_NAME); + checkRequiredParameter(groupNameRequired, groupName, OPTION_DESC_GROUP_NAME); + checkRequiredParameter(privilegeStrRequired, privilegeStr, OPTION_DESC_PRIVILEGE); + } catch (ParseException pe) { + System.out.println(pe.getMessage()); + usage(simpleShellOptions); + return false; + } + return true; + } + + private void checkRequiredParameter(boolean isRequired, String paramValue, String paramName) throws ParseException { + if (isRequired && StringUtils.isEmpty(paramValue)) { + throw new ParseException(PREFIX_MESSAGE_MISSING_OPTION + paramName); + } + } + + // print usage + private void usage(Options sentryOptions) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("sentryShell", sentryOptions); + } + + // hive model and generic model should implement this method + public abstract void run() throws Exception; + + @VisibleForTesting + public boolean executeShell(String[] args) throws Exception { + boolean result = true; + if (parseArgs(args)) { + run(); + } else { + result = false; + } + return result; + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentryShellHive.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentryShellHive.java new file mode 100644 index 000000000..dc7f82944 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/SentryShellHive.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.sentry.provider.db.tools; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; +import org.apache.sentry.provider.db.tools.command.hive.*; +import org.apache.sentry.service.thrift.SentryServiceClientFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * SentryShellHive is an admin tool responsible for the management of the sentry repository. + * The following functions are supported: + * create role, drop role, add group to role, delete group from role, grant privilege to role, + * revoke privilege from role, list roles for group, list privilege for role. + */ +public class SentryShellHive extends SentryShellCommon { + + private static final Logger LOGGER = LoggerFactory.getLogger(SentryShellHive.class); + + public void run() throws Exception { + Command command = null; + SentryPolicyServiceClient client = SentryServiceClientFactory.create(getSentryConf()); + UserGroupInformation ugi = UserGroupInformation.getLoginUser(); + String requestorName = ugi.getShortUserName(); + + if (isCreateRole) { + command = new CreateRoleCmd(roleName); + } else if (isDropRole) { + command = new DropRoleCmd(roleName); + } else if (isAddRoleGroup) { + command = new GrantRoleToGroupsCmd(roleName, groupName); + } else if (isDeleteRoleGroup) { + command = new RevokeRoleFromGroupsCmd(roleName, groupName); + } else if (isGrantPrivilegeRole) { + command = new GrantPrivilegeToRoleCmd(roleName, privilegeStr); + } else if (isRevokePrivilegeRole) { + command = new RevokePrivilegeFromRoleCmd(roleName, privilegeStr); + } else if (isListRole) { + command = new ListRolesCmd(groupName); + } else if (isListPrivilege) { + command = new ListPrivilegesCmd(roleName); + } + + // check the requestor name + if (StringUtils.isEmpty(requestorName)) { + // The exception message will be recorded in the log file. + throw new Exception("The requestor name is empty."); + } + + if (command != null) { + command.execute(client, requestorName); + } + } + + private Configuration getSentryConf() { + Configuration conf = new Configuration(); + conf.addResource(new Path(confPath)); + return conf; + } + + public static void main(String[] args) throws Exception { + SentryShellHive sentryShell = new SentryShellHive(); + try { + sentryShell.executeShell(args); + } catch (Exception e) { + LOGGER.error(e.getMessage(), e); + Throwable current = e; + // Find the first non-null message in the cause chain. + while (current != null && current.getMessage() == null) { + current = current.getCause(); + } + System.out.println("The operation failed." + + (current == null || current.getMessage() == null ? "" : " Message: " + current.getMessage())); + } + } + +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/Command.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/Command.java new file mode 100644 index 000000000..79aed4971 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/Command.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; + +/** + * The interface for all admin commands, eg, CreateRoleCmd. + */ +public interface Command { + void execute(SentryPolicyServiceClient client, String requestorName) throws Exception; +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/CommandUtil.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/CommandUtil.java new file mode 100644 index 000000000..fa7fc6e5c --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/CommandUtil.java @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.provider.db.tools.command.hive; + +import org.apache.commons.lang.StringUtils; +import org.apache.sentry.policy.common.KeyValue; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.service.thrift.ServiceConstants; + +public class CommandUtil { + + public static final String SPLIT_CHAR = ","; + + // parse the privilege in String and get the TSentryPrivilege as result + public static TSentryPrivilege convertToTSentryPrivilege(String privilegeStr) throws Exception { + TSentryPrivilege tSentryPrivilege = new TSentryPrivilege(); + for (String authorizable : PolicyConstants.AUTHORIZABLE_SPLITTER.split(privilegeStr)) { + KeyValue tempKV = new KeyValue(authorizable); + String key = tempKV.getKey(); + String value = tempKV.getValue(); + + if (PolicyFileConstants.PRIVILEGE_SERVER_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setServerName(value); + } else if (PolicyFileConstants.PRIVILEGE_DATABASE_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setDbName(value); + } else if (PolicyFileConstants.PRIVILEGE_TABLE_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setTableName(value); + } else if (PolicyFileConstants.PRIVILEGE_COLUMN_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setColumnName(value); + } else if (PolicyFileConstants.PRIVILEGE_URI_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setURI(value); + } else if (PolicyFileConstants.PRIVILEGE_ACTION_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setAction(value); + } else if (PolicyFileConstants.PRIVILEGE_GRANT_OPTION_NAME.equalsIgnoreCase(key)) { + TSentryGrantOption grantOption = "true".equalsIgnoreCase(value) ? 
TSentryGrantOption.TRUE + : TSentryGrantOption.FALSE; + tSentryPrivilege.setGrantOption(grantOption); + } + } + tSentryPrivilege.setPrivilegeScope(getPrivilegeScope(tSentryPrivilege)); + validatePrivilegeHierarchy(tSentryPrivilege); + return tSentryPrivilege; + } + + // for the different hierarchy for hive: + // 1: server->url + // 2: server->database->table->column + // if both of them are found in the privilege string, the privilege scope will be set as + // PrivilegeScope.URI + private static String getPrivilegeScope(TSentryPrivilege tSentryPrivilege) { + ServiceConstants.PrivilegeScope privilegeScope = ServiceConstants.PrivilegeScope.SERVER; + if (!StringUtils.isEmpty(tSentryPrivilege.getURI())) { + privilegeScope = ServiceConstants.PrivilegeScope.URI; + } else if (!StringUtils.isEmpty(tSentryPrivilege.getColumnName())) { + privilegeScope = ServiceConstants.PrivilegeScope.COLUMN; + } else if (!StringUtils.isEmpty(tSentryPrivilege.getTableName())) { + privilegeScope = ServiceConstants.PrivilegeScope.TABLE; + } else if (!StringUtils.isEmpty(tSentryPrivilege.getDbName())) { + privilegeScope = ServiceConstants.PrivilegeScope.DATABASE; + } + return privilegeScope.toString(); + } + + // check the privilege value for the specific privilege scope + // eg, for the table scope, server and database can't be empty + private static void validatePrivilegeHierarchy(TSentryPrivilege tSentryPrivilege) throws Exception { + String serverName = tSentryPrivilege.getServerName(); + String dbName = tSentryPrivilege.getDbName(); + String tableName = tSentryPrivilege.getTableName(); + String columnName = tSentryPrivilege.getColumnName(); + String uri = tSentryPrivilege.getURI(); + if (ServiceConstants.PrivilegeScope.SERVER.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + if (StringUtils.isEmpty(serverName)) { + throw new IllegalArgumentException("The hierarchy of privilege is not correct."); + } + } else if (ServiceConstants.PrivilegeScope.URI.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + if (StringUtils.isEmpty(serverName) || StringUtils.isEmpty(uri)) { + throw new IllegalArgumentException("The hierarchy of privilege is not correct."); + } + } else if (ServiceConstants.PrivilegeScope.DATABASE.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + if (StringUtils.isEmpty(serverName) || StringUtils.isEmpty(dbName)) { + throw new IllegalArgumentException("The hierarchy of privilege is not correct."); + } + } else if (ServiceConstants.PrivilegeScope.TABLE.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + if (StringUtils.isEmpty(serverName) || StringUtils.isEmpty(dbName) + || StringUtils.isEmpty(tableName)) { + throw new IllegalArgumentException("The hierarchy of privilege is not correct."); + } + } else if (ServiceConstants.PrivilegeScope.COLUMN.toString().equals(tSentryPrivilege.getPrivilegeScope()) + && (StringUtils.isEmpty(serverName) || StringUtils.isEmpty(dbName) + || StringUtils.isEmpty(tableName) || StringUtils.isEmpty(columnName))) { + throw new IllegalArgumentException("The hierarchy of privilege is not correct."); + } + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/CreateRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/CreateRoleCmd.java new file mode 100644 index 000000000..5a4834a67 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/CreateRoleCmd.java @@ -0,0 
+1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; + +/** + * The class for admin command to create role. + */ +public class CreateRoleCmd implements Command { + + private String roleName; + + public CreateRoleCmd(String roleName) { + this.roleName = roleName; + } + + @Override + public void execute(SentryPolicyServiceClient client, String requestorName) throws Exception { + client.createRole(requestorName, roleName); + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/DropRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/DropRoleCmd.java new file mode 100644 index 000000000..facec0ebf --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/DropRoleCmd.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; + +/** + * The class for admin command to drop role. 
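
Recapping CommandUtil.convertToTSentryPrivilege above with a worked example: the privilege string is split on "->" into key=value authorizables, and the scope is derived from the most specific field present, with URI taking precedence. Sketch (values hypothetical; the call declares throws Exception, so run it inside a method that does too):

    TSentryPrivilege p = CommandUtil.convertToTSentryPrivilege(
        "server=server1->db=db1->table=tab1->action=select");
    // p.getServerName()     -> "server1"
    // p.getDbName()         -> "db1"
    // p.getTableName()      -> "tab1"
    // p.getAction()         -> "select"
    // p.getPrivilegeScope() -> "TABLE" (no uri/column set, so table is most specific)
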
+ */ +public class DropRoleCmd implements Command { + + private String roleName; + + public DropRoleCmd(String roleName) { + this.roleName = roleName; + } + + @Override + public void execute(SentryPolicyServiceClient client, String requestorName) throws Exception { + client.dropRole(requestorName, roleName); + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/GrantPrivilegeToRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/GrantPrivilegeToRoleCmd.java new file mode 100644 index 000000000..a1ef2f9b3 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/GrantPrivilegeToRoleCmd.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; +import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.service.thrift.ServiceConstants; + +/** + * The class for admin command to grant privilege to role. + */ +public class GrantPrivilegeToRoleCmd implements Command { + + private String roleName; + private String privilegeStr; + + public GrantPrivilegeToRoleCmd(String roleName, String privilegeStr) { + this.roleName = roleName; + this.privilegeStr = privilegeStr; + } + + @Override + public void execute(SentryPolicyServiceClient client, String requestorName) throws Exception { + TSentryPrivilege tSentryPrivilege = CommandUtil.convertToTSentryPrivilege(privilegeStr); + boolean grantOption = tSentryPrivilege.getGrantOption().equals(TSentryGrantOption.TRUE) ? 
true : false; + if (ServiceConstants.PrivilegeScope.SERVER.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.grantServerPrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getAction(), grantOption); + } else if (ServiceConstants.PrivilegeScope.DATABASE.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.grantDatabasePrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getDbName(), tSentryPrivilege.getAction(), grantOption); + } else if (ServiceConstants.PrivilegeScope.TABLE.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.grantTablePrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getDbName(), tSentryPrivilege.getTableName(), + tSentryPrivilege.getAction(), grantOption); + } else if (ServiceConstants.PrivilegeScope.COLUMN.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.grantColumnPrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getDbName(), tSentryPrivilege.getTableName(), + tSentryPrivilege.getColumnName(), tSentryPrivilege.getAction(), grantOption); + } else if (ServiceConstants.PrivilegeScope.URI.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.grantURIPrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getURI(), grantOption); + } + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/GrantRoleToGroupsCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/GrantRoleToGroupsCmd.java new file mode 100644 index 000000000..07a3de453 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/GrantRoleToGroupsCmd.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import com.google.common.collect.Sets; +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; +import org.apache.sentry.provider.db.tools.SentryShellCommon; + +import java.util.Set; + +/** + * The class for admin command to grant role to group. 
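+ *
+ * <p>Usage sketch: the group list is split on SentryShellCommon.GROUP_SPLIT_CHAR
+ * (assumed here to be a comma; all names are illustrative):
+ * <pre>
+ *   new GrantRoleToGroupsCmd("analyst_role", "group1,group2").execute(client, "admin");
+ * </pre>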
+ */ +public class GrantRoleToGroupsCmd implements Command { + + private String roleName; + private String groupNamesStr; + + public GrantRoleToGroupsCmd(String roleName, String groupNamesStr) { + this.roleName = roleName; + this.groupNamesStr = groupNamesStr; + } + + @Override + public void execute(SentryPolicyServiceClient client, String requestorName) throws Exception { + Set groups = Sets.newHashSet(groupNamesStr.split(SentryShellCommon.GROUP_SPLIT_CHAR)); + client.grantRoleToGroups(requestorName, roleName, groups); + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/ListPrivilegesCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/ListPrivilegesCmd.java new file mode 100644 index 000000000..d990ef3d3 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/ListPrivilegesCmd.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import com.google.common.collect.Lists; +import org.apache.commons.lang.StringUtils; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; +import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; + +import java.util.List; +import java.util.Set; + +/** + * The class for admin command to list privileges. + */ +public class ListPrivilegesCmd implements Command { + + private String roleName; + + public ListPrivilegesCmd(String roleName) { + this.roleName = roleName; + } + + @Override + public void execute(SentryPolicyServiceClient client, String requestorName) throws Exception { + Set privileges = client + .listAllPrivilegesByRoleName(requestorName, roleName); + if (privileges != null) { + for (TSentryPrivilege privilege : privileges) { + String privilegeStr = convertToPrivilegeStr(privilege); + System.out.println(privilegeStr); + } + } + } + + // convert TSentryPrivilege to privilege in string + private String convertToPrivilegeStr(TSentryPrivilege tSentryPrivilege) { + List privileges = Lists.newArrayList(); + if (tSentryPrivilege != null) { + String serverName = tSentryPrivilege.getServerName(); + String dbName = tSentryPrivilege.getDbName(); + String tableName = tSentryPrivilege.getTableName(); + String columnName = tSentryPrivilege.getColumnName(); + String uri = tSentryPrivilege.getURI(); + String action = tSentryPrivilege.getAction(); + String grantOption = (tSentryPrivilege.getGrantOption() == TSentryGrantOption.TRUE ? 
"true" + : "false"); + if (!StringUtils.isEmpty(serverName)) { + privileges.add(PolicyConstants.KV_JOINER.join(PolicyFileConstants.PRIVILEGE_SERVER_NAME, + serverName)); + if (!StringUtils.isEmpty(uri)) { + privileges.add(PolicyConstants.KV_JOINER.join(PolicyFileConstants.PRIVILEGE_URI_NAME, + uri)); + } else if (!StringUtils.isEmpty(dbName)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_DATABASE_NAME, dbName)); + if (!StringUtils.isEmpty(tableName)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_TABLE_NAME, tableName)); + if (!StringUtils.isEmpty(columnName)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_COLUMN_NAME, columnName)); + } + } + } + if (!StringUtils.isEmpty(action)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_ACTION_NAME, action)); + } + } + // only append the grant option to privilege string if it's true + if ("true".equals(grantOption)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_GRANT_OPTION_NAME, grantOption)); + } + } + return PolicyConstants.AUTHORIZABLE_JOINER.join(privileges); + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/ListRolesCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/ListRolesCmd.java new file mode 100644 index 000000000..283f2c03e --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/ListRolesCmd.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import org.apache.commons.lang.StringUtils; +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; +import org.apache.sentry.provider.db.service.thrift.TSentryRole; + +import java.util.Set; + +/** + * The class for admin command to list roles. 
+ */ +public class ListRolesCmd implements Command { + + private String groupName; + + public ListRolesCmd(String groupName) { + this.groupName = groupName; + } + + @Override + public void execute(SentryPolicyServiceClient client, String requestorName) throws Exception { + Set roles; + if (StringUtils.isEmpty(groupName)) { + roles = client.listRoles(requestorName); + } else { + roles = client.listRolesByGroupName(requestorName, groupName); + } + if (roles != null) { + for (TSentryRole role : roles) { + System.out.println(role.getRoleName()); + } + } + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/RevokePrivilegeFromRoleCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/RevokePrivilegeFromRoleCmd.java new file mode 100644 index 000000000..940503774 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/RevokePrivilegeFromRoleCmd.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; +import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.service.thrift.ServiceConstants; + +/** + * The class for admin command to revoke privileges from role. + */ +public class RevokePrivilegeFromRoleCmd implements Command { + + private String roleName; + String privilegeStr; + + public RevokePrivilegeFromRoleCmd(String roleName, String privilegeStr) { + this.roleName = roleName; + this.privilegeStr = privilegeStr; + } + + @Override + public void execute(SentryPolicyServiceClient client, String requestorName) throws Exception { + TSentryPrivilege tSentryPrivilege = CommandUtil.convertToTSentryPrivilege(privilegeStr); + boolean grantOption = tSentryPrivilege.getGrantOption().equals(TSentryGrantOption.TRUE) ? 
true : false; + if (ServiceConstants.PrivilegeScope.SERVER.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.revokeServerPrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + grantOption); + } else if (ServiceConstants.PrivilegeScope.DATABASE.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.revokeDatabasePrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getDbName(), tSentryPrivilege.getAction(), grantOption); + } else if (ServiceConstants.PrivilegeScope.TABLE.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.revokeTablePrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getDbName(), tSentryPrivilege.getTableName(), + tSentryPrivilege.getAction(), grantOption); + } else if (ServiceConstants.PrivilegeScope.COLUMN.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.revokeColumnPrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getDbName(), tSentryPrivilege.getTableName(), + tSentryPrivilege.getColumnName(), tSentryPrivilege.getAction(), grantOption); + } else if (ServiceConstants.PrivilegeScope.URI.toString().equals(tSentryPrivilege.getPrivilegeScope())) { + client.revokeURIPrivilege(requestorName, roleName, tSentryPrivilege.getServerName(), + tSentryPrivilege.getURI(), grantOption); + } + } + +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/RevokeRoleFromGroupsCmd.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/RevokeRoleFromGroupsCmd.java new file mode 100644 index 000000000..86773ca46 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/tools/command/hive/RevokeRoleFromGroupsCmd.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.tools.command.hive; + +import com.google.common.collect.Sets; +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; + +import java.util.Set; + +/** + * The class for admin command to revoke role from group. 
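+ *
+ * <p>Usage sketch, mirroring GrantRoleToGroupsCmd; the group list is split on
+ * CommandUtil.SPLIT_CHAR (assumed to be a comma; names are illustrative):
+ * <pre>
+ *   new RevokeRoleFromGroupsCmd("analyst_role", "group1,group2").execute(client, "admin");
+ * </pre>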
+ */ +public class RevokeRoleFromGroupsCmd implements Command { + + private String roleName; + private String groupNamesStr; + + public RevokeRoleFromGroupsCmd(String roleName, String groupNamesStr) { + this.roleName = roleName; + this.groupNamesStr = groupNamesStr; + } + + @Override + public void execute(SentryPolicyServiceClient client, String requestorName) throws Exception { + Set groups = Sets.newHashSet(groupNamesStr.split(CommandUtil.SPLIT_CHAR)); + client.revokeRoleFromGroups(requestorName, roleName, groups); + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/HAClientInvocationHandler.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/HAClientInvocationHandler.java index 4947ad1ae..a58fa415c 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/HAClientInvocationHandler.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/HAClientInvocationHandler.java @@ -18,7 +18,6 @@ package org.apache.sentry.service.thrift; import java.io.IOException; -import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.net.InetSocketAddress; @@ -37,7 +36,7 @@ import com.google.common.base.Preconditions; -public class HAClientInvocationHandler implements InvocationHandler { +public class HAClientInvocationHandler extends SentryClientInvocationHandler { private static final Logger LOGGER = LoggerFactory.getLogger(HAClientInvocationHandler.class); @@ -55,45 +54,37 @@ public HAClientInvocationHandler(Configuration conf) throws Exception { } @Override - public Object invoke(Object proxy, Method method, Object[] args) throws + public Object invokeImpl(Object proxy, Method method, Object[] args) throws SentryUserException { Object result = null; - while (true) { - try { - if (!method.isAccessible()) { - method.setAccessible(true); - } - // The client is initialized in the first call instead of constructor. - // This way we can propagate the connection exception to caller cleanly - if (client == null) { - renewSentryClient(); - } - result = method.invoke(client, args); - } catch (IllegalAccessException e) { - throw new SentryUserException(e.getMessage(), e.getCause()); - } catch (InvocationTargetException e) { - if (e.getTargetException() instanceof SentryUserException) { - throw (SentryUserException)e.getTargetException(); - } else { - LOGGER.warn(THRIFT_EXCEPTION_MESSAGE + ": Error in connect current" + - " service, will retry other service.", e); - if (client != null) { - client.close(); - client = null; - } - } - } catch (IOException e1) { - // close() doesn't throw exception we supress that in case of connection - // loss. Changing SentryPolicyServiceClient#close() to throw an - // exception would be a backward incompatible change for Sentry clients. - if ("close".equals(method.getName())) { - return null; + try { + if (!method.isAccessible()) { + method.setAccessible(true); + } + // The client is initialized in the first call instead of constructor. 
+ // This way we can propagate the connection exception to caller cleanly + if (client == null) { + renewSentryClient(); + } + result = method.invoke(client, args); + } catch (IllegalAccessException e) { + throw new SentryUserException(e.getMessage(), e.getCause()); + } catch (InvocationTargetException e) { + if (e.getTargetException() instanceof SentryUserException) { + throw (SentryUserException)e.getTargetException(); + } else { + LOGGER.warn(THRIFT_EXCEPTION_MESSAGE + ": Error in connect current" + + " service, will retry other service.", e); + if (client != null) { + client.close(); + client = null; } - throw new SentryUserException("Error connecting to sentry service " - + e1.getMessage(), e1); } - return result; + } catch (IOException e1) { + throw new SentryUserException("Error connecting to sentry service " + + e1.getMessage(), e1); } + return result; } // Retrieve the new connection endpoint from ZK and connect to new server @@ -138,4 +129,11 @@ private void checkClientConf() { ServerConfig.PRINCIPAL + " : " + serverPrincipal + " should contain " + SecurityUtil.HOSTNAME_PATTERN); } } + + @Override + public void close() { + if (client != null) { + client.close(); + } + } } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/JaasConfiguration.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/JaasConfiguration.java index d5f55fe4a..64ecae279 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/JaasConfiguration.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/JaasConfiguration.java @@ -72,7 +72,7 @@ public static Configuration getInstance() { * @param principal The principal of the user * @param keytab The location of the keytab */ - public static void addEntry(String name, String principal, String keytab) { + public static void addEntryForKeytab(String name, String principal, String keytab) { Map options = new HashMap(); options.put("keyTab", keytab); options.put("principal", principal); @@ -84,6 +84,22 @@ public static void addEntry(String name, String principal, String keytab) { entries.put(name, entry); } + /** + * Add an entry to the jaas configuration with the passed in name. The other + * necessary options will be set for you. + * + * @param name The name of the entry (e.g. "Client") + */ + public static void addEntryForTicketCache(String sectionName) { + Map options = new HashMap(); + options.put("useKeyTab", "false"); + options.put("storeKey", "false"); + options.put("useTicketCache", "true"); + AppConfigurationEntry entry = new AppConfigurationEntry(krb5LoginModuleName, + AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options); + entries.put(sectionName, entry); + } + /** * Removes the specified entry. * diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/PoolClientInvocationHandler.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/PoolClientInvocationHandler.java new file mode 100644 index 000000000..b4056e9b1 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/PoolClientInvocationHandler.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.service.thrift;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+import org.apache.commons.pool2.PooledObjectFactory;
+import org.apache.commons.pool2.impl.AbandonedConfig;
+import org.apache.commons.pool2.impl.GenericObjectPool;
+import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.SentryUserException;
+import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
+import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * PoolClientInvocationHandler is a proxy that routes every thrift call through a connection
+ * pool: for each call it borrows a SentryPolicyServiceClient instance from the commons-pool,
+ * and returns the instance to the pool once the call completes. If a call fails because of a
+ * connection problem, the broken instance is discarded, a new one is added to the pool, and
+ * the call is retried. commons-pool also provides the thread safety: each thread obtains a
+ * connection via borrowObject() and hands it back via returnObject().
+ */
+
+public class PoolClientInvocationHandler extends SentryClientInvocationHandler {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PoolClientInvocationHandler.class);
+
+  private final Configuration conf;
+  private PooledObjectFactory<SentryPolicyServiceClient> poolFactory;
+  private GenericObjectPool<SentryPolicyServiceClient> pool;
+  private GenericObjectPoolConfig poolConfig;
+  private int connectionRetryTotal;
+
+  private static final String POOL_EXCEPTION_MESSAGE = "Pool exception occurred ";
+
+  public PoolClientInvocationHandler(Configuration conf) throws Exception {
+    this.conf = conf;
+    readConfiguration();
+    poolFactory = new SentryServiceClientPoolFactory(conf);
+    pool = new GenericObjectPool<SentryPolicyServiceClient>(poolFactory, poolConfig, new AbandonedConfig());
+  }
+
+  @Override
+  public Object invokeImpl(Object proxy, Method method, Object[] args) throws Exception {
+    int retryCount = 0;
+    Object result = null;
+    while (retryCount < connectionRetryTotal) {
+      try {
+        // This loop wraps the thrift call with retries; the default retry total is 3.
+        result = invokeFromPool(method, args);
+        break;
+      } catch (TTransportException e) {
+        // A TTransportException signals a connection problem: create a new connection
+        // and try again. Take the pool's lock before adding the new connection.
+        synchronized (pool) {
+          // If there is room, create a new instance and add it to the commons-pool; it
+          // will be handed out first because the pool is configured LIFO.
+          if (pool.getNumIdle() + pool.getNumActive() < pool.getMaxTotal()) {
+            pool.addObject();
+          }
+        }
+        // Increase the retry count, and give up by rethrowing once no retries remain.
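+        // (The retry total is read from ClientConfig.SENTRY_POOL_RETRY_TOTAL, i.e.
+        // "sentry.service.client.connection.pool.retry-total", default 3; see
+        // readConfiguration() below.)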
+        retryCount++;
+        if (retryCount == connectionRetryTotal) {
+          throw new SentryUserException(e.getMessage(), e);
+        }
+      }
+    }
+    return result;
+  }
+
+  private Object invokeFromPool(Method method, Object[] args) throws Exception {
+    Object result = null;
+    SentryPolicyServiceClient client;
+    try {
+      // Borrow a connection from the pool; at this point we don't yet know whether it is broken.
+      client = pool.borrowObject();
+    } catch (Exception e) {
+      LOGGER.debug(POOL_EXCEPTION_MESSAGE, e);
+      throw new SentryUserException(e.getMessage(), e);
+    }
+    try {
+      // Perform the thrift call.
+      result = method.invoke(client, args);
+    } catch (InvocationTargetException e) {
+      // Unwrap the target exception and check whether a SentryUserException or a
+      // TTransportException is wrapped inside. A TTransportException means the pooled
+      // connection has a problem.
+      Throwable targetException = e.getCause();
+      if (targetException instanceof SentryUserException) {
+        Throwable sentryTargetException = targetException.getCause();
+        // On a connection problem, e.g. a stale connection after the service restarted,
+        // sentryTargetException is a TTransportException.
+        if (sentryTargetException instanceof TTransportException) {
+          // The call failed because of the connection: destroy the instance and remove it
+          // from the commons-pool, then throw a TTransportException so the caller reconnects.
+          pool.invalidateObject(client);
+          throw new TTransportException(sentryTargetException);
+        }
+        // The exception was thrown by the thrift call itself, e.g. SentryAccessDeniedException.
+        throw (SentryUserException) targetException;
+      }
+      throw e;
+    } finally {
+      try {
+        // Return the instance to the commons-pool.
+        pool.returnObject(client);
+      } catch (Exception e) {
+        LOGGER.error(POOL_EXCEPTION_MESSAGE, e);
+        throw e;
+      }
+    }
+    return result;
+  }
+
+  @Override
+  public void close() {
+    try {
+      pool.close();
+    } catch (Exception e) {
+      LOGGER.debug(POOL_EXCEPTION_MESSAGE, e);
+    }
+  }
+
+  private void readConfiguration() {
+    poolConfig = new GenericObjectPoolConfig();
+    // Configure the pool size for commons-pool.
+    poolConfig.setMaxTotal(conf.getInt(ClientConfig.SENTRY_POOL_MAX_TOTAL, ClientConfig.SENTRY_POOL_MAX_TOTAL_DEFAULT));
+    poolConfig.setMinIdle(conf.getInt(ClientConfig.SENTRY_POOL_MIN_IDLE, ClientConfig.SENTRY_POOL_MIN_IDLE_DEFAULT));
+    poolConfig.setMaxIdle(conf.getInt(ClientConfig.SENTRY_POOL_MAX_IDLE, ClientConfig.SENTRY_POOL_MAX_IDLE_DEFAULT));
+    // Read the retry total used when reconnecting to the service.
+    connectionRetryTotal = conf.getInt(ClientConfig.SENTRY_POOL_RETRY_TOTAL,
+        ClientConfig.SENTRY_POOL_RETRY_TOTAL_DEFAULT);
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ProcessorFactory.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ProcessorFactory.java
index 07b3472d8..a3bb6ab19 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ProcessorFactory.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ProcessorFactory.java
@@ -22,6 +22,7 @@ public abstract class ProcessorFactory {
   protected final Configuration conf;
+
   public ProcessorFactory(Configuration conf) {
     this.conf = conf;
   }
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryClientInvocationHandler.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryClientInvocationHandler.java
new file mode
100644
index 000000000..a41be7fea
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryClientInvocationHandler.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.service.thrift;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+
+/**
+ * SentryClientInvocationHandler is the base class for all of Sentry's client-side
+ * InvocationHandlers.
+ */
+public abstract class SentryClientInvocationHandler implements InvocationHandler {
+
+  /**
+   * Intercept the client's "close()" call: an InvocationHandler may hold contexts of its
+   * own (connections, pools), and these must be closed when close() is called on the client.
+   */
+  @Override
+  public final Object invoke(Object proxy, Method method, Object[] args) throws Exception {
+    // close() doesn't throw an exception; we suppress errors here in case of connection
+    // loss. Changing SentryPolicyServiceClient#close() to throw an
+    // exception would be a backward incompatible change for Sentry clients.
+    if ("close".equals(method.getName()) && null == args) {
+      close();
+      return null;
+    }
+    return invokeImpl(proxy, method, args);
+  }
+
+  /**
+   * Subclasses implement their specific invocation logic here.
+   */
+  public abstract Object invokeImpl(Object proxy, Method method, Object[] args) throws Exception;
+
+  /**
+   * Close any contexts held by this InvocationHandler.
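+   *
+   * <p>A sketch of how a handler pairs with a dynamic proxy over the client interface
+   * (this mirrors what SentryServiceClientFactory does; conf is a Hadoop Configuration):
+   * <pre>
+   *   SentryPolicyServiceClient client = (SentryPolicyServiceClient) Proxy.newProxyInstance(
+   *       SentryPolicyServiceClientDefaultImpl.class.getClassLoader(),
+   *       SentryPolicyServiceClientDefaultImpl.class.getInterfaces(),
+   *       new PoolClientInvocationHandler(conf));
+   *   client.close(); // routed by invoke() to the handler's close(), not to invokeImpl()
+   * </pre>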
+ */ + public abstract void close(); + +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryKerberosContext.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryKerberosContext.java index fc7bc0535..93481cb32 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryKerberosContext.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryKerberosContext.java @@ -108,11 +108,11 @@ public void run() { LOGGER.info("Sentry Ticket renewer thread started"); while (!shutDownRenewer) { KerberosTicket tgt = getTGT(); - long nextRefresh = getRefreshTime(tgt); if (tgt == null) { LOGGER.warn("No ticket found in the cache"); return; } + long nextRefresh = getRefreshTime(tgt); while (System.currentTimeMillis() < nextRefresh) { Thread.sleep(1000); if (shutDownRenewer) { diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java index 9dda1fbb2..26a32e48a 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java @@ -29,6 +29,7 @@ import java.util.EventListener; import java.util.List; import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -54,7 +55,6 @@ import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; import org.apache.thrift.TMultiplexedProcessor; import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TCompactProtocol; import org.apache.thrift.server.TServer; import org.apache.thrift.server.TServerEventHandler; import org.apache.thrift.server.TThreadPoolServer; @@ -91,6 +91,7 @@ private static enum Status { private Status status; private int webServerPort; private SentryWebServer sentryWebServer; + private long maxMessageSize; public SentryService(Configuration conf) { this.conf = conf; @@ -110,6 +111,8 @@ public SentryService(Configuration conf) { ServerConfig.RPC_MAX_THREADS_DEFAULT); minThreads = conf.getInt(ServerConfig.RPC_MIN_THREADS, ServerConfig.RPC_MIN_THREADS_DEFAULT); + maxMessageSize = conf.getLong(ServerConfig.SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE, + ServerConfig.SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE_DEFAULT); if (kerberos) { // Use Hadoop libraries to translate the _HOST placeholder with actual hostname try { @@ -191,9 +194,14 @@ private void runServer() throws Exception { try { Constructor constructor = clazz .getConstructor(Configuration.class); + LOGGER.info("ProcessorFactory being used: " + clazz.getCanonicalName()); ProcessorFactory factory = (ProcessorFactory) constructor .newInstance(conf); - registeredProcessor = factory.register(processor) || registeredProcessor; + boolean status = factory.register(processor); + if(!status) { + LOGGER.error("Failed to register " + clazz.getCanonicalName()); + } + registeredProcessor = status || registeredProcessor; } catch (Exception e) { throw new IllegalStateException("Could not create " + processorFactory, e); @@ -217,7 +225,7 @@ private void runServer() throws Exception { TThreadPoolServer.Args args = new TThreadPoolServer.Args( 
serverTransport).processor(processor) .transportFactory(transportFactory) - .protocolFactory(new TBinaryProtocol.Factory()) + .protocolFactory(new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize)) .minWorkerThreads(minThreads).maxWorkerThreads(maxThreads); thriftServer = new TThreadPoolServer(args); LOGGER.info("Serving on " + address); @@ -298,13 +306,9 @@ public synchronized void stop() throws Exception{ } // wait for the service thread to finish execution - public synchronized void waitForShutDown() { + public synchronized void waitOnFuture() throws ExecutionException, InterruptedException { LOGGER.info("Waiting on future.get()"); - try { serviceStatus.get(); - } catch (Exception e) { - LOGGER.debug("Error during the shutdown", e); - } } private MultiException addMultiException(MultiException exception, Exception e) { @@ -390,7 +394,7 @@ public void run() { // Let's wait on the service to stop try { - server.waitForShutDown(); + server.waitOnFuture(); } finally { server.serviceExecutor.shutdown(); } diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceClientFactory.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceClientFactory.java index 574f23cb6..09fe42e57 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceClientFactory.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceClientFactory.java @@ -24,7 +24,7 @@ import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClientDefaultImpl; -import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; +import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; public class SentryServiceClientFactory { @@ -32,8 +32,14 @@ private SentryServiceClientFactory() { } public static SentryPolicyServiceClient create(Configuration conf) throws Exception { - boolean haEnabled = conf.getBoolean(ServerConfig.SENTRY_HA_ENABLED, false); - if (haEnabled) { + boolean haEnabled = conf.getBoolean(ClientConfig.SERVER_HA_ENABLED, false); + boolean pooled = conf.getBoolean(ClientConfig.SENTRY_POOL_ENABLED, false); + if (pooled) { + return (SentryPolicyServiceClient) Proxy + .newProxyInstance(SentryPolicyServiceClientDefaultImpl.class.getClassLoader(), + SentryPolicyServiceClientDefaultImpl.class.getInterfaces(), + new PoolClientInvocationHandler(conf)); + } else if (haEnabled) { return (SentryPolicyServiceClient) Proxy .newProxyInstance(SentryPolicyServiceClientDefaultImpl.class.getClassLoader(), SentryPolicyServiceClientDefaultImpl.class.getInterfaces(), diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceClientPoolFactory.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceClientPoolFactory.java new file mode 100644 index 000000000..3a38b243e --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceClientPoolFactory.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.service.thrift;
+
+import java.lang.reflect.Proxy;
+
+import org.apache.commons.pool2.BasePooledObjectFactory;
+import org.apache.commons.pool2.PooledObject;
+import org.apache.commons.pool2.impl.DefaultPooledObject;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
+import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClientDefaultImpl;
+import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * SentryServiceClientPoolFactory is the pooled-object factory backing the client connection
+ * pool: it implements the methods the pool uses to create, wrap, and destroy client objects.
+ */
+
+public class SentryServiceClientPoolFactory extends BasePooledObjectFactory<SentryPolicyServiceClient> {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryServiceClientPoolFactory.class);
+
+  private Configuration conf;
+
+  public SentryServiceClientPoolFactory(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public SentryPolicyServiceClient create() throws Exception {
+    LOGGER.debug("Creating Sentry Service Client...");
+    boolean haEnabled = conf.getBoolean(ClientConfig.SERVER_HA_ENABLED, false);
+    if (haEnabled) {
+      return (SentryPolicyServiceClient) Proxy
+          .newProxyInstance(SentryPolicyServiceClientDefaultImpl.class.getClassLoader(),
+              SentryPolicyServiceClientDefaultImpl.class.getInterfaces(),
+              new HAClientInvocationHandler(conf));
+    } else {
+      return new SentryPolicyServiceClientDefaultImpl(conf);
+    }
+  }
+
+  @Override
+  public PooledObject<SentryPolicyServiceClient> wrap(SentryPolicyServiceClient client) {
+    return new DefaultPooledObject<SentryPolicyServiceClient>(client);
+  }
+
+  @Override
+  public void destroyObject(PooledObject<SentryPolicyServiceClient> pooledObject) {
+    SentryPolicyServiceClient client = pooledObject.getObject();
+    LOGGER.debug("Destroying Sentry Service Client: " + client);
+    if (client != null) {
+      // This actually calls close() on the underlying TSocket or TSaslClientTransport, which
+      // does not throw even when something is wrong, e.g. the client is already closed.
+      // The close here simply tries to close the socket; the client object itself is
+      // destroyed soon afterwards.
+      client.close();
+    }
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceUtil.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceUtil.java
new file mode 100644
index 000000000..5b293ecb4
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryServiceUtil.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.service.thrift; + +import java.util.List; + +import org.apache.commons.lang.StringUtils; +import org.apache.sentry.policy.common.KeyValue; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.service.thrift.ServiceConstants.PrivilegeScope; + +import com.google.common.collect.Lists; + +public class SentryServiceUtil { + + // parse the privilege in String and get the TSentryPrivilege as result + public static TSentryPrivilege convertToTSentryPrivilege(String privilegeStr) { + TSentryPrivilege tSentryPrivilege = new TSentryPrivilege(); + for (String authorizable : PolicyConstants.AUTHORIZABLE_SPLITTER.split(privilegeStr)) { + KeyValue tempKV = new KeyValue(authorizable); + String key = tempKV.getKey(); + String value = tempKV.getValue(); + + if (PolicyFileConstants.PRIVILEGE_SERVER_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setServerName(value); + } else if (PolicyFileConstants.PRIVILEGE_DATABASE_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setDbName(value); + } else if (PolicyFileConstants.PRIVILEGE_TABLE_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setTableName(value); + } else if (PolicyFileConstants.PRIVILEGE_COLUMN_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setColumnName(value); + } else if (PolicyFileConstants.PRIVILEGE_URI_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setURI(value); + } else if (PolicyFileConstants.PRIVILEGE_ACTION_NAME.equalsIgnoreCase(key)) { + tSentryPrivilege.setAction(value); + } else if (PolicyFileConstants.PRIVILEGE_GRANT_OPTION_NAME.equalsIgnoreCase(key)) { + TSentryGrantOption grantOption = "true".equalsIgnoreCase(value) ? 
TSentryGrantOption.TRUE + : TSentryGrantOption.FALSE; + tSentryPrivilege.setGrantOption(grantOption); + } + } + tSentryPrivilege.setPrivilegeScope(getPrivilegeScope(tSentryPrivilege)); + return tSentryPrivilege; + } + + // for the different hierarchy for hive: + // 1: server->url + // 2: server->database->table->column + // if both of them are found in the privilege string, the privilege scope will be set as + // PrivilegeScope.URI + public static String getPrivilegeScope(TSentryPrivilege tSentryPrivilege) { + PrivilegeScope privilegeScope = PrivilegeScope.SERVER; + if (!StringUtils.isEmpty(tSentryPrivilege.getURI())) { + privilegeScope = PrivilegeScope.URI; + } else if (!StringUtils.isEmpty(tSentryPrivilege.getColumnName())) { + privilegeScope = PrivilegeScope.COLUMN; + } else if (!StringUtils.isEmpty(tSentryPrivilege.getTableName())) { + privilegeScope = PrivilegeScope.TABLE; + } else if (!StringUtils.isEmpty(tSentryPrivilege.getDbName())) { + privilegeScope = PrivilegeScope.DATABASE; + } + return privilegeScope.toString(); + } + + // convert TSentryPrivilege to privilege in string + public static String convertTSentryPrivilegeToStr(TSentryPrivilege tSentryPrivilege) { + List privileges = Lists.newArrayList(); + if (tSentryPrivilege != null) { + String serverName = tSentryPrivilege.getServerName(); + String dbName = tSentryPrivilege.getDbName(); + String tableName = tSentryPrivilege.getTableName(); + String columnName = tSentryPrivilege.getColumnName(); + String uri = tSentryPrivilege.getURI(); + String action = tSentryPrivilege.getAction(); + String grantOption = (tSentryPrivilege.getGrantOption() == TSentryGrantOption.TRUE ? "true" + : "false"); + if (!StringUtils.isEmpty(serverName)) { + privileges.add(PolicyConstants.KV_JOINER.join(PolicyFileConstants.PRIVILEGE_SERVER_NAME, + serverName)); + if (!StringUtils.isEmpty(uri)) { + privileges.add(PolicyConstants.KV_JOINER.join(PolicyFileConstants.PRIVILEGE_URI_NAME, + uri)); + } else if (!StringUtils.isEmpty(dbName)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_DATABASE_NAME, dbName)); + if (!StringUtils.isEmpty(tableName)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_TABLE_NAME, tableName)); + if (!StringUtils.isEmpty(columnName)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_COLUMN_NAME, columnName)); + } + } + } + if (!StringUtils.isEmpty(action)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_ACTION_NAME, action)); + } + } + // only append the grant option to privilege string if it's true + if ("true".equals(grantOption)) { + privileges.add(PolicyConstants.KV_JOINER.join( + PolicyFileConstants.PRIVILEGE_GRANT_OPTION_NAME, grantOption)); + } + } + return PolicyConstants.AUTHORIZABLE_JOINER.join(privileges); + } +} diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java index c8f745027..94bd2a95c 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java @@ -22,9 +22,10 @@ import javax.security.sasl.Sasl; +import org.apache.sentry.provider.db.service.thrift.SentryMetrics; + import com.google.common.base.Splitter; import com.google.common.collect.ImmutableMap; 
-import org.apache.sentry.provider.db.service.thrift.SentryMetrics; public class ServiceConstants { @@ -61,7 +62,7 @@ public static class ServerConfig { public static final String RPC_PORT = "sentry.service.server.rpc-port"; public static final int RPC_PORT_DEFAULT = 8038; public static final String RPC_ADDRESS = "sentry.service.server.rpc-address"; - public static final String RPC_ADDRESS_DEFAULT = "0.0.0.0"; + public static final String RPC_ADDRESS_DEFAULT = "0.0.0.0"; //NOPMD public static final String RPC_MAX_THREADS = "sentry.service.server-max-threads"; public static final int RPC_MAX_THREADS_DEFAULT = 500; public static final String RPC_MIN_THREADS = "sentry.service.server-min-threads"; @@ -82,7 +83,6 @@ public static class ServerConfig { public static final String SENTRY_STORE_JDBC_USER = "sentry.store.jdbc.user"; public static final String SENTRY_STORE_JDBC_USER_DEFAULT = "Sentry"; public static final String SENTRY_STORE_JDBC_PASS = "sentry.store.jdbc.password"; - public static final String SENTRY_STORE_JDBC_PASS_DEFAULT = "Sentry"; public static final String SENTRY_STORE_JDBC_DRIVER = "sentry.store.jdbc.driver"; public static final String SENTRY_STORE_JDBC_DRIVER_DEFAULT = "org.apache.derby.jdbc.EmbeddedDriver"; @@ -125,6 +125,8 @@ public static class ServerConfig { // principal and keytab for client to be able to connect to secure ZK. Needed for Sentry HA with secure ZK public static final String SERVER_HA_ZOOKEEPER_CLIENT_PRINCIPAL = "sentry.zookeeper.client.principal"; public static final String SERVER_HA_ZOOKEEPER_CLIENT_KEYTAB = "sentry.zookeeper.client.keytab"; + public static final String SERVER_HA_ZOOKEEPER_CLIENT_TICKET_CACHE = "sentry.zookeeper.client.ticketcache"; + public static final String SERVER_HA_ZOOKEEPER_CLIENT_TICKET_CACHE_DEFAULT = "false"; public static final ImmutableMap SENTRY_STORE_DEFAULTS = ImmutableMap.builder() .put("datanucleus.connectionPoolingType", "BoneCP") @@ -152,7 +154,7 @@ public static class ServerConfig { public static final String SENTRY_WEB_ENABLE = "sentry.service.web.enable"; public static final Boolean SENTRY_WEB_ENABLE_DEFAULT = false; public static final String SENTRY_WEB_PORT = "sentry.service.web.port"; - public static final int SENTRY_WEB_PORT_DEFAULT = 51000; + public static final int SENTRY_WEB_PORT_DEFAULT = 29000; public static final String SENTRY_REPORTER = "sentry.service.reporter"; public static final String SENTRY_REPORTER_JMX = SentryMetrics.Reporting.JMX.name(); //case insensitive public static final String SENTRY_REPORTER_CONSOLE = SentryMetrics.Reporting.CONSOLE.name();//case insensitive @@ -165,7 +167,15 @@ public static class ServerConfig { public static final String SENTRY_WEB_SECURITY_PRINCIPAL = SENTRY_WEB_SECURITY_PREFIX + ".kerberos.principal"; public static final String SENTRY_WEB_SECURITY_KEYTAB = SENTRY_WEB_SECURITY_PREFIX + ".kerberos.keytab"; public static final String SENTRY_WEB_SECURITY_ALLOW_CONNECT_USERS = SENTRY_WEB_SECURITY_PREFIX + ".allow.connect.users"; + + // max message size for thrift messages + public static String SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE = "sentry.policy.server.thrift.max.message.size"; + public static long SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE_DEFAULT = 100 * 1024 * 1024; + + // action factories for external components + public static final String SENTRY_COMPONENT_ACTION_FACTORY_FORMAT = "sentry.%s.action.factory"; } + public static class ClientConfig { public static final ImmutableMap SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES; public static final String 
SERVER_RPC_PORT = "sentry.service.client.server.rpc-port"; @@ -182,13 +192,38 @@ public static class ClientConfig { public static final String SENTRY_HA_ZOOKEEPER_NAMESPACE = ServerConfig.SENTRY_HA_ZOOKEEPER_NAMESPACE; public static final String SERVER_HA_ZOOKEEPER_NAMESPACE_DEFAULT = ServerConfig.SENTRY_HA_ZOOKEEPER_NAMESPACE_DEFAULT; + // connection pool configuration + public static final String SENTRY_POOL_ENABLED = "sentry.service.client.connection.pool.enabled"; + public static final boolean SENTRY_POOL_ENABLED_DEFAULT = false; + + // commons-pool configuration for pool size + public static final String SENTRY_POOL_MAX_TOTAL = "sentry.service.client.connection.pool.max-total"; + public static final int SENTRY_POOL_MAX_TOTAL_DEFAULT = 8; + public static final String SENTRY_POOL_MAX_IDLE = "sentry.service.client.connection.pool.max-idle"; + public static final int SENTRY_POOL_MAX_IDLE_DEFAULT = 8; + public static final String SENTRY_POOL_MIN_IDLE = "sentry.service.client.connection.pool.min-idle"; + public static final int SENTRY_POOL_MIN_IDLE_DEFAULT = 0; + + // retry num for getting the connection from connection pool + public static final String SENTRY_POOL_RETRY_TOTAL = "sentry.service.client.connection.pool.retry-total"; + public static final int SENTRY_POOL_RETRY_TOTAL_DEFAULT = 3; + + // max message size for thrift messages + public static String SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE = "sentry.policy.client.thrift.max.message.size"; + public static long SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE_DEFAULT = 100 * 1024 * 1024; + + // client retry settings + public static final String RETRY_COUNT_CONF = "sentry.provider.backend.db.retry.count"; + public static final int RETRY_COUNT_DEFAULT = 3; + public static final String RETRY_INTERVAL_SEC_CONF = "sentry.provider.backend.db.retry.interval.seconds"; + public static final int RETRY_INTERVAL_SEC_DEFAULT = 30; } /** * Thrift generates terrible constant class names */ public static class ThriftConstants extends org.apache.sentry.service.thrift.sentry_common_serviceConstants { - public static final int TSENTRY_SERVICE_VERSION_CURRENT = TSENTRY_SERVICE_V1; + public static final int TSENTRY_SERVICE_VERSION_CURRENT = TSENTRY_SERVICE_V2; } /* Privilege operation scope */ diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/Status.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/Status.java index c93dad5c7..ed541d0d4 100644 --- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/Status.java +++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/Status.java @@ -27,6 +27,7 @@ import org.apache.sentry.provider.db.SentryAlreadyExistsException; import org.apache.sentry.provider.db.SentryInvalidInputException; import org.apache.sentry.provider.db.SentryNoSuchObjectException; +import org.apache.sentry.provider.db.SentryThriftAPIMismatchException; import org.apache.sentry.service.thrift.ServiceConstants.ThriftConstants; /** @@ -39,6 +40,7 @@ public enum Status { RUNTIME_ERROR(ThriftConstants.TSENTRY_STATUS_RUNTIME_ERROR), INVALID_INPUT(ThriftConstants.TSENTRY_STATUS_INVALID_INPUT), ACCESS_DENIED(ThriftConstants.TSENTRY_STATUS_ACCESS_DENIED), + THRIFT_VERSION_MISMATCH(ThriftConstants.TSENTRY_STATUS_THRIFT_VERSION_MISMATCH), UNKNOWN(-1) ; private int code; @@ -77,6 +79,9 @@ public static TSentryResponseStatus Create(Status value, String message) { public static TSentryResponseStatus 
InvalidInput(String message, Throwable t) { return Create(Status.INVALID_INPUT, message, t); } + public static TSentryResponseStatus THRIFT_VERSION_MISMATCH(String message, Throwable t) { + return Create(Status.THRIFT_VERSION_MISMATCH, message, t); + } public static TSentryResponseStatus Create(Status value, String message, @Nullable Throwable t) { TSentryResponseStatus status = new TSentryResponseStatus(); status.setValue(value.getCode()); @@ -106,6 +111,8 @@ public static void throwIfNotOk(TSentryResponseStatus thriftStatus) throw new SentryInvalidInputException(serverErrorToString(thriftStatus), thriftStatus.getMessage()); case ACCESS_DENIED: throw new SentryAccessDeniedException(serverErrorToString(thriftStatus), thriftStatus.getMessage()); + case THRIFT_VERSION_MISMATCH: + throw new SentryThriftAPIMismatchException(serverErrorToString(thriftStatus), thriftStatus.getMessage()); case UNKNOWN: throw new AssertionError(serverErrorToString(thriftStatus)); default: diff --git a/sentry-provider/sentry-provider-db/src/main/resources/001-SENTRY-327.postgres.sql b/sentry-provider/sentry-provider-db/src/main/resources/001-SENTRY-327.postgres.sql index 04353d178..1b670ec9b 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/001-SENTRY-327.postgres.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/001-SENTRY-327.postgres.sql @@ -1,2 +1,2 @@ -- SENTRY-327 -ALTER TABLE SENTRY_DB_PRIVILEGE ADD COLUMN WITH_GRANT_OPTION CHAR(1) NOT NULL DEFAULT 'N'; +ALTER TABLE "SENTRY_DB_PRIVILEGE" ADD COLUMN "WITH_GRANT_OPTION" CHAR(1) NOT NULL DEFAULT 'N'; diff --git a/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.derby.sql b/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.derby.sql index aceac06fd..647e9e284 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.derby.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.derby.sql @@ -3,3 +3,11 @@ DROP INDEX SENTRYPRIVILEGENAME; CREATE UNIQUE INDEX SENTRYPRIVILEGENAME ON SENTRY_DB_PRIVILEGE ("SERVER_NAME",DB_NAME,"TABLE_NAME",URI,"ACTION",WITH_GRANT_OPTION); ALTER TABLE SENTRY_DB_PRIVILEGE DROP COLUMN PRIVILEGE_NAME; + +ALTER TABLE SENTRY_DB_PRIVILEGE ALTER COLUMN DB_NAME SET DEFAULT '__NULL__'; +ALTER TABLE SENTRY_DB_PRIVILEGE ALTER COLUMN TABLE_NAME SET DEFAULT '__NULL__'; +ALTER TABLE SENTRY_DB_PRIVILEGE ALTER COLUMN URI SET DEFAULT '__NULL__'; + +UPDATE SENTRY_DB_PRIVILEGE SET DB_NAME = DEFAULT WHERE DB_NAME is null; +UPDATE SENTRY_DB_PRIVILEGE SET TABLE_NAME = DEFAULT WHERE TABLE_NAME is null; +UPDATE SENTRY_DB_PRIVILEGE SET URI = DEFAULT WHERE URI is null; diff --git a/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.mysql.sql b/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.mysql.sql index a786eccdb..cd4ec7c84 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.mysql.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.mysql.sql @@ -2,3 +2,12 @@ ALTER TABLE `SENTRY_DB_PRIVILEGE` DROP INDEX `SENTRY_DB_PRIV_PRIV_NAME_UNIQ`; ALTER TABLE `SENTRY_DB_PRIVILEGE` ADD UNIQUE `SENTRY_DB_PRIV_PRIV_NAME_UNIQ` (`SERVER_NAME`,`DB_NAME`,`TABLE_NAME`,`URI`(250),`ACTION`,`WITH_GRANT_OPTION`); ALTER TABLE `SENTRY_DB_PRIVILEGE` DROP `PRIVILEGE_NAME`; + +ALTER TABLE SENTRY_DB_PRIVILEGE ALTER COLUMN DB_NAME SET DEFAULT '__NULL__'; +ALTER TABLE SENTRY_DB_PRIVILEGE ALTER COLUMN TABLE_NAME SET DEFAULT '__NULL__'; +ALTER TABLE SENTRY_DB_PRIVILEGE ALTER COLUMN URI 
SET DEFAULT '__NULL__'; + +UPDATE SENTRY_DB_PRIVILEGE SET DB_NAME = DEFAULT WHERE DB_NAME is null; +UPDATE SENTRY_DB_PRIVILEGE SET TABLE_NAME = DEFAULT WHERE TABLE_NAME is null; +UPDATE SENTRY_DB_PRIVILEGE SET URI = DEFAULT WHERE URI is null; + diff --git a/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.oracle.sql b/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.oracle.sql index f64f69041..f5f596d1e 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.oracle.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.oracle.sql @@ -1,4 +1,13 @@ -- SENTRY-339 -ALTER TABLE SENTRY_DB_PRIVILEGE DROP CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ"; +ALTER TABLE SENTRY_DB_PRIVILEGE DROP CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" DROP INDEX; ALTER TABLE SENTRY_DB_PRIVILEGE ADD CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" UNIQUE ("SERVER_NAME","DB_NAME","TABLE_NAME","URI","ACTION","WITH_GRANT_OPTION"); ALTER TABLE SENTRY_DB_PRIVILEGE DROP COLUMN PRIVILEGE_NAME; + +ALTER TABLE SENTRY_DB_PRIVILEGE MODIFY DB_NAME DEFAULT '__NULL__'; +ALTER TABLE SENTRY_DB_PRIVILEGE MODIFY TABLE_NAME DEFAULT '__NULL__'; +ALTER TABLE SENTRY_DB_PRIVILEGE MODIFY URI DEFAULT '__NULL__'; + +UPDATE SENTRY_DB_PRIVILEGE SET DB_NAME = DEFAULT WHERE DB_NAME is null; +UPDATE SENTRY_DB_PRIVILEGE SET TABLE_NAME = DEFAULT WHERE TABLE_NAME is null; +UPDATE SENTRY_DB_PRIVILEGE SET URI = DEFAULT WHERE URI is null; + diff --git a/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.postgres.sql b/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.postgres.sql index 2c9867250..458e4477d 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.postgres.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/002-SENTRY-339.postgres.sql @@ -1,4 +1,13 @@ -- SENTRY-339 -ALTER TABLE SENTRY_DB_PRIVILEGE DROP CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ"; -ALTER TABLE SENTRY_DB_PRIVILEGE ADD CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" UNIQUE ("SERVER_NAME","DB_NAME","TABLE_NAME","URI", "ACTION","WITH_GRANT_OPTION"); -ALTER TABLE SENTRY_DB_PRIVILEGE DROP COLUMN PRIVILEGE_NAME; +ALTER TABLE "SENTRY_DB_PRIVILEGE" DROP CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ"; +ALTER TABLE "SENTRY_DB_PRIVILEGE" ADD CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" UNIQUE ("SERVER_NAME","DB_NAME","TABLE_NAME","URI", "ACTION","WITH_GRANT_OPTION"); +ALTER TABLE "SENTRY_DB_PRIVILEGE" DROP COLUMN "PRIVILEGE_NAME"; + +ALTER TABLE "SENTRY_DB_PRIVILEGE" ALTER COLUMN "DB_NAME" SET DEFAULT '__NULL__'; +ALTER TABLE "SENTRY_DB_PRIVILEGE" ALTER COLUMN "TABLE_NAME" SET DEFAULT '__NULL__'; +ALTER TABLE "SENTRY_DB_PRIVILEGE" ALTER COLUMN "URI" SET DEFAULT '__NULL__'; + +UPDATE "SENTRY_DB_PRIVILEGE" SET "DB_NAME" = DEFAULT where "DB_NAME" is null; +UPDATE "SENTRY_DB_PRIVILEGE" SET "TABLE_NAME" = DEFAULT where "TABLE_NAME" is null; +UPDATE "SENTRY_DB_PRIVILEGE" SET "URI" = DEFAULT where "URI" is null; + diff --git a/sentry-provider/sentry-provider-db/src/main/resources/003-SENTRY-380.postgres.sql b/sentry-provider/sentry-provider-db/src/main/resources/003-SENTRY-380.postgres.sql index e6fa26bbd..95a2ef169 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/003-SENTRY-380.postgres.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/003-SENTRY-380.postgres.sql @@ -1,7 +1,7 @@ -- SENTRY-380 -ALTER TABLE `SENTRY_DB_PRIVILEGE` DROP `GRANTOR_PRINCIPAL`; -ALTER TABLE `SENTRY_ROLE` DROP `GRANTOR_PRINCIPAL`; -ALTER TABLE 
`SENTRY_GROUP` DROP `GRANTOR_PRINCIPAL`; +ALTER TABLE "SENTRY_DB_PRIVILEGE" DROP "GRANTOR_PRINCIPAL"; +ALTER TABLE "SENTRY_ROLE" DROP "GRANTOR_PRINCIPAL"; +ALTER TABLE "SENTRY_GROUP" DROP "GRANTOR_PRINCIPAL"; -ALTER TABLE `SENTRY_ROLE_DB_PRIVILEGE_MAP` ADD `GRANTOR_PRINCIPAL` character varying(128); -ALTER TABLE `SENTRY_ROLE_GROUP_MAP` ADD `character varying(128); \ No newline at end of file +ALTER TABLE "SENTRY_ROLE_DB_PRIVILEGE_MAP" ADD "GRANTOR_PRINCIPAL" character varying(128); +ALTER TABLE "SENTRY_ROLE_GROUP_MAP" ADD "GRANTOR_PRINCIPAL" character varying(128); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.derby.sql b/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.derby.sql index b82e97f3d..da1f4d6a7 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.derby.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.derby.sql @@ -1,4 +1,4 @@ -- SENTRY-74 -ALTER TABLE SENTRY_DB_PRIVILEGE ADD COLUMN COLUMN_NAME VARCHAR(4000); +ALTER TABLE SENTRY_DB_PRIVILEGE ADD COLUMN COLUMN_NAME VARCHAR(4000) DEFAULT '__NULL__'; DROP INDEX SENTRYPRIVILEGENAME; CREATE UNIQUE INDEX SENTRYPRIVILEGENAME ON SENTRY_DB_PRIVILEGE ("SERVER_NAME",DB_NAME,"TABLE_NAME","COLUMN_NAME",URI,"ACTION",WITH_GRANT_OPTION); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.mysql.sql b/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.mysql.sql index c475a2c91..1419ca3e3 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.mysql.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.mysql.sql @@ -1,4 +1,4 @@ -- SENTRY-74 -ALTER TABLE `SENTRY_DB_PRIVILEGE` ADD `COLUMN_NAME` VARCHAR(128) DEFAULT NULL; +ALTER TABLE `SENTRY_DB_PRIVILEGE` ADD `COLUMN_NAME` VARCHAR(128) DEFAULT '__NULL__'; ALTER TABLE `SENTRY_DB_PRIVILEGE` DROP INDEX `SENTRY_DB_PRIV_PRIV_NAME_UNIQ`; ALTER TABLE `SENTRY_DB_PRIVILEGE` ADD UNIQUE `SENTRY_DB_PRIV_PRIV_NAME_UNIQ` (`SERVER_NAME`,`DB_NAME`,`TABLE_NAME`,`COLUMN_NAME`,`URI`(250),`ACTION`,`WITH_GRANT_OPTION`); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.oracle.sql b/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.oracle.sql index a78b76f1f..a70ae0a37 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.oracle.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.oracle.sql @@ -1,4 +1,4 @@ -- SENTRY-74 -ALTER TABLE SENTRY_DB_PRIVILEGE ADD COLUMN_NAME VARCHAR2(128) DEFAULT NULL; -ALTER TABLE SENTRY_DB_PRIVILEGE DROP CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ"; +ALTER TABLE SENTRY_DB_PRIVILEGE ADD COLUMN_NAME VARCHAR2(128) DEFAULT '__NULL__'; +ALTER TABLE SENTRY_DB_PRIVILEGE DROP CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" DROP INDEX; ALTER TABLE SENTRY_DB_PRIVILEGE ADD CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" UNIQUE ("SERVER_NAME","DB_NAME","TABLE_NAME","COLUMN_NAME","URI","ACTION","WITH_GRANT_OPTION"); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.postgres.sql b/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.postgres.sql index 74ed9c385..81bdfa38d 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.postgres.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/004-SENTRY-74.postgres.sql @@ -1,4 +1,4 @@ -- SENTRY-74 -ALTER TABLE SENTRY_DB_PRIVILEGE ADD COLUMN COLUMN_NAME character varying(128) DEFAULT NULL; 
-ALTER TABLE SENTRY_DB_PRIVILEGE DROP CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ"; -ALTER TABLE SENTRY_DB_PRIVILEGE ADD CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" UNIQUE ("SERVER_NAME","DB_NAME","TABLE_NAME","COLUMN_NAME","URI", "ACTION","WITH_GRANT_OPTION"); +ALTER TABLE "SENTRY_DB_PRIVILEGE" ADD COLUMN "COLUMN_NAME" character varying(128) DEFAULT '__NULL__'; +ALTER TABLE "SENTRY_DB_PRIVILEGE" DROP CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ"; +ALTER TABLE "SENTRY_DB_PRIVILEGE" ADD CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" UNIQUE ("SERVER_NAME","DB_NAME","TABLE_NAME","COLUMN_NAME","URI", "ACTION","WITH_GRANT_OPTION"); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.derby.sql b/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.derby.sql index ce4f41801..c038b8166 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.derby.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.derby.sql @@ -5,15 +5,15 @@ CREATE TABLE SENTRY_GM_PRIVILEGE "ACTION" VARCHAR(40), COMPONENT_NAME VARCHAR(400), CREATE_TIME BIGINT NOT NULL, - WITH_GRANT_OPTION CHAR(1), - RESOURCE_NAME_0 VARCHAR(400), - RESOURCE_NAME_1 VARCHAR(400), - RESOURCE_NAME_2 VARCHAR(400), - RESOURCE_NAME_3 VARCHAR(400), - RESOURCE_TYPE_0 VARCHAR(400), - RESOURCE_TYPE_1 VARCHAR(400), - RESOURCE_TYPE_2 VARCHAR(400), - RESOURCE_TYPE_3 VARCHAR(400), + WITH_GRANT_OPTION CHAR(1) NOT NULL DEFAULT 'N', + RESOURCE_NAME_0 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_1 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_2 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_3 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_0 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_1 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_2 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_3 VARCHAR(400) DEFAULT '__NULL__', "SCOPE" VARCHAR(40), SERVICE_NAME VARCHAR(400) ); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.mysql.sql b/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.mysql.sql index 6d054b895..920737f13 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.mysql.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.mysql.sql @@ -5,15 +5,15 @@ CREATE TABLE `SENTRY_GM_PRIVILEGE` `ACTION` VARCHAR(32) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, `COMPONENT_NAME` VARCHAR(32) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, `CREATE_TIME` BIGINT NOT NULL, - `WITH_GRANT_OPTION` CHAR(1) NOT NULL, - `RESOURCE_NAME_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_NAME_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_NAME_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_NAME_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_TYPE_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_TYPE_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_TYPE_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_TYPE_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `WITH_GRANT_OPTION` CHAR(1) NOT NULL DEFAULT 'N', + `RESOURCE_NAME_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_NAME_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_NAME_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + 
`RESOURCE_NAME_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', `SCOPE` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, `SERVICE_NAME` VARCHAR(64) BINARY CHARACTER SET utf8 COLLATE utf8_bin NOT NULL ) ENGINE=INNODB DEFAULT CHARSET=utf8; @@ -21,7 +21,8 @@ CREATE TABLE `SENTRY_GM_PRIVILEGE` ALTER TABLE `SENTRY_GM_PRIVILEGE` ADD CONSTRAINT `SENTRY_GM_PRIVILEGE_PK` PRIMARY KEY (`GM_PRIVILEGE_ID`); -- Constraints for table SENTRY_GM_PRIVILEGE for class(es) [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] -CREATE UNIQUE INDEX `GM_PRIVILEGE_INDEX` ON `SENTRY_GM_PRIVILEGE` (`COMPONENT_NAME`,`SERVICE_NAME`,`RESOURCE_NAME_0`,`RESOURCE_TYPE_0`,`RESOURCE_NAME_1`,`RESOURCE_TYPE_1`,`RESOURCE_NAME_2`,`RESOURCE_TYPE_2`,`RESOURCE_NAME_3`,`RESOURCE_TYPE_3`,`ACTION`,`WITH_GRANT_OPTION`); +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD UNIQUE `GM_PRIVILEGE_UNIQUE` (`COMPONENT_NAME`,`SERVICE_NAME`,`RESOURCE_NAME_0`,`RESOURCE_TYPE_0`,`RESOURCE_NAME_1`,`RESOURCE_TYPE_1`,`RESOURCE_NAME_2`,`RESOURCE_TYPE_2`,`RESOURCE_NAME_3`,`RESOURCE_TYPE_3`,`ACTION`,`WITH_GRANT_OPTION`); ALTER TABLE `SENTRY_GM_PRIVILEGE` ADD INDEX `SENTRY_GM_PRIV_COMP_IDX` (`COMPONENT_NAME`); @@ -58,4 +59,4 @@ ALTER TABLE `SENTRY_ROLE_GM_PRIVILEGE_MAP` ALTER TABLE `SENTRY_ROLE_GM_PRIVILEGE_MAP` ADD CONSTRAINT `SEN_RL_GM_PRV_MAP_SN_DB_PRV_FK` - FOREIGN KEY (`GM_PRIVILEGE_ID`) REFERENCES `SENTRY_GM_PRIVILEGE`(`GM_PRIVILEGE_ID`); \ No newline at end of file + FOREIGN KEY (`GM_PRIVILEGE_ID`) REFERENCES `SENTRY_GM_PRIVILEGE`(`GM_PRIVILEGE_ID`); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.oracle.sql b/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.oracle.sql index 61c743afa..412bc4557 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.oracle.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.oracle.sql @@ -2,19 +2,19 @@ CREATE TABLE "SENTRY_GM_PRIVILEGE" ( "GM_PRIVILEGE_ID" NUMBER NOT NULL, "COMPONENT_NAME" VARCHAR2(32) NOT NULL, - "SERVICE_NAME" VARCHAR2(64) NOT NULL, - "RESOURCE_NAME_0" VARCHAR2(64) NULL, - "RESOURCE_NAME_1" VARCHAR2(64) NULL, - "RESOURCE_NAME_2" VARCHAR2(64) NULL, - "RESOURCE_NAME_3" VARCHAR2(64) NULL, - "RESOURCE_TYPE_0" VARCHAR2(64) NULL, - "RESOURCE_TYPE_1" VARCHAR2(64) NULL, - "RESOURCE_TYPE_2" VARCHAR2(64) NULL, - "RESOURCE_TYPE_3" VARCHAR2(64) NULL, - "ACTION" VARCHAR2(32) NOT NULL, - "scope" VARCHAR2(128) NOT NULL, "CREATE_TIME" NUMBER NOT NULL, - "WITH_GRANT_OPTION" CHAR(1) NOT NULL + "WITH_GRANT_OPTION" CHAR(1) DEFAULT 'N' NOT NULL, + "RESOURCE_NAME_0" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_1" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_2" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_3" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_0" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_1" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_2" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_3" VARCHAR2(64) DEFAULT '__NULL__', + "ACTION" VARCHAR2(32) NOT NULL, + "SCOPE" VARCHAR2(128) NOT NULL, + "SERVICE_NAME" VARCHAR2(64) NOT NULL ); ALTER TABLE "SENTRY_GM_PRIVILEGE" @@ -52,4 +52,4 @@ ALTER TABLE 
"SENTRY_ROLE_GM_PRIVILEGE_MAP" ALTER TABLE "SENTRY_ROLE_GM_PRIVILEGE_MAP" ADD CONSTRAINT "SEN_RL_GM_PRV_MAP_SN_DB_PRV_FK" - FOREIGN KEY ("GM_PRIVILEGE_ID") REFERENCES "SENTRY_GM_PRIVILEGE"("GM_PRIVILEGE_ID") INITIALLY DEFERRED; \ No newline at end of file + FOREIGN KEY ("GM_PRIVILEGE_ID") REFERENCES "SENTRY_GM_PRIVILEGE"("GM_PRIVILEGE_ID") INITIALLY DEFERRED; diff --git a/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.postgres.sql b/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.postgres.sql index 54c4c012f..e9e165572 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.postgres.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/005-SENTRY-398.postgres.sql @@ -2,19 +2,19 @@ CREATE TABLE "SENTRY_GM_PRIVILEGE" ( "GM_PRIVILEGE_ID" BIGINT NOT NULL, "COMPONENT_NAME" character varying(32) NOT NULL, - "SERVICE_NAME" character varying(64) NOT NULL, - "RESOURCE_NAME_0" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_NAME_1" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_NAME_2" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_NAME_3" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_TYPE_0" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_TYPE_1" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_TYPE_2" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_TYPE_3" character varying(64) DEFAULT NULL::character varying, - "ACTION" character varying(32) NOT NULL, - "scope" character varying(128) NOT NULL, "CREATE_TIME" BIGINT NOT NULL, - "WITH_GRANT_OPTION" CHAR(1) NOT NULL + "WITH_GRANT_OPTION" CHAR(1) NOT NULL DEFAULT 'N', + "RESOURCE_NAME_0" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_1" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_2" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_3" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_0" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_1" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_2" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_3" character varying(64) DEFAULT '__NULL__', + "ACTION" character varying(32) NOT NULL, + "SCOPE" character varying(128) NOT NULL, + "SERVICE_NAME" character varying(64) NOT NULL ); ALTER TABLE ONLY "SENTRY_GM_PRIVILEGE" ADD CONSTRAINT "SENTRY_GM_PRIV_PK" PRIMARY KEY ("GM_PRIVILEGE_ID"); @@ -51,4 +51,4 @@ ALTER TABLE ONLY "SENTRY_ROLE_GM_PRIVILEGE_MAP" ALTER TABLE ONLY "SENTRY_ROLE_GM_PRIVILEGE_MAP" ADD CONSTRAINT "SEN_RL_GM_PRV_MAP_SN_DB_PRV_FK" - FOREIGN KEY ("GM_PRIVILEGE_ID") REFERENCES "SENTRY_GM_PRIVILEGE"("GM_PRIVILEGE_ID") DEFERRABLE; \ No newline at end of file + FOREIGN KEY ("GM_PRIVILEGE_ID") REFERENCES "SENTRY_GM_PRIVILEGE"("GM_PRIVILEGE_ID") DEFERRABLE; diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-db2-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-db2-1.6.0.sql new file mode 100644 index 000000000..0f8f0af34 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-db2-1.6.0.sql @@ -0,0 +1,155 @@ +--Licensed to the Apache Software Foundation (ASF) under one or more +--contributor license agreements. See the NOTICE file distributed with +--this work for additional information regarding copyright ownership. 
+--The ASF licenses this file to You under the Apache License, Version 2.0 +--(the "License"); you may not use this file except in compliance with +--the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +--Unless required by applicable law or agreed to in writing, software +--distributed under the License is distributed on an "AS IS" BASIS, +--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +--See the License for the specific language governing permissions and +--limitations under the License. + +-- Table SENTRY_DB_PRIVILEGE for classes [org.apache.sentry.provider.db.service.model.MSentryPrivilege] +CREATE TABLE SENTRY_DB_PRIVILEGE +( + DB_PRIVILEGE_ID BIGINT NOT NULL generated always as identity (start with 1), + URI VARCHAR(4000), + "ACTION" VARCHAR(40), + CREATE_TIME BIGINT NOT NULL, + DB_NAME VARCHAR(4000), + PRIVILEGE_SCOPE VARCHAR(40), + "SERVER_NAME" VARCHAR(4000), + "TABLE_NAME" VARCHAR(4000), + "COLUMN_NAME" VARCHAR(4000), + WITH_GRANT_OPTION CHAR(1) NOT NULL +); + +ALTER TABLE SENTRY_DB_PRIVILEGE ADD CONSTRAINT SENTRY_DB_PRIVILEGE_PK PRIMARY KEY (DB_PRIVILEGE_ID); + +-- Table SENTRY_ROLE for classes [org.apache.sentry.provider.db.service.model.MSentryRole] +CREATE TABLE SENTRY_ROLE +( + ROLE_ID BIGINT NOT NULL generated always as identity (start with 1), + CREATE_TIME BIGINT NOT NULL, + ROLE_NAME VARCHAR(128) +); + +ALTER TABLE SENTRY_ROLE ADD CONSTRAINT SENTRY_ROLE_PK PRIMARY KEY (ROLE_ID); + +-- Table SENTRY_GROUP for classes [org.apache.sentry.provider.db.service.model.MSentryGroup] +CREATE TABLE SENTRY_GROUP +( + GROUP_ID BIGINT NOT NULL generated always as identity (start with 1), + CREATE_TIME BIGINT NOT NULL, + GROUP_NAME VARCHAR(128) +); + +ALTER TABLE SENTRY_GROUP ADD CONSTRAINT SENTRY_GROUP_PK PRIMARY KEY (GROUP_ID); + +-- Table SENTRY_ROLE_GROUP_MAP for join relationship +CREATE TABLE SENTRY_ROLE_GROUP_MAP +( + GROUP_ID BIGINT NOT NULL, + ROLE_ID BIGINT NOT NULL, + GRANTOR_PRINCIPAL VARCHAR(128) +); + +ALTER TABLE SENTRY_ROLE_GROUP_MAP ADD CONSTRAINT SENTRY_ROLE_GROUP_MAP_PK PRIMARY KEY (GROUP_ID,ROLE_ID); + +-- Table SENTRY_ROLE_DB_PRIVILEGE_MAP for join relationship +CREATE TABLE SENTRY_ROLE_DB_PRIVILEGE_MAP +( + ROLE_ID BIGINT NOT NULL, + DB_PRIVILEGE_ID BIGINT NOT NULL, + GRANTOR_PRINCIPAL VARCHAR(128) +); + +ALTER TABLE SENTRY_ROLE_DB_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_DB_PRIVILEGE_MAP_PK PRIMARY KEY (ROLE_ID,DB_PRIVILEGE_ID); + +CREATE TABLE "SENTRY_VERSION" ( + VER_ID BIGINT NOT NULL, + SCHEMA_VERSION VARCHAR(127), + VERSION_COMMENT VARCHAR(255) +); + +ALTER TABLE SENTRY_VERSION ADD CONSTRAINT SENTRY_VERSION_PK PRIMARY KEY (VER_ID); + +-- Constraints for table SENTRY_DB_PRIVILEGE for class(es) [org.apache.sentry.provider.db.service.model.MSentryPrivilege] +CREATE UNIQUE INDEX SENTRYPRIVILEGENAME ON SENTRY_DB_PRIVILEGE ("SERVER_NAME",DB_NAME,"TABLE_NAME","COLUMN_NAME",URI,"ACTION",WITH_GRANT_OPTION); + + +-- Constraints for table SENTRY_ROLE for class(es) [org.apache.sentry.provider.db.service.model.MSentryRole] +CREATE UNIQUE INDEX SENTRYROLENAME ON SENTRY_ROLE (ROLE_NAME); + + +-- Constraints for table SENTRY_GROUP for class(es) [org.apache.sentry.provider.db.service.model.MSentryGroup] +CREATE UNIQUE INDEX SENTRYGROUPNAME ON SENTRY_GROUP (GROUP_NAME); + + +-- Constraints for table SENTRY_ROLE_GROUP_MAP +CREATE INDEX SENTRY_ROLE_GROUP_MAP_N49 ON SENTRY_ROLE_GROUP_MAP (GROUP_ID); + +CREATE INDEX SENTRY_ROLE_GROUP_MAP_N50 ON SENTRY_ROLE_GROUP_MAP (ROLE_ID); + +ALTER TABLE 
SENTRY_ROLE_GROUP_MAP ADD CONSTRAINT SENTRY_ROLE_GROUP_MAP_FK2 FOREIGN KEY (ROLE_ID) REFERENCES SENTRY_ROLE (ROLE_ID) ; + +ALTER TABLE SENTRY_ROLE_GROUP_MAP ADD CONSTRAINT SENTRY_ROLE_GROUP_MAP_FK1 FOREIGN KEY (GROUP_ID) REFERENCES SENTRY_GROUP (GROUP_ID) ; + + +-- Constraints for table SENTRY_ROLE_DB_PRIVILEGE_MAP +CREATE INDEX SENTRY_ROLE_DB_PRIVILEGE_MAP_N50 ON SENTRY_ROLE_DB_PRIVILEGE_MAP (ROLE_ID); + +CREATE INDEX SENTRY_ROLE_DB_PRIVILEGE_MAP_N49 ON SENTRY_ROLE_DB_PRIVILEGE_MAP (DB_PRIVILEGE_ID); + +ALTER TABLE SENTRY_ROLE_DB_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_DB_PRIVILEGE_MAP_FK2 FOREIGN KEY (DB_PRIVILEGE_ID) REFERENCES SENTRY_DB_PRIVILEGE (DB_PRIVILEGE_ID) ; + +ALTER TABLE SENTRY_ROLE_DB_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_DB_PRIVILEGE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES SENTRY_ROLE (ROLE_ID) ; + +INSERT INTO SENTRY_VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '1.6.0', 'Sentry release version 1.6.0'); + +-- Generic model +-- Table SENTRY_GM_PRIVILEGE for classes [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +CREATE TABLE SENTRY_GM_PRIVILEGE +( + GM_PRIVILEGE_ID BIGINT NOT NULL, + "ACTION" VARCHAR(40), + COMPONENT_NAME VARCHAR(400), + CREATE_TIME BIGINT NOT NULL, + WITH_GRANT_OPTION CHAR(1), + RESOURCE_NAME_0 VARCHAR(400), + RESOURCE_NAME_1 VARCHAR(400), + RESOURCE_NAME_2 VARCHAR(400), + RESOURCE_NAME_3 VARCHAR(400), + RESOURCE_TYPE_0 VARCHAR(400), + RESOURCE_TYPE_1 VARCHAR(400), + RESOURCE_TYPE_2 VARCHAR(400), + RESOURCE_TYPE_3 VARCHAR(400), + "SCOPE" VARCHAR(40), + SERVICE_NAME VARCHAR(400) +); +-- Primary key(GM_PRIVILEGE_ID) +ALTER TABLE SENTRY_GM_PRIVILEGE ADD CONSTRAINT SENTRY_GM_PRIVILEGE_PK PRIMARY KEY (GM_PRIVILEGE_ID); + +-- Constraints for table SENTRY_GM_PRIVILEGE for class(es) [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +CREATE UNIQUE INDEX GM_PRIVILEGE_INDEX ON SENTRY_GM_PRIVILEGE (COMPONENT_NAME,SERVICE_NAME,RESOURCE_NAME_0,RESOURCE_TYPE_0,RESOURCE_NAME_1,RESOURCE_TYPE_1,RESOURCE_NAME_2,RESOURCE_TYPE_2,RESOURCE_NAME_3,RESOURCE_TYPE_3,"ACTION",WITH_GRANT_OPTION); + +-- Table SENTRY_ROLE_GM_PRIVILEGE_MAP for join relationship +CREATE TABLE SENTRY_ROLE_GM_PRIVILEGE_MAP +( + ROLE_ID BIGINT NOT NULL, + GM_PRIVILEGE_ID BIGINT NOT NULL +); +ALTER TABLE SENTRY_ROLE_GM_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_GM_PRIVILEGE_MAP_PK PRIMARY KEY (ROLE_ID,GM_PRIVILEGE_ID); + +-- Constraints for table SENTRY_ROLE_GM_PRIVILEGE_MAP +CREATE INDEX SENTRY_ROLE_GM_PRIVILEGE_MAP_N50 ON SENTRY_ROLE_GM_PRIVILEGE_MAP (ROLE_ID); + +CREATE INDEX SENTRY_ROLE_GM_PRIVILEGE_MAP_N49 ON SENTRY_ROLE_GM_PRIVILEGE_MAP (GM_PRIVILEGE_ID); + +ALTER TABLE SENTRY_ROLE_GM_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_GM_PRIVILEGE_MAP_FK2 FOREIGN KEY (GM_PRIVILEGE_ID) REFERENCES SENTRY_GM_PRIVILEGE (GM_PRIVILEGE_ID); + +ALTER TABLE SENTRY_ROLE_GM_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_GM_PRIVILEGE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES SENTRY_ROLE (ROLE_ID); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-derby-1.5.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-derby-1.5.0.sql index 483aa7ebf..89d73bb9a 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/sentry-derby-1.5.0.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-derby-1.5.0.sql @@ -17,14 +17,14 @@ CREATE TABLE SENTRY_DB_PRIVILEGE ( DB_PRIVILEGE_ID BIGINT NOT NULL generated always as identity (start with 1), - URI VARCHAR(4000), + URI VARCHAR(4000) DEFAULT '__NULL__', "ACTION" VARCHAR(40), CREATE_TIME 
BIGINT NOT NULL, - DB_NAME VARCHAR(4000), + DB_NAME VARCHAR(4000) DEFAULT '__NULL__', PRIVILEGE_SCOPE VARCHAR(40), "SERVER_NAME" VARCHAR(4000), - "TABLE_NAME" VARCHAR(4000), - "COLUMN_NAME" VARCHAR(4000), + "TABLE_NAME" VARCHAR(4000) DEFAULT '__NULL__', + "COLUMN_NAME" VARCHAR(4000) DEFAULT '__NULL__', WITH_GRANT_OPTION CHAR(1) NOT NULL ); @@ -120,14 +120,14 @@ CREATE TABLE SENTRY_GM_PRIVILEGE COMPONENT_NAME VARCHAR(400), CREATE_TIME BIGINT NOT NULL, WITH_GRANT_OPTION CHAR(1), - RESOURCE_NAME_0 VARCHAR(400), - RESOURCE_NAME_1 VARCHAR(400), - RESOURCE_NAME_2 VARCHAR(400), - RESOURCE_NAME_3 VARCHAR(400), - RESOURCE_TYPE_0 VARCHAR(400), - RESOURCE_TYPE_1 VARCHAR(400), - RESOURCE_TYPE_2 VARCHAR(400), - RESOURCE_TYPE_3 VARCHAR(400), + RESOURCE_NAME_0 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_1 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_2 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_3 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_0 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_1 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_2 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_3 VARCHAR(400) DEFAULT '__NULL__', "SCOPE" VARCHAR(40), SERVICE_NAME VARCHAR(400) ); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-derby-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-derby-1.6.0.sql new file mode 100644 index 000000000..9ceb4c53b --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-derby-1.6.0.sql @@ -0,0 +1,155 @@ +--Licensed to the Apache Software Foundation (ASF) under one or more +--contributor license agreements. See the NOTICE file distributed with +--this work for additional information regarding copyright ownership. +--The ASF licenses this file to You under the Apache License, Version 2.0 +--(the "License"); you may not use this file except in compliance with +--the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +--Unless required by applicable law or agreed to in writing, software +--distributed under the License is distributed on an "AS IS" BASIS, +--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +--See the License for the specific language governing permissions and +--limitations under the License. 
+ +-- Table SENTRY_DB_PRIVILEGE for classes [org.apache.sentry.provider.db.service.model.MSentryPrivilege] +CREATE TABLE SENTRY_DB_PRIVILEGE +( + DB_PRIVILEGE_ID BIGINT NOT NULL generated always as identity (start with 1), + URI VARCHAR(4000) DEFAULT '__NULL__', + "ACTION" VARCHAR(40), + CREATE_TIME BIGINT NOT NULL, + DB_NAME VARCHAR(4000) DEFAULT '__NULL__', + PRIVILEGE_SCOPE VARCHAR(40), + "SERVER_NAME" VARCHAR(4000), + "TABLE_NAME" VARCHAR(4000) DEFAULT '__NULL__', + "COLUMN_NAME" VARCHAR(4000) DEFAULT '__NULL__', + WITH_GRANT_OPTION CHAR(1) NOT NULL +); + +ALTER TABLE SENTRY_DB_PRIVILEGE ADD CONSTRAINT SENTRY_DB_PRIVILEGE_PK PRIMARY KEY (DB_PRIVILEGE_ID); + +-- Table SENTRY_ROLE for classes [org.apache.sentry.provider.db.service.model.MSentryRole] +CREATE TABLE SENTRY_ROLE +( + ROLE_ID BIGINT NOT NULL generated always as identity (start with 1), + CREATE_TIME BIGINT NOT NULL, + ROLE_NAME VARCHAR(128) +); + +ALTER TABLE SENTRY_ROLE ADD CONSTRAINT SENTRY_ROLE_PK PRIMARY KEY (ROLE_ID); + +-- Table SENTRY_GROUP for classes [org.apache.sentry.provider.db.service.model.MSentryGroup] +CREATE TABLE SENTRY_GROUP +( + GROUP_ID BIGINT NOT NULL generated always as identity (start with 1), + CREATE_TIME BIGINT NOT NULL, + GROUP_NAME VARCHAR(128) +); + +ALTER TABLE SENTRY_GROUP ADD CONSTRAINT SENTRY_GROUP_PK PRIMARY KEY (GROUP_ID); + +-- Table SENTRY_ROLE_GROUP_MAP for join relationship +CREATE TABLE SENTRY_ROLE_GROUP_MAP +( + GROUP_ID BIGINT NOT NULL, + ROLE_ID BIGINT NOT NULL, + GRANTOR_PRINCIPAL VARCHAR(128) +); + +ALTER TABLE SENTRY_ROLE_GROUP_MAP ADD CONSTRAINT SENTRY_ROLE_GROUP_MAP_PK PRIMARY KEY (GROUP_ID,ROLE_ID); + +-- Table SENTRY_ROLE_DB_PRIVILEGE_MAP for join relationship +CREATE TABLE SENTRY_ROLE_DB_PRIVILEGE_MAP +( + ROLE_ID BIGINT NOT NULL, + DB_PRIVILEGE_ID BIGINT NOT NULL, + GRANTOR_PRINCIPAL VARCHAR(128) +); + +ALTER TABLE SENTRY_ROLE_DB_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_DB_PRIVILEGE_MAP_PK PRIMARY KEY (ROLE_ID,DB_PRIVILEGE_ID); + +CREATE TABLE "SENTRY_VERSION" ( + VER_ID BIGINT NOT NULL, + SCHEMA_VERSION VARCHAR(127), + VERSION_COMMENT VARCHAR(255) +); + +ALTER TABLE SENTRY_VERSION ADD CONSTRAINT SENTRY_VERSION_PK PRIMARY KEY (VER_ID); + +-- Constraints for table SENTRY_DB_PRIVILEGE for class(es) [org.apache.sentry.provider.db.service.model.MSentryPrivilege] +CREATE UNIQUE INDEX SENTRYPRIVILEGENAME ON SENTRY_DB_PRIVILEGE ("SERVER_NAME",DB_NAME,"TABLE_NAME","COLUMN_NAME",URI,"ACTION",WITH_GRANT_OPTION); + + +-- Constraints for table SENTRY_ROLE for class(es) [org.apache.sentry.provider.db.service.model.MSentryRole] +CREATE UNIQUE INDEX SENTRYROLENAME ON SENTRY_ROLE (ROLE_NAME); + + +-- Constraints for table SENTRY_GROUP for class(es) [org.apache.sentry.provider.db.service.model.MSentryGroup] +CREATE UNIQUE INDEX SENTRYGROUPNAME ON SENTRY_GROUP (GROUP_NAME); + + +-- Constraints for table SENTRY_ROLE_GROUP_MAP +CREATE INDEX SENTRY_ROLE_GROUP_MAP_N49 ON SENTRY_ROLE_GROUP_MAP (GROUP_ID); + +CREATE INDEX SENTRY_ROLE_GROUP_MAP_N50 ON SENTRY_ROLE_GROUP_MAP (ROLE_ID); + +ALTER TABLE SENTRY_ROLE_GROUP_MAP ADD CONSTRAINT SENTRY_ROLE_GROUP_MAP_FK2 FOREIGN KEY (ROLE_ID) REFERENCES SENTRY_ROLE (ROLE_ID) ; + +ALTER TABLE SENTRY_ROLE_GROUP_MAP ADD CONSTRAINT SENTRY_ROLE_GROUP_MAP_FK1 FOREIGN KEY (GROUP_ID) REFERENCES SENTRY_GROUP (GROUP_ID) ; + + +-- Constraints for table SENTRY_ROLE_DB_PRIVILEGE_MAP +CREATE INDEX SENTRY_ROLE_DB_PRIVILEGE_MAP_N50 ON SENTRY_ROLE_DB_PRIVILEGE_MAP (ROLE_ID); + +CREATE INDEX SENTRY_ROLE_DB_PRIVILEGE_MAP_N49 ON SENTRY_ROLE_DB_PRIVILEGE_MAP (DB_PRIVILEGE_ID); + 
+ALTER TABLE SENTRY_ROLE_DB_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_DB_PRIVILEGE_MAP_FK2 FOREIGN KEY (DB_PRIVILEGE_ID) REFERENCES SENTRY_DB_PRIVILEGE (DB_PRIVILEGE_ID) ; + +ALTER TABLE SENTRY_ROLE_DB_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_DB_PRIVILEGE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES SENTRY_ROLE (ROLE_ID) ; + +INSERT INTO SENTRY_VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '1.6.0', 'Sentry release version 1.6.0'); + +-- Generic Model +-- Table SENTRY_GM_PRIVILEGE for classes [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +CREATE TABLE SENTRY_GM_PRIVILEGE +( + GM_PRIVILEGE_ID BIGINT NOT NULL, + "ACTION" VARCHAR(40), + COMPONENT_NAME VARCHAR(400), + CREATE_TIME BIGINT NOT NULL, + WITH_GRANT_OPTION CHAR(1), + RESOURCE_NAME_0 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_1 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_2 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_NAME_3 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_0 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_1 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_2 VARCHAR(400) DEFAULT '__NULL__', + RESOURCE_TYPE_3 VARCHAR(400) DEFAULT '__NULL__', + "SCOPE" VARCHAR(40), + SERVICE_NAME VARCHAR(400) +); +-- Primary key(GM_PRIVILEGE_ID) +ALTER TABLE SENTRY_GM_PRIVILEGE ADD CONSTRAINT SENTRY_GM_PRIVILEGE_PK PRIMARY KEY (GM_PRIVILEGE_ID); + +-- Constraints for table SENTRY_GM_PRIVILEGE for class(es) [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +CREATE UNIQUE INDEX GM_PRIVILEGE_INDEX ON SENTRY_GM_PRIVILEGE (COMPONENT_NAME,SERVICE_NAME,RESOURCE_NAME_0,RESOURCE_TYPE_0,RESOURCE_NAME_1,RESOURCE_TYPE_1,RESOURCE_NAME_2,RESOURCE_TYPE_2,RESOURCE_NAME_3,RESOURCE_TYPE_3,"ACTION",WITH_GRANT_OPTION); + +-- Table SENTRY_ROLE_GM_PRIVILEGE_MAP for join relationship +CREATE TABLE SENTRY_ROLE_GM_PRIVILEGE_MAP +( + ROLE_ID BIGINT NOT NULL, + GM_PRIVILEGE_ID BIGINT NOT NULL +); +ALTER TABLE SENTRY_ROLE_GM_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_GM_PRIVILEGE_MAP_PK PRIMARY KEY (ROLE_ID,GM_PRIVILEGE_ID); + +-- Constraints for table SENTRY_ROLE_GM_PRIVILEGE_MAP +CREATE INDEX SENTRY_ROLE_GM_PRIVILEGE_MAP_N50 ON SENTRY_ROLE_GM_PRIVILEGE_MAP (ROLE_ID); + +CREATE INDEX SENTRY_ROLE_GM_PRIVILEGE_MAP_N49 ON SENTRY_ROLE_GM_PRIVILEGE_MAP (GM_PRIVILEGE_ID); + +ALTER TABLE SENTRY_ROLE_GM_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_GM_PRIVILEGE_MAP_FK2 FOREIGN KEY (GM_PRIVILEGE_ID) REFERENCES SENTRY_GM_PRIVILEGE (GM_PRIVILEGE_ID); + +ALTER TABLE SENTRY_ROLE_GM_PRIVILEGE_MAP ADD CONSTRAINT SENTRY_ROLE_GM_PRIVILEGE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES SENTRY_ROLE (ROLE_ID); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-mysql-1.5.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-mysql-1.5.0.sql index e190c4719..d5d2e0a5b 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/sentry-mysql-1.5.0.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-mysql-1.5.0.sql @@ -29,10 +29,10 @@ CREATE TABLE `SENTRY_DB_PRIVILEGE` ( `DB_PRIVILEGE_ID` BIGINT NOT NULL, `PRIVILEGE_SCOPE` VARCHAR(32) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, `SERVER_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, - `DB_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `TABLE_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `COLUMN_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `URI` VARCHAR(4000) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `DB_NAME` VARCHAR(128) CHARACTER SET 
utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `TABLE_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `COLUMN_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `URI` VARCHAR(4000) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', `ACTION` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, `CREATE_TIME` BIGINT NOT NULL, `WITH_GRANT_OPTION` CHAR(1) NOT NULL @@ -137,14 +137,14 @@ CREATE TABLE `SENTRY_GM_PRIVILEGE` `COMPONENT_NAME` VARCHAR(32) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, `CREATE_TIME` BIGINT NOT NULL, `WITH_GRANT_OPTION` CHAR(1) NOT NULL, - `RESOURCE_NAME_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_NAME_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_NAME_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_NAME_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_TYPE_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_TYPE_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_TYPE_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, - `RESOURCE_TYPE_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `RESOURCE_NAME_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_NAME_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_NAME_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_NAME_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', `SCOPE` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, `SERVICE_NAME` VARCHAR(64) BINARY CHARACTER SET utf8 COLLATE utf8_bin NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-mysql-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-mysql-1.6.0.sql new file mode 100644 index 000000000..1c1bb943a --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-mysql-1.6.0.sql @@ -0,0 +1,193 @@ +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+ + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +CREATE TABLE `SENTRY_DB_PRIVILEGE` ( + `DB_PRIVILEGE_ID` BIGINT NOT NULL, + `PRIVILEGE_SCOPE` VARCHAR(32) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `SERVER_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `DB_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `TABLE_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `COLUMN_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `URI` VARCHAR(4000) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `ACTION` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `CREATE_TIME` BIGINT NOT NULL, + `WITH_GRANT_OPTION` CHAR(1) NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `SENTRY_ROLE` ( + `ROLE_ID` BIGINT NOT NULL, + `ROLE_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `CREATE_TIME` BIGINT NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `SENTRY_GROUP` ( + `GROUP_ID` BIGINT NOT NULL, + `GROUP_NAME` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `CREATE_TIME` BIGINT NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `SENTRY_ROLE_DB_PRIVILEGE_MAP` ( + `ROLE_ID` BIGINT NOT NULL, + `DB_PRIVILEGE_ID` BIGINT NOT NULL, + `GRANTOR_PRINCIPAL` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `SENTRY_ROLE_GROUP_MAP` ( + `ROLE_ID` BIGINT NOT NULL, + `GROUP_ID` BIGINT NOT NULL, + `GRANTOR_PRINCIPAL` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `SENTRY_VERSION` ( + `VER_ID` BIGINT NOT NULL, + `SCHEMA_VERSION` VARCHAR(127) NOT NULL, + `VERSION_COMMENT` VARCHAR(255) NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `SENTRY_DB_PRIVILEGE` + ADD CONSTRAINT `SENTRY_DB_PRIV_PK` PRIMARY KEY (`DB_PRIVILEGE_ID`); + +ALTER TABLE `SENTRY_ROLE` + ADD CONSTRAINT `SENTRY_ROLE_PK` PRIMARY KEY (`ROLE_ID`); + +ALTER TABLE `SENTRY_GROUP` + ADD CONSTRAINT `SENTRY_GROUP_PK` PRIMARY KEY (`GROUP_ID`); + +ALTER TABLE `SENTRY_VERSION` + ADD CONSTRAINT `SENTRY_VERSION` PRIMARY KEY (`VER_ID`); + +ALTER TABLE `SENTRY_DB_PRIVILEGE` + ADD UNIQUE `SENTRY_DB_PRIV_PRIV_NAME_UNIQ` (`SERVER_NAME`,`DB_NAME`,`TABLE_NAME`,`COLUMN_NAME`,`URI`(250),`ACTION`,`WITH_GRANT_OPTION`); + +ALTER TABLE `SENTRY_DB_PRIVILEGE` + ADD INDEX `SENTRY_PRIV_SERV_IDX` (`SERVER_NAME`); + +ALTER TABLE `SENTRY_DB_PRIVILEGE` + ADD INDEX `SENTRY_PRIV_DB_IDX` (`DB_NAME`); + +ALTER TABLE `SENTRY_DB_PRIVILEGE` + ADD INDEX `SENTRY_PRIV_TBL_IDX` (`TABLE_NAME`); + +ALTER TABLE `SENTRY_DB_PRIVILEGE` + ADD INDEX `SENTRY_PRIV_COL_IDX` (`COLUMN_NAME`); + +ALTER TABLE `SENTRY_DB_PRIVILEGE` + ADD INDEX `SENTRY_PRIV_URI_IDX` (`URI`); + +ALTER TABLE `SENTRY_ROLE` + ADD CONSTRAINT `SENTRY_ROLE_ROLE_NAME_UNIQUE` UNIQUE (`ROLE_NAME`); + +ALTER TABLE `SENTRY_GROUP` + ADD CONSTRAINT `SENTRY_GRP_GRP_NAME_UNIQUE` 
UNIQUE (`GROUP_NAME`); + +ALTER TABLE `SENTRY_ROLE_DB_PRIVILEGE_MAP` + ADD CONSTRAINT `SENTRY_ROLE_DB_PRIVILEGE_MAP_PK` PRIMARY KEY (`ROLE_ID`,`DB_PRIVILEGE_ID`); + +ALTER TABLE `SENTRY_ROLE_GROUP_MAP` + ADD CONSTRAINT `SENTRY_ROLE_GROUP_MAP_PK` PRIMARY KEY (`ROLE_ID`,`GROUP_ID`); + +ALTER TABLE `SENTRY_ROLE_DB_PRIVILEGE_MAP` + ADD CONSTRAINT `SEN_RLE_DB_PRV_MAP_SN_RLE_FK` + FOREIGN KEY (`ROLE_ID`) REFERENCES `SENTRY_ROLE`(`ROLE_ID`); + +ALTER TABLE `SENTRY_ROLE_DB_PRIVILEGE_MAP` + ADD CONSTRAINT `SEN_RL_DB_PRV_MAP_SN_DB_PRV_FK` + FOREIGN KEY (`DB_PRIVILEGE_ID`) REFERENCES `SENTRY_DB_PRIVILEGE`(`DB_PRIVILEGE_ID`); + +ALTER TABLE `SENTRY_ROLE_GROUP_MAP` + ADD CONSTRAINT `SEN_ROLE_GROUP_MAP_SEN_ROLE_FK` + FOREIGN KEY (`ROLE_ID`) REFERENCES `SENTRY_ROLE`(`ROLE_ID`); + +ALTER TABLE `SENTRY_ROLE_GROUP_MAP` + ADD CONSTRAINT `SEN_ROLE_GROUP_MAP_SEN_GRP_FK` + FOREIGN KEY (`GROUP_ID`) REFERENCES `SENTRY_GROUP`(`GROUP_ID`); + +INSERT INTO SENTRY_VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '1.6.0', 'Sentry release version 1.6.0'); + +-- Generic Model +-- Table SENTRY_GM_PRIVILEGE for classes [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +CREATE TABLE `SENTRY_GM_PRIVILEGE` +( + `GM_PRIVILEGE_ID` BIGINT NOT NULL, + `ACTION` VARCHAR(32) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `COMPONENT_NAME` VARCHAR(32) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `CREATE_TIME` BIGINT NOT NULL, + `WITH_GRANT_OPTION` CHAR(1) NOT NULL, + `RESOURCE_NAME_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_NAME_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_NAME_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_NAME_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_0` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_1` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_2` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `RESOURCE_TYPE_3` VARCHAR(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT '__NULL__', + `SCOPE` VARCHAR(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `SERVICE_NAME` VARCHAR(64) BINARY CHARACTER SET utf8 COLLATE utf8_bin NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD CONSTRAINT `SENTRY_GM_PRIVILEGE_PK` PRIMARY KEY (`GM_PRIVILEGE_ID`); +-- Constraints for table SENTRY_GM_PRIVILEGE for class(es) [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD UNIQUE `GM_PRIVILEGE_UNIQUE` (`COMPONENT_NAME`,`SERVICE_NAME`,`RESOURCE_NAME_0`,`RESOURCE_TYPE_0`,`RESOURCE_NAME_1`,`RESOURCE_TYPE_1`,`RESOURCE_NAME_2`,`RESOURCE_TYPE_2`,`RESOURCE_NAME_3`,`RESOURCE_TYPE_3`,`ACTION`,`WITH_GRANT_OPTION`); + +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD INDEX `SENTRY_GM_PRIV_COMP_IDX` (`COMPONENT_NAME`); + +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD INDEX `SENTRY_GM_PRIV_SERV_IDX` (`SERVICE_NAME`); + +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD INDEX `SENTRY_GM_PRIV_RES0_IDX` (`RESOURCE_NAME_0`,`RESOURCE_TYPE_0`); + +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD INDEX `SENTRY_GM_PRIV_RES1_IDX` (`RESOURCE_NAME_1`,`RESOURCE_TYPE_1`); + +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD INDEX `SENTRY_GM_PRIV_RES2_IDX` (`RESOURCE_NAME_2`,`RESOURCE_TYPE_2`); + +ALTER TABLE `SENTRY_GM_PRIVILEGE` + ADD INDEX `SENTRY_GM_PRIV_RES3_IDX` (`RESOURCE_NAME_3`,`RESOURCE_TYPE_3`); + +-- Table 
SENTRY_ROLE_GM_PRIVILEGE_MAP for join relationship +CREATE TABLE `SENTRY_ROLE_GM_PRIVILEGE_MAP` +( + `ROLE_ID` BIGINT NOT NULL, + `GM_PRIVILEGE_ID` BIGINT NOT NULL +) ENGINE=INNODB DEFAULT CHARSET=utf8; + +ALTER TABLE `SENTRY_ROLE_GM_PRIVILEGE_MAP` + ADD CONSTRAINT `SENTRY_ROLE_GM_PRIVILEGE_MAP_PK` PRIMARY KEY (`ROLE_ID`,`GM_PRIVILEGE_ID`); + +-- Constraints for table SENTRY_ROLE_GM_PRIVILEGE_MAP +ALTER TABLE `SENTRY_ROLE_GM_PRIVILEGE_MAP` + ADD CONSTRAINT `SEN_RLE_GM_PRV_MAP_SN_RLE_FK` + FOREIGN KEY (`ROLE_ID`) REFERENCES `SENTRY_ROLE`(`ROLE_ID`); + +ALTER TABLE `SENTRY_ROLE_GM_PRIVILEGE_MAP` + ADD CONSTRAINT `SEN_RL_GM_PRV_MAP_SN_DB_PRV_FK` + FOREIGN KEY (`GM_PRIVILEGE_ID`) REFERENCES `SENTRY_GM_PRIVILEGE`(`GM_PRIVILEGE_ID`); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-oracle-1.5.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-oracle-1.5.0.sql index 7ff933210..fe8e93c5b 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/sentry-oracle-1.5.0.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-oracle-1.5.0.sql @@ -17,13 +17,13 @@ CREATE TABLE "SENTRY_DB_PRIVILEGE" ( "DB_PRIVILEGE_ID" NUMBER NOT NULL, "PRIVILEGE_SCOPE" VARCHAR2(32) NOT NULL, "SERVER_NAME" VARCHAR2(128) NOT NULL, - "DB_NAME" VARCHAR2(128) NULL, - "TABLE_NAME" VARCHAR2(128) NULL, - "COLUMN_NAME" VARCHAR2(128) NULL, - "URI" VARCHAR2(4000) NULL, + "DB_NAME" VARCHAR2(128) DEFAULT '__NULL__', + "TABLE_NAME" VARCHAR2(128) DEFAULT '__NULL__', + "COLUMN_NAME" VARCHAR2(128) DEFAULT '__NULL__', + "URI" VARCHAR2(4000) DEFAULT '__NULL__', "ACTION" VARCHAR2(128) NOT NULL, "CREATE_TIME" NUMBER NOT NULL, - "WITH_GRANT_OPTION" CHAR(1) NOT NULL + "WITH_GRANT_OPTION" CHAR(1) DEFAULT 'N' NOT NULL ); CREATE TABLE "SENTRY_ROLE" ( @@ -116,18 +116,18 @@ CREATE TABLE "SENTRY_GM_PRIVILEGE" ( "GM_PRIVILEGE_ID" NUMBER NOT NULL, "COMPONENT_NAME" VARCHAR2(32) NOT NULL, "SERVICE_NAME" VARCHAR2(64) NOT NULL, - "RESOURCE_NAME_0" VARCHAR2(64) NULL, - "RESOURCE_NAME_1" VARCHAR2(64) NULL, - "RESOURCE_NAME_2" VARCHAR2(64) NULL, - "RESOURCE_NAME_3" VARCHAR2(64) NULL, - "RESOURCE_TYPE_0" VARCHAR2(64) NULL, - "RESOURCE_TYPE_1" VARCHAR2(64) NULL, - "RESOURCE_TYPE_2" VARCHAR2(64) NULL, - "RESOURCE_TYPE_3" VARCHAR2(64) NULL, + "RESOURCE_NAME_0" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_1" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_2" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_3" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_0" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_1" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_2" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_3" VARCHAR2(64) DEFAULT '__NULL__', "ACTION" VARCHAR2(32) NOT NULL, - "scope" VARCHAR2(128) NOT NULL, + "SCOPE" VARCHAR2(128) NOT NULL, "CREATE_TIME" NUMBER NOT NULL, - "WITH_GRANT_OPTION" CHAR(1) NOT NULL + "WITH_GRANT_OPTION" CHAR(1) DEFAULT 'N' NOT NULL ); ALTER TABLE "SENTRY_GM_PRIVILEGE" diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-oracle-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-oracle-1.6.0.sql new file mode 100644 index 000000000..3a2233550 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-oracle-1.6.0.sql @@ -0,0 +1,168 @@ +--Licensed to the Apache Software Foundation (ASF) under one or more +--contributor license agreements. See the NOTICE file distributed with +--this work for additional information regarding copyright ownership. 
+--The ASF licenses this file to You under the Apache License, Version 2.0 +--(the "License"); you may not use this file except in compliance with +--the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +--Unless required by applicable law or agreed to in writing, software +--distributed under the License is distributed on an "AS IS" BASIS, +--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +--See the License for the specific language governing permissions and +--limitations under the License. + +CREATE TABLE "SENTRY_DB_PRIVILEGE" ( + "DB_PRIVILEGE_ID" NUMBER NOT NULL, + "PRIVILEGE_SCOPE" VARCHAR2(32) NOT NULL, + "SERVER_NAME" VARCHAR2(128) NOT NULL, + "DB_NAME" VARCHAR2(128) DEFAULT '__NULL__', + "TABLE_NAME" VARCHAR2(128) DEFAULT '__NULL__', + "COLUMN_NAME" VARCHAR2(128) DEFAULT '__NULL__', + "URI" VARCHAR2(4000) DEFAULT '__NULL__', + "ACTION" VARCHAR2(128) NOT NULL, + "CREATE_TIME" NUMBER NOT NULL, + "WITH_GRANT_OPTION" CHAR(1) DEFAULT 'N' NOT NULL +); + +CREATE TABLE "SENTRY_ROLE" ( + "ROLE_ID" NUMBER NOT NULL, + "ROLE_NAME" VARCHAR2(128) NOT NULL, + "CREATE_TIME" NUMBER NOT NULL +); + +CREATE TABLE "SENTRY_GROUP" ( + "GROUP_ID" NUMBER NOT NULL, + "GROUP_NAME" VARCHAR2(128) NOT NULL, + "CREATE_TIME" NUMBER NOT NULL +); + +CREATE TABLE "SENTRY_ROLE_DB_PRIVILEGE_MAP" ( + "ROLE_ID" NUMBER NOT NULL, + "DB_PRIVILEGE_ID" NUMBER NOT NULL, + "GRANTOR_PRINCIPAL" VARCHAR2(128) +); + +CREATE TABLE "SENTRY_ROLE_GROUP_MAP" ( + "ROLE_ID" NUMBER NOT NULL, + "GROUP_ID" NUMBER NOT NULL, + "GRANTOR_PRINCIPAL" VARCHAR2(128) +); + +CREATE TABLE "SENTRY_VERSION" ( + "VER_ID" NUMBER NOT NULL, + "SCHEMA_VERSION" VARCHAR(127) NOT NULL, + "VERSION_COMMENT" VARCHAR(255) NOT NULL +); + +ALTER TABLE "SENTRY_DB_PRIVILEGE" + ADD CONSTRAINT "SENTRY_DB_PRIV_PK" PRIMARY KEY ("DB_PRIVILEGE_ID"); + +ALTER TABLE "SENTRY_ROLE" + ADD CONSTRAINT "SENTRY_ROLE_PK" PRIMARY KEY ("ROLE_ID"); + +ALTER TABLE "SENTRY_GROUP" + ADD CONSTRAINT "SENTRY_GROUP_PK" PRIMARY KEY ("GROUP_ID"); + +ALTER TABLE "SENTRY_VERSION" ADD CONSTRAINT "SENTRY_VERSION_PK" PRIMARY KEY ("VER_ID"); + +ALTER TABLE "SENTRY_DB_PRIVILEGE" + ADD CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" UNIQUE ("SERVER_NAME","DB_NAME","TABLE_NAME","COLUMN_NAME","URI","ACTION","WITH_GRANT_OPTION"); + +CREATE INDEX "SENTRY_SERV_PRIV_IDX" ON "SENTRY_DB_PRIVILEGE" ("SERVER_NAME"); + +CREATE INDEX "SENTRY_DB_PRIV_IDX" ON "SENTRY_DB_PRIVILEGE" ("DB_NAME"); + +CREATE INDEX "SENTRY_TBL_PRIV_IDX" ON "SENTRY_DB_PRIVILEGE" ("TABLE_NAME"); + +CREATE INDEX "SENTRY_COL_PRIV_IDX" ON "SENTRY_DB_PRIVILEGE" ("COLUMN_NAME"); + +CREATE INDEX "SENTRY_URI_PRIV_IDX" ON "SENTRY_DB_PRIVILEGE" ("URI"); + +ALTER TABLE "SENTRY_ROLE" + ADD CONSTRAINT "SENTRY_ROLE_ROLE_NAME_UNIQUE" UNIQUE ("ROLE_NAME"); + +ALTER TABLE "SENTRY_GROUP" + ADD CONSTRAINT "SENTRY_GRP_GRP_NAME_UNIQUE" UNIQUE ("GROUP_NAME"); + +ALTER TABLE "SENTRY_ROLE_DB_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RLE_PRIV_MAP_PK" PRIMARY KEY ("ROLE_ID","DB_PRIVILEGE_ID"); + +ALTER TABLE "SENTRY_ROLE_GROUP_MAP" + ADD CONSTRAINT "SENTRY_ROLE_GROUP_MAP_PK" PRIMARY KEY ("ROLE_ID","GROUP_ID"); + +ALTER TABLE "SENTRY_ROLE_DB_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RLE_DB_PRV_MAP_SN_RLE_FK" + FOREIGN KEY ("ROLE_ID") REFERENCES "SENTRY_ROLE"("ROLE_ID") INITIALLY DEFERRED; + +ALTER TABLE "SENTRY_ROLE_DB_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RL_DB_PRV_MAP_SN_DB_PRV_FK" + FOREIGN KEY ("DB_PRIVILEGE_ID") REFERENCES "SENTRY_DB_PRIVILEGE"("DB_PRIVILEGE_ID") INITIALLY DEFERRED; + +ALTER TABLE 
"SENTRY_ROLE_GROUP_MAP" + ADD CONSTRAINT "SEN_ROLE_GROUP_MAP_SEN_ROLE_FK" + FOREIGN KEY ("ROLE_ID") REFERENCES "SENTRY_ROLE"("ROLE_ID") INITIALLY DEFERRED; + +ALTER TABLE "SENTRY_ROLE_GROUP_MAP" + ADD CONSTRAINT "SEN_ROLE_GROUP_MAP_SEN_GRP_FK" + FOREIGN KEY ("GROUP_ID") REFERENCES "SENTRY_GROUP"("GROUP_ID") INITIALLY DEFERRED; + +INSERT INTO SENTRY_VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '1.6.0', 'Sentry release version 1.6.0'); + +-- Generic Model +-- Table SENTRY_GM_PRIVILEGE for classes [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +CREATE TABLE "SENTRY_GM_PRIVILEGE" ( + "GM_PRIVILEGE_ID" NUMBER NOT NULL, + "COMPONENT_NAME" VARCHAR2(32) NOT NULL, + "SERVICE_NAME" VARCHAR2(64) NOT NULL, + "RESOURCE_NAME_0" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_1" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_2" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_NAME_3" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_0" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_1" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_2" VARCHAR2(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_3" VARCHAR2(64) DEFAULT '__NULL__', + "ACTION" VARCHAR2(32) NOT NULL, + "SCOPE" VARCHAR2(128) NOT NULL, + "CREATE_TIME" NUMBER NOT NULL, + "WITH_GRANT_OPTION" CHAR(1) DEFAULT 'N' NOT NULL +); + +ALTER TABLE "SENTRY_GM_PRIVILEGE" + ADD CONSTRAINT "SENTRY_GM_PRIV_PK" PRIMARY KEY ("GM_PRIVILEGE_ID"); +-- Constraints for table SENTRY_GM_PRIVILEGE for class(es) [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +ALTER TABLE "SENTRY_GM_PRIVILEGE" + ADD CONSTRAINT "SENTRY_GM_PRIV_PRIV_NAME_UNIQ" UNIQUE ("COMPONENT_NAME","SERVICE_NAME","RESOURCE_NAME_0","RESOURCE_NAME_1","RESOURCE_NAME_2", + "RESOURCE_NAME_3","RESOURCE_TYPE_0","RESOURCE_TYPE_1","RESOURCE_TYPE_2","RESOURCE_TYPE_3","ACTION","WITH_GRANT_OPTION"); + +CREATE INDEX "SENTRY_GM_PRIV_COMP_IDX" ON "SENTRY_GM_PRIVILEGE" ("COMPONENT_NAME"); + +CREATE INDEX "SENTRY_GM_PRIV_SERV_IDX" ON "SENTRY_GM_PRIVILEGE" ("SERVICE_NAME"); + +CREATE INDEX "SENTRY_GM_PRIV_RES0_IDX" ON "SENTRY_GM_PRIVILEGE" ("RESOURCE_NAME_0","RESOURCE_TYPE_0"); + +CREATE INDEX "SENTRY_GM_PRIV_RES1_IDX" ON "SENTRY_GM_PRIVILEGE" ("RESOURCE_NAME_1","RESOURCE_TYPE_1"); + +CREATE INDEX "SENTRY_GM_PRIV_RES2_IDX" ON "SENTRY_GM_PRIVILEGE" ("RESOURCE_NAME_2","RESOURCE_TYPE_2"); + +CREATE INDEX "SENTRY_GM_PRIV_RES3_IDX" ON "SENTRY_GM_PRIVILEGE" ("RESOURCE_NAME_3","RESOURCE_TYPE_3"); + +-- Table SENTRY_ROLE_GM_PRIVILEGE_MAP for join relationship +CREATE TABLE "SENTRY_ROLE_GM_PRIVILEGE_MAP" ( + "ROLE_ID" NUMBER NOT NULL, + "GM_PRIVILEGE_ID" NUMBER NOT NULL +); + +ALTER TABLE "SENTRY_ROLE_GM_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RLE_GM_PRIV_MAP_PK" PRIMARY KEY ("ROLE_ID","GM_PRIVILEGE_ID"); + +-- Constraints for table SENTRY_ROLE_GM_PRIVILEGE_MAP +ALTER TABLE "SENTRY_ROLE_GM_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RLE_GM_PRV_MAP_SN_RLE_FK" + FOREIGN KEY ("ROLE_ID") REFERENCES "SENTRY_ROLE"("ROLE_ID") INITIALLY DEFERRED; + +ALTER TABLE "SENTRY_ROLE_GM_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RL_GM_PRV_MAP_SN_DB_PRV_FK" + FOREIGN KEY ("GM_PRIVILEGE_ID") REFERENCES "SENTRY_GM_PRIVILEGE"("GM_PRIVILEGE_ID") INITIALLY DEFERRED; diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-postgres-1.5.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-postgres-1.5.0.sql index ae387ccb9..fb26770cf 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/sentry-postgres-1.5.0.sql +++ 
b/sentry-provider/sentry-provider-db/src/main/resources/sentry-postgres-1.5.0.sql @@ -29,10 +29,10 @@ CREATE TABLE "SENTRY_DB_PRIVILEGE" ( "DB_PRIVILEGE_ID" BIGINT NOT NULL, "PRIVILEGE_SCOPE" character varying(32) NOT NULL, "SERVER_NAME" character varying(128) NOT NULL, - "DB_NAME" character varying(128) DEFAULT NULL::character varying, - "TABLE_NAME" character varying(128) DEFAULT NULL::character varying, - "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying, - "URI" character varying(4000) DEFAULT NULL::character varying, + "DB_NAME" character varying(128) DEFAULT '__NULL__', + "TABLE_NAME" character varying(128) DEFAULT '__NULL__', + "COLUMN_NAME" character varying(128) DEFAULT '__NULL__', + "URI" character varying(4000) DEFAULT '__NULL__', "ACTION" character varying(128) NOT NULL, "CREATE_TIME" BIGINT NOT NULL, "WITH_GRANT_OPTION" CHAR(1) NOT NULL @@ -129,16 +129,16 @@ CREATE TABLE "SENTRY_GM_PRIVILEGE" ( "GM_PRIVILEGE_ID" BIGINT NOT NULL, "COMPONENT_NAME" character varying(32) NOT NULL, "SERVICE_NAME" character varying(64) NOT NULL, - "RESOURCE_NAME_0" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_NAME_1" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_NAME_2" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_NAME_3" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_TYPE_0" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_TYPE_1" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_TYPE_2" character varying(64) DEFAULT NULL::character varying, - "RESOURCE_TYPE_3" character varying(64) DEFAULT NULL::character varying, + "RESOURCE_NAME_0" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_1" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_2" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_3" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_0" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_1" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_2" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_3" character varying(64) DEFAULT '__NULL__', "ACTION" character varying(32) NOT NULL, - "scope" character varying(128) NOT NULL, + "SCOPE" character varying(128) NOT NULL, "CREATE_TIME" BIGINT NOT NULL, "WITH_GRANT_OPTION" CHAR(1) NOT NULL ); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-postgres-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-postgres-1.6.0.sql new file mode 100644 index 000000000..62edf3e89 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-postgres-1.6.0.sql @@ -0,0 +1,182 @@ +--Licensed to the Apache Software Foundation (ASF) under one or more +--contributor license agreements. See the NOTICE file distributed with +--this work for additional information regarding copyright ownership. +--The ASF licenses this file to You under the Apache License, Version 2.0 +--(the "License"); you may not use this file except in compliance with +--the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +--Unless required by applicable law or agreed to in writing, software +--distributed under the License is distributed on an "AS IS" BASIS, +--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +--See the License for the specific language governing permissions and +--limitations under the License. 
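A note on the '__NULL__' defaults carried into this new file: the 1.5.0 hunk just above swaps real NULL defaults for the '__NULL__' sentinel on the authorizable columns, and this 1.6.0 script keeps that convention. A plausible reason is the SENTRY_DB_PRIV_PRIV_NAME_UNIQ constraint below: SQL treats NULLs as distinct for UNIQUE purposes, so NULL-valued columns would let duplicate privilege rows coexist. A minimal Postgres sketch (demo_priv is a throwaway table for illustration, not part of this schema):

  CREATE TABLE demo_priv (
    "SERVER_NAME" character varying(128) NOT NULL,
    "DB_NAME" character varying(128) DEFAULT '__NULL__',
    UNIQUE ("SERVER_NAME", "DB_NAME")
  );
  INSERT INTO demo_priv ("SERVER_NAME") VALUES ('server1'); -- stored as ('server1', '__NULL__')
  INSERT INTO demo_priv ("SERVER_NAME") VALUES ('server1'); -- rejected: duplicate key
  -- With DEFAULT NULL, both inserts would succeed, since ('server1', NULL)
  -- never compares equal to another ('server1', NULL) under a UNIQUE constraint.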
+ +START TRANSACTION; + +SET statement_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = off; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET escape_string_warning = off; +SET search_path = public, pg_catalog; +SET default_tablespace = ''; +SET default_with_oids = false; + +CREATE TABLE "SENTRY_DB_PRIVILEGE" ( + "DB_PRIVILEGE_ID" BIGINT NOT NULL, + "PRIVILEGE_SCOPE" character varying(32) NOT NULL, + "SERVER_NAME" character varying(128) NOT NULL, + "DB_NAME" character varying(128) DEFAULT '__NULL__', + "TABLE_NAME" character varying(128) DEFAULT '__NULL__', + "COLUMN_NAME" character varying(128) DEFAULT '__NULL__', + "URI" character varying(4000) DEFAULT '__NULL__', + "ACTION" character varying(128) NOT NULL, + "CREATE_TIME" BIGINT NOT NULL, + "WITH_GRANT_OPTION" CHAR(1) NOT NULL +); + +CREATE TABLE "SENTRY_ROLE" ( + "ROLE_ID" BIGINT NOT NULL, + "ROLE_NAME" character varying(128) NOT NULL, + "CREATE_TIME" BIGINT NOT NULL +); + +CREATE TABLE "SENTRY_GROUP" ( + "GROUP_ID" BIGINT NOT NULL, + "GROUP_NAME" character varying(128) NOT NULL, + "CREATE_TIME" BIGINT NOT NULL +); + +CREATE TABLE "SENTRY_ROLE_DB_PRIVILEGE_MAP" ( + "ROLE_ID" BIGINT NOT NULL, + "DB_PRIVILEGE_ID" BIGINT NOT NULL, + "GRANTOR_PRINCIPAL" character varying(128) +); + +CREATE TABLE "SENTRY_ROLE_GROUP_MAP" ( + "ROLE_ID" BIGINT NOT NULL, + "GROUP_ID" BIGINT NOT NULL, + "GRANTOR_PRINCIPAL" character varying(128) +); + +CREATE TABLE "SENTRY_VERSION" ( + "VER_ID" bigint, + "SCHEMA_VERSION" character varying(127) NOT NULL, + "VERSION_COMMENT" character varying(255) NOT NULL +); + + +ALTER TABLE ONLY "SENTRY_DB_PRIVILEGE" + ADD CONSTRAINT "SENTRY_DB_PRIV_PK" PRIMARY KEY ("DB_PRIVILEGE_ID"); + +ALTER TABLE ONLY "SENTRY_ROLE" + ADD CONSTRAINT "SENTRY_ROLE_PK" PRIMARY KEY ("ROLE_ID"); + +ALTER TABLE ONLY "SENTRY_GROUP" + ADD CONSTRAINT "SENTRY_GROUP_PK" PRIMARY KEY ("GROUP_ID"); + +ALTER TABLE ONLY "SENTRY_VERSION" ADD CONSTRAINT "SENTRY_VERSION_PK" PRIMARY KEY ("VER_ID"); + +ALTER TABLE ONLY "SENTRY_DB_PRIVILEGE" + ADD CONSTRAINT "SENTRY_DB_PRIV_PRIV_NAME_UNIQ" UNIQUE ("SERVER_NAME","DB_NAME","TABLE_NAME","COLUMN_NAME","URI", "ACTION","WITH_GRANT_OPTION"); + +CREATE INDEX "SENTRY_PRIV_SERV_IDX" ON "SENTRY_DB_PRIVILEGE" USING btree ("SERVER_NAME"); + +CREATE INDEX "SENTRY_PRIV_DB_IDX" ON "SENTRY_DB_PRIVILEGE" USING btree ("DB_NAME"); + +CREATE INDEX "SENTRY_PRIV_TBL_IDX" ON "SENTRY_DB_PRIVILEGE" USING btree ("TABLE_NAME"); + +CREATE INDEX "SENTRY_PRIV_COL_IDX" ON "SENTRY_DB_PRIVILEGE" USING btree ("COLUMN_NAME"); + +CREATE INDEX "SENTRY_PRIV_URI_IDX" ON "SENTRY_DB_PRIVILEGE" USING btree ("URI"); + +ALTER TABLE ONLY "SENTRY_ROLE" + ADD CONSTRAINT "SENTRY_ROLE_ROLE_NAME_UNIQUE" UNIQUE ("ROLE_NAME"); + +ALTER TABLE ONLY "SENTRY_GROUP" + ADD CONSTRAINT "SENTRY_GRP_GRP_NAME_UNIQUE" UNIQUE ("GROUP_NAME"); + +ALTER TABLE "SENTRY_ROLE_DB_PRIVILEGE_MAP" + ADD CONSTRAINT "SENTRY_ROLE_DB_PRIVILEGE_MAP_PK" PRIMARY KEY ("ROLE_ID","DB_PRIVILEGE_ID"); + +ALTER TABLE "SENTRY_ROLE_GROUP_MAP" + ADD CONSTRAINT "SENTRY_ROLE_GROUP_MAP_PK" PRIMARY KEY ("ROLE_ID","GROUP_ID"); + +ALTER TABLE ONLY "SENTRY_ROLE_DB_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RLE_DB_PRV_MAP_SN_RLE_FK" + FOREIGN KEY ("ROLE_ID") REFERENCES "SENTRY_ROLE"("ROLE_ID") DEFERRABLE; + +ALTER TABLE ONLY "SENTRY_ROLE_DB_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RL_DB_PRV_MAP_SN_DB_PRV_FK" + FOREIGN KEY ("DB_PRIVILEGE_ID") REFERENCES "SENTRY_DB_PRIVILEGE"("DB_PRIVILEGE_ID") DEFERRABLE; + +ALTER TABLE ONLY "SENTRY_ROLE_GROUP_MAP" + ADD CONSTRAINT 
"SEN_ROLE_GROUP_MAP_SEN_ROLE_FK" + FOREIGN KEY ("ROLE_ID") REFERENCES "SENTRY_ROLE"("ROLE_ID") DEFERRABLE; + +ALTER TABLE ONLY "SENTRY_ROLE_GROUP_MAP" + ADD CONSTRAINT "SEN_ROLE_GROUP_MAP_SEN_GRP_FK" + FOREIGN KEY ("GROUP_ID") REFERENCES "SENTRY_GROUP"("GROUP_ID") DEFERRABLE; + +INSERT INTO "SENTRY_VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '1.6.0', 'Sentry release version 1.6.0'); + +-- Generic Model +-- Table SENTRY_GM_PRIVILEGE for classes [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +CREATE TABLE "SENTRY_GM_PRIVILEGE" ( + "GM_PRIVILEGE_ID" BIGINT NOT NULL, + "COMPONENT_NAME" character varying(32) NOT NULL, + "SERVICE_NAME" character varying(64) NOT NULL, + "RESOURCE_NAME_0" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_1" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_2" character varying(64) DEFAULT '__NULL__', + "RESOURCE_NAME_3" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_0" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_1" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_2" character varying(64) DEFAULT '__NULL__', + "RESOURCE_TYPE_3" character varying(64) DEFAULT '__NULL__', + "ACTION" character varying(32) NOT NULL, + "SCOPE" character varying(128) NOT NULL, + "CREATE_TIME" BIGINT NOT NULL, + "WITH_GRANT_OPTION" CHAR(1) NOT NULL +); +ALTER TABLE ONLY "SENTRY_GM_PRIVILEGE" + ADD CONSTRAINT "SENTRY_GM_PRIV_PK" PRIMARY KEY ("GM_PRIVILEGE_ID"); +-- Constraints for table SENTRY_GM_PRIVILEGE for class(es) [org.apache.sentry.provider.db.service.model.MSentryGMPrivilege] +ALTER TABLE ONLY "SENTRY_GM_PRIVILEGE" + ADD CONSTRAINT "SENTRY_GM_PRIV_PRIV_NAME_UNIQ" UNIQUE ("COMPONENT_NAME","SERVICE_NAME","RESOURCE_NAME_0","RESOURCE_NAME_1","RESOURCE_NAME_2", + "RESOURCE_NAME_3","RESOURCE_TYPE_0","RESOURCE_TYPE_1","RESOURCE_TYPE_2","RESOURCE_TYPE_3","ACTION","WITH_GRANT_OPTION"); + +CREATE INDEX "SENTRY_GM_PRIV_COMP_IDX" ON "SENTRY_GM_PRIVILEGE" USING btree ("COMPONENT_NAME"); + +CREATE INDEX "SENTRY_GM_PRIV_SERV_IDX" ON "SENTRY_GM_PRIVILEGE" USING btree ("SERVICE_NAME"); + +CREATE INDEX "SENTRY_GM_PRIV_RES0_IDX" ON "SENTRY_GM_PRIVILEGE" USING btree ("RESOURCE_NAME_0","RESOURCE_TYPE_0"); + +CREATE INDEX "SENTRY_GM_PRIV_RES1_IDX" ON "SENTRY_GM_PRIVILEGE" USING btree ("RESOURCE_NAME_1","RESOURCE_TYPE_1"); + +CREATE INDEX "SENTRY_GM_PRIV_RES2_IDX" ON "SENTRY_GM_PRIVILEGE" USING btree ("RESOURCE_NAME_2","RESOURCE_TYPE_2"); + +CREATE INDEX "SENTRY_GM_PRIV_RES3_IDX" ON "SENTRY_GM_PRIVILEGE" USING btree ("RESOURCE_NAME_3","RESOURCE_TYPE_3"); + +-- Table SENTRY_ROLE_GM_PRIVILEGE_MAP for join relationship +CREATE TABLE "SENTRY_ROLE_GM_PRIVILEGE_MAP" ( + "ROLE_ID" BIGINT NOT NULL, + "GM_PRIVILEGE_ID" BIGINT NOT NULL +); + +ALTER TABLE "SENTRY_ROLE_GM_PRIVILEGE_MAP" + ADD CONSTRAINT "SENTRY_ROLE_GM_PRIVILEGE_MAP_PK" PRIMARY KEY ("ROLE_ID","GM_PRIVILEGE_ID"); + +-- Constraints for table SENTRY_ROLE_GM_PRIVILEGE_MAP +ALTER TABLE ONLY "SENTRY_ROLE_GM_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RLE_GM_PRV_MAP_SN_RLE_FK" + FOREIGN KEY ("ROLE_ID") REFERENCES "SENTRY_ROLE"("ROLE_ID") DEFERRABLE; + +ALTER TABLE ONLY "SENTRY_ROLE_GM_PRIVILEGE_MAP" + ADD CONSTRAINT "SEN_RL_GM_PRV_MAP_SN_DB_PRV_FK" + FOREIGN KEY ("GM_PRIVILEGE_ID") REFERENCES "SENTRY_GM_PRIVILEGE"("GM_PRIVILEGE_ID") DEFERRABLE; + +COMMIT; diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-db2-1.5.0-to-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-db2-1.5.0-to-1.6.0.sql new file mode 
100644 index 000000000..5560d9fd8 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-db2-1.5.0-to-1.6.0.sql @@ -0,0 +1,2 @@ +-- Version update +UPDATE SENTRY_VERSION SET SCHEMA_VERSION='1.6.0', VERSION_COMMENT='Sentry release version 1.6.0' WHERE VER_ID=1; \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-derby-1.5.0-to-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-derby-1.5.0-to-1.6.0.sql new file mode 100644 index 000000000..5560d9fd8 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-derby-1.5.0-to-1.6.0.sql @@ -0,0 +1,2 @@ +-- Version update +UPDATE SENTRY_VERSION SET SCHEMA_VERSION='1.6.0', VERSION_COMMENT='Sentry release version 1.6.0' WHERE VER_ID=1; \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-mysql-1.5.0-to-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-mysql-1.5.0-to-1.6.0.sql new file mode 100644 index 000000000..352332ca4 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-mysql-1.5.0-to-1.6.0.sql @@ -0,0 +1,5 @@ +SELECT 'Upgrading Sentry store schema from 1.5.0 to 1.6.0' AS ' '; + +UPDATE SENTRY_VERSION SET SCHEMA_VERSION='1.6.0', VERSION_COMMENT='Sentry release version 1.6.0' WHERE VER_ID=1; + +SELECT 'Finish upgrading Sentry store schema from 1.5.0 to 1.6.0' AS ' '; \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-oracle-1.5.0-to-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-oracle-1.5.0-to-1.6.0.sql new file mode 100644 index 000000000..3437075f3 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-oracle-1.5.0-to-1.6.0.sql @@ -0,0 +1,5 @@ +SELECT 'Upgrading Sentry store schema from 1.5.0 to 1.6.0' AS Status from dual; + +UPDATE SENTRY_VERSION SET SCHEMA_VERSION='1.6.0', VERSION_COMMENT='Sentry release version 1.6.0' WHERE VER_ID=1; + +SELECT 'Finished upgrading Sentry store schema from 1.5.0 to 1.6.0' AS Status from dual; \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-postgres-1.4.0-to-1.5.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-postgres-1.4.0-to-1.5.0.sql index ed38774cc..2f03d5e9f 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-postgres-1.4.0-to-1.5.0.sql +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-postgres-1.4.0-to-1.5.0.sql @@ -5,5 +5,5 @@ SELECT 'Upgrading Sentry store schema from 1.4.0 to 1.5.0'; \i 004-SENTRY-74.postgres.sql; \i 005-SENTRY-398.postgres.sql; -UPDATE SENTRY_VERSION SET SCHEMA_VERSION='1.5.0', VERSION_COMMENT='Sentry release version 1.5.0' WHERE VER_ID=1; +UPDATE "SENTRY_VERSION" SET "SCHEMA_VERSION"='1.5.0', "VERSION_COMMENT"='Sentry release version 1.5.0' WHERE "VER_ID"=1; SELECT 'Finished upgrading Sentry store schema from 1.4.0 to 1.5.0'; diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-postgres-1.5.0-to-1.6.0.sql b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-postgres-1.5.0-to-1.6.0.sql new file mode 100644 index 000000000..598259616 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry-upgrade-postgres-1.5.0-to-1.6.0.sql @@ -0,0 +1,5 @@ +SELECT 'Upgrading Sentry store 
schema from 1.5.0 to 1.6.0'; + +UPDATE "SENTRY_VERSION" SET "SCHEMA_VERSION"='1.6.0', "VERSION_COMMENT"='Sentry release version 1.6.0' WHERE "VER_ID"=1; + +SELECT 'Finished upgrading Sentry store schema from 1.5.0 to 1.6.0'; \ No newline at end of file
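Each upgrade script above touches only the single bookkeeping row that the 1.6.0 schema scripts seed with INSERT INTO SENTRY_VERSION. A quick before/after check, shown for the quoted Postgres identifiers (a sketch; the other dialects take the same statement with unquoted names):

  SELECT "SCHEMA_VERSION", "VERSION_COMMENT"
    FROM "SENTRY_VERSION"
   WHERE "VER_ID" = 1;
  -- expect '1.5.0' before running the upgrade script, '1.6.0' after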
diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry_common_service.thrift b/sentry-provider/sentry-provider-db/src/main/resources/sentry_common_service.thrift index 956dabe7f..65c6934bc 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/sentry_common_service.thrift +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry_common_service.thrift @@ -25,7 +25,9 @@ namespace php sentry.service.thrift namespace cpp Apache.Sentry.Service.Thrift const i32 TSENTRY_SERVICE_V1 = 1; -const i32 TSENTRY_SERVICE_V2 = 1; +// Made a backward-incompatible change when adding column-level privileges. +// We also added the generalized model in this version. +const i32 TSENTRY_SERVICE_V2 = 2; const i32 TSENTRY_STATUS_OK = 0; const i32 TSENTRY_STATUS_ALREADY_EXISTS = 1; @@ -33,6 +35,7 @@ const i32 TSENTRY_STATUS_NO_SUCH_OBJECT = 2; const i32 TSENTRY_STATUS_RUNTIME_ERROR = 3; const i32 TSENTRY_STATUS_INVALID_INPUT = 4; const i32 TSENTRY_STATUS_ACCESS_DENIED = 5; +const i32 TSENTRY_STATUS_THRIFT_VERSION_MISMATCH = 6; struct TSentryResponseStatus { 1: required i32 value, diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry_generic_policy_service.thrift b/sentry-provider/sentry-provider-db/src/main/resources/sentry_generic_policy_service.thrift index 91ff672ec..db107bfde 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/sentry_generic_policy_service.thrift +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry_generic_policy_service.thrift @@ -195,6 +195,7 @@ struct TSentryActiveRoleSet { 1: required bool all, 2: required set roles, } + struct TListSentryPrivilegesForProviderRequest { 1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string component, # The request is issued to which component @@ -203,11 +204,56 @@ struct TListSentryPrivilegesForProviderRequest { 5: required TSentryActiveRoleSet roleSet, 6: optional list authorizables # authorizable hierarchies } + struct TListSentryPrivilegesForProviderResponse { 1: required TSentryResponseStatus status 2: required set privileges } + +# Map of role to its set of privileges for the given authorizable +# Optionally use the set of groups to filter the roles +struct TSentryPrivilegeMap { +1: required map> privilegeMap +} + +struct TListSentryPrivilegesByAuthRequest { +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, + +# User on whose behalf the request is issued +2: required string requestorUserName, + +# The request is issued to which component +3: required string component, + +# The privilege belongs to which service +4: required string serviceName, + +# The authorizable hierarchies, each represented as a string, e.g. +# resourceType1=resourceName1->resourceType2=resourceName2->resourceType3=resourceName3 +5: required set authorizablesSet, + +# The requested groups. For an admin, the requested groups can be empty; if so, it is +# treated as a wildcard query. Otherwise, it is a query on these specific groups. +# For a non-admin user, the requested groups must be the groups they are part of. +6: optional set groups, + +# The active role set. +7: optional TSentryActiveRoleSet roleSet +} + +struct TListSentryPrivilegesByAuthResponse { +1: required sentry_common_service.TSentryResponseStatus status, + +# Will not be set in case of an error. Otherwise it will be a +# mapping from each authorizable to a role-to-privileges map. For a non-admin +# requestor, the roles are the intersection of active roles and granted roles. +# For an admin requestor, the roles are filtered based on the active roles +# and the requested groups from TListSentryPrivilegesByAuthRequest. +# The authorizable hierarchy is represented as a string in the same form +# as in the request. +2: optional map privilegesMapByAuth +} + service SentryGenericPolicyService { TCreateSentryRoleResponse create_sentry_role(1:TCreateSentryRoleRequest request) @@ -225,6 +271,8 @@ service SentryGenericPolicyService TListSentryPrivilegesForProviderResponse list_sentry_privileges_for_provider(1:TListSentryPrivilegesForProviderRequest request) + TListSentryPrivilegesByAuthResponse list_sentry_privileges_by_authorizable(1:TListSentryPrivilegesByAuthRequest request); + TDropPrivilegesResponse drop_sentry_privilege(1:TDropPrivilegesRequest request); TRenamePrivilegesResponse rename_sentry_privilege(1:TRenamePrivilegesRequest request); diff --git a/sentry-provider/sentry-provider-db/src/main/resources/sentry_policy_service.thrift b/sentry-provider/sentry-provider-db/src/main/resources/sentry_policy_service.thrift index 993ea4658..40889e8fe 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/sentry_policy_service.thrift +++ b/sentry-provider/sentry-provider-db/src/main/resources/sentry_policy_service.thrift @@ -41,7 +41,7 @@ enum TSentryGrantOption { # Represents a Privilege in transport from the client to the server struct TSentryPrivilege { -1: required string privilegeScope, # Valid values are SERVER, DATABASE, TABLE +1: required string privilegeScope, # Valid values are SERVER, DATABASE, TABLE, COLUMN, URI 3: required string serverName, 4: optional string dbName = "", 5: optional string tableName = "", @@ -59,7 +59,7 @@ struct TSentryGroup { # CREATE ROLE r1 struct TCreateSentryRoleRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: required string roleName, # TSentryRole is not required for this request } @@ -69,7 +69,7 @@ struct TCreateSentryRoleResponse { # DROP ROLE r1 struct TDropSentryRoleRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: required string roleName # role to drop } @@ -79,7 +79,7 @@ struct TDropSentryRoleResponse { # GRANT ROLE r1 TO GROUP g1 struct TAlterSentryRoleAddGroupsRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: required string roleName, 5: required set groups @@ -91,7 +91,7 @@ struct TAlterSentryRoleAddGroupsResponse { # REVOKE ROLE r1 FROM GROUP g1 struct TAlterSentryRoleDeleteGroupsRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the
request is issued 3: required string roleName, 5: required set groups @@ -102,7 +102,7 @@ struct TAlterSentryRoleDeleteGroupsResponse { # GRANT ... ON ... TO ROLE ... struct TAlterSentryRoleGrantPrivilegeRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: required string roleName, 5: optional TSentryPrivilege privilege, @@ -116,7 +116,7 @@ struct TAlterSentryRoleGrantPrivilegeResponse { # REVOKE ... ON ... FROM ROLE ... struct TAlterSentryRoleRevokePrivilegeRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: required string roleName, 5: optional TSentryPrivilege privilege, @@ -128,7 +128,7 @@ struct TAlterSentryRoleRevokePrivilegeResponse { # SHOW ROLE GRANT struct TListSentryRolesRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: optional string groupName # for this group, or all roles for all groups if null } @@ -153,7 +153,7 @@ struct TSentryAuthorizable { # SHOW GRANT struct TListSentryPrivilegesRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 4: required string roleName, # get privileges assigned for this role 5: optional TSentryAuthorizable authorizableHierarchy # get privileges assigned for this role @@ -165,7 +165,7 @@ struct TListSentryPrivilegesResponse { # Drop privilege struct TDropPrivilegesRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: required TSentryAuthorizable authorizable } @@ -175,7 +175,7 @@ struct TDropPrivilegesResponse { } struct TRenamePrivilegesRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: required TSentryAuthorizable oldAuthorizable 4: required TSentryAuthorizable newAuthorizable @@ -194,7 +194,7 @@ struct TSentryActiveRoleSet { 2: required set roles, } struct TListSentryPrivilegesForProviderRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required set groups, 3: required TSentryActiveRoleSet roleSet, 4: optional TSentryAuthorizable authorizableHierarchy, @@ -210,7 +210,7 @@ struct TSentryPrivilegeMap { 1: required map> privilegeMap } struct TListSentryPrivilegesByAuthRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string requestorUserName, # user on whose behalf the request is issued 3: required set authorizableSet, 4: optional 
set groups, @@ -223,7 +223,7 @@ struct TListSentryPrivilegesByAuthResponse { # Obtain a config value from the Sentry service struct TSentryConfigValueRequest { -1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V2, 2: required string propertyName, # Config attribute to obtain 3: optional string defaultValue # Value if propertyName not found } @@ -232,6 +232,33 @@ struct TSentryConfigValueResponse { 2: optional string value } +# Struct for mapping data such as group-to-role and role-to-privilege +struct TSentryMappingData { +1: optional map> groupRolesMap, # for the groupName -> role mapping +2: optional map> rolePrivilegesMap # for the roleName -> privilege mapping +} + +struct TSentryExportMappingDataRequest { +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +2: required string requestorUserName # user on whose behalf the request is issued +} + +struct TSentryExportMappingDataResponse { +1: required sentry_common_service.TSentryResponseStatus status, +2: required TSentryMappingData mappingData +} + +struct TSentryImportMappingDataRequest { +1: required i32 protocol_version = sentry_common_service.TSENTRY_SERVICE_V1, +2: required string requestorUserName, # user on whose behalf the request is issued +3: required bool overwriteRole = false, # whether to overwrite the existing role with the imported privileges; default is false +4: required TSentryMappingData mappingData +} + +struct TSentryImportMappingDataResponse { +1: required sentry_common_service.TSentryResponseStatus status +} + service SentryPolicyService { TCreateSentryRoleResponse create_sentry_role(1:TCreateSentryRoleRequest request) @@ -250,11 +277,17 @@ service SentryPolicyService # For use with ProviderBackend.getPrivileges only TListSentryPrivilegesForProviderResponse list_sentry_privileges_for_provider(1:TListSentryPrivilegesForProviderRequest request) - TDropPrivilegesResponse drop_sentry_privilege(1:TDropPrivilegesRequest request); + TDropPrivilegesResponse drop_sentry_privilege(1:TDropPrivilegesRequest request); + + TRenamePrivilegesResponse rename_sentry_privilege(1:TRenamePrivilegesRequest request); + + TListSentryPrivilegesByAuthResponse list_sentry_privileges_by_authorizable(1:TListSentryPrivilegesByAuthRequest request); - TRenamePrivilegesResponse rename_sentry_privilege(1:TRenamePrivilegesRequest request); + TSentryConfigValueResponse get_sentry_config_value(1:TSentryConfigValueRequest request); - TListSentryPrivilegesByAuthResponse list_sentry_privileges_by_authorizable(1:TListSentryPrivilegesByAuthRequest request); + # export the mapping data in sentry + TSentryExportMappingDataResponse export_sentry_mapping_data(1:TSentryExportMappingDataRequest request); - TSentryConfigValueResponse get_sentry_config_value(1:TSentryConfigValueRequest request) + # import the mapping data in sentry + TSentryImportMappingDataResponse import_sentry_mapping_data(1:TSentryImportMappingDataRequest request); }
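The upgrade.order.* files below each gain a 1.5.0-to-1.6.0 entry; they evidently list the upgrade steps to replay, in order, when a store lags by more than one version. A sketch of bringing a 1.4.0 Postgres store up to 1.6.0 under psql, assuming the scripts named in this patch sit in the working directory:

  \i sentry-upgrade-postgres-1.4.0-to-1.5.0.sql
  \i sentry-upgrade-postgres-1.5.0-to-1.6.0.sql
  -- each script ends by stamping SENTRY_VERSION with its target version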
diff --git a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.db2 b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.db2 index b1c21c4c7..8473c4cdc 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.db2 +++ b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.db2 @@ -1 +1,2 @@ 1.4.0-to-1.5.0 +1.5.0-to-1.6.0 diff --git a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.derby b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.derby index b1c21c4c7..8473c4cdc 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.derby +++ b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.derby @@ -1 +1,2 @@ 1.4.0-to-1.5.0 +1.5.0-to-1.6.0 diff --git a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.mysql b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.mysql index b1c21c4c7..8473c4cdc 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.mysql +++ b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.mysql @@ -1 +1,2 @@ 1.4.0-to-1.5.0 +1.5.0-to-1.6.0 diff --git a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.oracle b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.oracle index b1c21c4c7..8473c4cdc 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.oracle +++ b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.oracle @@ -1 +1,2 @@ 1.4.0-to-1.5.0 +1.5.0-to-1.6.0 diff --git a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.postgres b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.postgres index b1c21c4c7..8473c4cdc 100644 --- a/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.postgres +++ b/sentry-provider/sentry-provider-db/src/main/resources/upgrade.order.postgres @@ -1 +1,2 @@ 1.4.0-to-1.5.0 +1.5.0-to-1.6.0 diff --git a/sentry-provider/sentry-provider-db/src/main/webapp/SentryService.html b/sentry-provider/sentry-provider-db/src/main/webapp/SentryService.html new file mode 100644 index 000000000..ee112ce8d --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/webapp/SentryService.html @@ -0,0 +1,54 @@ [the 54 added lines of HTML markup were lost in extraction; the page's title is "Sentry Service"] diff --git a/sentry-provider/sentry-provider-db/src/main/webapp/css/bootstrap-theme.min.css b/sentry-provider/sentry-provider-db/src/main/webapp/css/bootstrap-theme.min.css new file mode 100644 index 000000000..c31428b07 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/webapp/css/bootstrap-theme.min.css @@ -0,0 +1,10 @@ +/*! + * Bootstrap v3.0.0 + * + * Copyright 2013 Twitter, Inc + * Licensed under the Apache License v2.0 + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Designed and built with all the love in the world by @mdo and @fat.
+ */ +.btn-default,.btn-primary,.btn-success,.btn-info,.btn-warning,.btn-danger{text-shadow:0 -1px 0 rgba(0,0,0,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075)}.btn-default:active,.btn-primary:active,.btn-success:active,.btn-info:active,.btn-warning:active,.btn-danger:active,.btn-default.active,.btn-primary.active,.btn-success.active,.btn-info.active,.btn-warning.active,.btn-danger.active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn:active,.btn.active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,0%,#e6e6e6,100%);background-image:-moz-linear-gradient(top,#fff 0,#e6e6e6 100%);background-image:linear-gradient(to bottom,#fff 0,#e6e6e6 100%);background-repeat:repeat-x;border-color:#e0e0e0;border-color:#ccc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0)}.btn-default:active,.btn-default.active{background-color:#e6e6e6;border-color:#e0e0e0}.btn-primary{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;border-color:#2d6ca2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.btn-primary:active,.btn-primary.active{background-color:#3071a9;border-color:#2d6ca2}.btn-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;border-color:#419641;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.btn-success:active,.btn-success.active{background-color:#449d44;border-color:#419641}.btn-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;border-color:#eb9316;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.btn-warning:active,.btn-warning.active{background-color:#ec971f;border-color:#eb9316}.btn-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;border-color:#c12e2a;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.btn-danger:active,.btn-danger.active{background-color:#c9302c;border-color:#c12e2a}.btn-info{background-image:-webkit-gradient(linear,left 0,left 
100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;border-color:#2aabd2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.btn-info:active,.btn-info.active{background-color:#31b0d5;border-color:#2aabd2}.thumbnail,.img-thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus,.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{background-color:#357ebd;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.navbar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#f8f8f8));background-image:-webkit-linear-gradient(top,#fff,0%,#f8f8f8,100%);background-image:-moz-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);background-repeat:repeat-x;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff8f8f8',GradientType=0);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075)}.navbar .navbar-nav>.active>a{background-color:#f8f8f8}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,0.25)}.navbar-inverse{background-image:-webkit-gradient(linear,left 0,left 100%,from(#3c3c3c),to(#222));background-image:-webkit-linear-gradient(top,#3c3c3c,0%,#222,100%);background-image:-moz-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c',endColorstr='#ff222222',GradientType=0)}.navbar-inverse .navbar-nav>.active>a{background-color:#222}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.navbar-static-top,.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}.alert{text-shadow:0 1px 0 rgba(255,255,255,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05)}.alert-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#dff0d8),to(#c8e5bc));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#c8e5bc,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);background-repeat:repeat-x;border-color:#b2dba1;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffc8e5bc',GradientType=0)}.alert-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#b9def0));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#b9def0,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:linear-gradient(to bottom,#d9edf7 
0,#b9def0 100%);background-repeat:repeat-x;border-color:#9acfea;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffb9def0',GradientType=0)}.alert-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#f8efc0));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#f8efc0,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);background-repeat:repeat-x;border-color:#f5e79e;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fff8efc0',GradientType=0)}.alert-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#e7c3c3));background-image:-webkit-linear-gradient(top,#f2dede,0%,#e7c3c3,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);background-repeat:repeat-x;border-color:#dca7a7;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffe7c3c3',GradientType=0)}.progress{background-image:-webkit-gradient(linear,left 0,left 100%,from(#ebebeb),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#ebebeb,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb',endColorstr='#fff5f5f5',GradientType=0)}.progress-bar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.progress-bar-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.progress-bar-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.progress-bar-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.progress-bar-danger{background-image:-webkit-gradient(linear,left 0,left 
100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{text-shadow:0 -1px 0 #3071a9;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3278b3));background-image:-webkit-linear-gradient(top,#428bca,0%,#3278b3,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3278b3 100%);background-image:linear-gradient(to bottom,#428bca 0,#3278b3 100%);background-repeat:repeat-x;border-color:#3278b3;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3278b3',GradientType=0)}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.panel-default>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f5f5f5),to(#e8e8e8));background-image:-webkit-linear-gradient(top,#f5f5f5,0%,#e8e8e8,100%);background-image:-moz-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#ffe8e8e8',GradientType=0)}.panel-primary>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.panel-success>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#dff0d8),to(#d0e9c6));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#d0e9c6,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffd0e9c6',GradientType=0)}.panel-info>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#c4e3f3));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#c4e3f3,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffc4e3f3',GradientType=0)}.panel-warning>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#faf2cc));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#faf2cc,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fffaf2cc',GradientType=0)}.panel-danger>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 
100%,from(#f2dede),to(#ebcccc));background-image:-webkit-linear-gradient(top,#f2dede,0%,#ebcccc,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffebcccc',GradientType=0)}.well{background-image:-webkit-gradient(linear,left 0,left 100%,from(#e8e8e8),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#e8e8e8,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);background-repeat:repeat-x;border-color:#dcdcdc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,0.05),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 3px rgba(0,0,0,0.05),0 1px 0 rgba(255,255,255,0.1)} \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/main/webapp/css/bootstrap.min.css b/sentry-provider/sentry-provider-db/src/main/webapp/css/bootstrap.min.css new file mode 100644 index 000000000..a553c4f5e --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/webapp/css/bootstrap.min.css @@ -0,0 +1,9 @@ +/*! + * Bootstrap v3.0.0 + * + * Copyright 2013 Twitter, Inc + * Licensed under the Apache License v2.0 + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Designed and built with all the love in the world by @mdo and @fat. + *//*! normalize.css v2.1.0 | MIT License | git.io/normalize */article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block}audio:not([controls]){display:none;height:0}[hidden]{display:none}html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:focus{outline:thin dotted}a:active,a:hover{outline:0}h1{margin:.67em 0;font-size:2em}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}dfn{font-style:italic}hr{height:0;-moz-box-sizing:content-box;box-sizing:content-box}mark{color:#000;background:#ff0}code,kbd,pre,samp{font-family:monospace,serif;font-size:1em}pre{white-space:pre-wrap}q{quotes:"\201C" "\201D" "\2018" "\2019"}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:0}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid #c0c0c0}legend{padding:0;border:0}button,input,select,textarea{margin:0;font-family:inherit;font-size:100%}button,input{line-height:normal}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}button[disabled],html input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{padding:0;box-sizing:border-box}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}textarea{overflow:auto;vertical-align:top}table{border-collapse:collapse;border-spacing:0}@media 
print{*{color:#000!important;text-shadow:none!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:2cm .5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}*,*:before,*:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:62.5%;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.428571429;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}button,input,select[multiple],textarea{background-image:none}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}img{vertical-align:middle}.img-responsive{display:block;height:auto;max-width:100%}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;height:auto;max-width:100%;padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0 0 0 0);border:0}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16.099999999999998px;font-weight:200;line-height:1.4}@media(min-width:768px){.lead{font-size:21px}}small{font-size:85%}cite{font-style:normal}.text-muted{color:#999}.text-primary{color:#428bca}.text-warning{color:#c09853}.text-danger{color:#b94a48}.text-success{color:#468847}.text-info{color:#3a87ad}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-weight:500;line-height:1.1}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small{font-weight:normal;line-height:1;color:#999}h1,h2,h3{margin-top:20px;margin-bottom:10px}h4,h5,h6{margin-top:10px;margin-bottom:10px}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}h1 small,.h1 small{font-size:24px}h2 small,.h2 small{font-size:18px}h3 small,.h3 small,h4 small,.h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-bottom:20px}dt,dd{line-height:1.428571429}dt{font-weight:bold}dd{margin-left:0}@media(min-width:768px){.dl-horizontal 
dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{font-size:17.5px;font-weight:300;line-height:1.25}blockquote p:last-child{margin-bottom:0}blockquote small{display:block;line-height:1.428571429;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:1.428571429}code,pre{font-family:Monaco,Menlo,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;white-space:nowrap;background-color:#f9f2f4;border-radius:4px}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.428571429;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.container:before,.container:after{display:table;content:" "}.container:after{clear:both}.container:before,.container:after{display:table;content:" "}.container:after{clear:both}.row{margin-right:-15px;margin-left:-15px}.row:before,.row:after{display:table;content:" "}.row:after{clear:both}.row:before,.row:after{display:table;content:" 
"}.row:after{clear:both}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11{float:left}.col-xs-1{width:8.333333333333332%}.col-xs-2{width:16.666666666666664%}.col-xs-3{width:25%}.col-xs-4{width:33.33333333333333%}.col-xs-5{width:41.66666666666667%}.col-xs-6{width:50%}.col-xs-7{width:58.333333333333336%}.col-xs-8{width:66.66666666666666%}.col-xs-9{width:75%}.col-xs-10{width:83.33333333333334%}.col-xs-11{width:91.66666666666666%}.col-xs-12{width:100%}@media(min-width:768px){.container{max-width:750px}.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11{float:left}.col-sm-1{width:8.333333333333332%}.col-sm-2{width:16.666666666666664%}.col-sm-3{width:25%}.col-sm-4{width:33.33333333333333%}.col-sm-5{width:41.66666666666667%}.col-sm-6{width:50%}.col-sm-7{width:58.333333333333336%}.col-sm-8{width:66.66666666666666%}.col-sm-9{width:75%}.col-sm-10{width:83.33333333333334%}.col-sm-11{width:91.66666666666666%}.col-sm-12{width:100%}.col-sm-push-1{left:8.333333333333332%}.col-sm-push-2{left:16.666666666666664%}.col-sm-push-3{left:25%}.col-sm-push-4{left:33.33333333333333%}.col-sm-push-5{left:41.66666666666667%}.col-sm-push-6{left:50%}.col-sm-push-7{left:58.333333333333336%}.col-sm-push-8{left:66.66666666666666%}.col-sm-push-9{left:75%}.col-sm-push-10{left:83.33333333333334%}.col-sm-push-11{left:91.66666666666666%}.col-sm-pull-1{right:8.333333333333332%}.col-sm-pull-2{right:16.666666666666664%}.col-sm-pull-3{right:25%}.col-sm-pull-4{right:33.33333333333333%}.col-sm-pull-5{right:41.66666666666667%}.col-sm-pull-6{right:50%}.col-sm-pull-7{right:58.333333333333336%}.col-sm-pull-8{right:66.66666666666666%}.col-sm-pull-9{right:75%}.col-sm-pull-10{right:83.33333333333334%}.col-sm-pull-11{right:91.66666666666666%}.col-sm-offset-1{margin-left:8.333333333333332%}.col-sm-offset-2{margin-left:16.666666666666664%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-4{margin-left:33.33333333333333%}.col-sm-offset-5{margin-left:41.66666666666667%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-7{margin-left:58.333333333333336%}.col-sm-offset-8{margin-left:66.66666666666666%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-10{margin-left:83.33333333333334%}.col-sm-offset-11{margin-left:91.66666666666666%}}@media(min-width:992px){.container{max-width:970px}.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11{float:left}.col-md-1{width:8.333333333333332%}.col-md-2{width:16.666666666666664%}.col-md-3{width:25%}.col-md-4{width:33.33333333333333%}.col-md-5{width:41.66666666666667%}.col-md-6{width:50%}.col-md-7{width:58.333333333333336%}.col-md-8{width:66.66666666666666%}.col-md-9{width:75%}.col-md-10{width:83.33333333333334%}.col-md-11{width:91.66666666666666%}.col-md-12{width:100%}.col-md-push-0{left:auto}.col-md-push-1{left:8.333333333333332%}.col-md-push-2{left:16.666666666666664%}.col-md-push-3{left:2
5%}.col-md-push-4{left:33.33333333333333%}.col-md-push-5{left:41.66666666666667%}.col-md-push-6{left:50%}.col-md-push-7{left:58.333333333333336%}.col-md-push-8{left:66.66666666666666%}.col-md-push-9{left:75%}.col-md-push-10{left:83.33333333333334%}.col-md-push-11{left:91.66666666666666%}.col-md-pull-0{right:auto}.col-md-pull-1{right:8.333333333333332%}.col-md-pull-2{right:16.666666666666664%}.col-md-pull-3{right:25%}.col-md-pull-4{right:33.33333333333333%}.col-md-pull-5{right:41.66666666666667%}.col-md-pull-6{right:50%}.col-md-pull-7{right:58.333333333333336%}.col-md-pull-8{right:66.66666666666666%}.col-md-pull-9{right:75%}.col-md-pull-10{right:83.33333333333334%}.col-md-pull-11{right:91.66666666666666%}.col-md-offset-0{margin-left:0}.col-md-offset-1{margin-left:8.333333333333332%}.col-md-offset-2{margin-left:16.666666666666664%}.col-md-offset-3{margin-left:25%}.col-md-offset-4{margin-left:33.33333333333333%}.col-md-offset-5{margin-left:41.66666666666667%}.col-md-offset-6{margin-left:50%}.col-md-offset-7{margin-left:58.333333333333336%}.col-md-offset-8{margin-left:66.66666666666666%}.col-md-offset-9{margin-left:75%}.col-md-offset-10{margin-left:83.33333333333334%}.col-md-offset-11{margin-left:91.66666666666666%}}@media(min-width:1200px){.container{max-width:1170px}.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11{float:left}.col-lg-1{width:8.333333333333332%}.col-lg-2{width:16.666666666666664%}.col-lg-3{width:25%}.col-lg-4{width:33.33333333333333%}.col-lg-5{width:41.66666666666667%}.col-lg-6{width:50%}.col-lg-7{width:58.333333333333336%}.col-lg-8{width:66.66666666666666%}.col-lg-9{width:75%}.col-lg-10{width:83.33333333333334%}.col-lg-11{width:91.66666666666666%}.col-lg-12{width:100%}.col-lg-push-0{left:auto}.col-lg-push-1{left:8.333333333333332%}.col-lg-push-2{left:16.666666666666664%}.col-lg-push-3{left:25%}.col-lg-push-4{left:33.33333333333333%}.col-lg-push-5{left:41.66666666666667%}.col-lg-push-6{left:50%}.col-lg-push-7{left:58.333333333333336%}.col-lg-push-8{left:66.66666666666666%}.col-lg-push-9{left:75%}.col-lg-push-10{left:83.33333333333334%}.col-lg-push-11{left:91.66666666666666%}.col-lg-pull-0{right:auto}.col-lg-pull-1{right:8.333333333333332%}.col-lg-pull-2{right:16.666666666666664%}.col-lg-pull-3{right:25%}.col-lg-pull-4{right:33.33333333333333%}.col-lg-pull-5{right:41.66666666666667%}.col-lg-pull-6{right:50%}.col-lg-pull-7{right:58.333333333333336%}.col-lg-pull-8{right:66.66666666666666%}.col-lg-pull-9{right:75%}.col-lg-pull-10{right:83.33333333333334%}.col-lg-pull-11{right:91.66666666666666%}.col-lg-offset-0{margin-left:0}.col-lg-offset-1{margin-left:8.333333333333332%}.col-lg-offset-2{margin-left:16.666666666666664%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-4{margin-left:33.33333333333333%}.col-lg-offset-5{margin-left:41.66666666666667%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-7{margin-left:58.333333333333336%}.col-lg-offset-8{margin-left:66.66666666666666%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-10{margin-left:83.33333333333334%}.col-lg-offset-11{margin-left:91.66666666666666%}}table{max-width:100%;background-color:transparent}th{text-align:left}.table{width:100%;margin-bottom:20px}.table thead>tr>th,.table tbody>tr>th,.table tfoot>tr>th,.table thead>tr>td,.table tbody>tr>td,.table tfoot>tr>td{padding:8px;line-height:1.428571429;vertical-align:top;border-top:1px solid #ddd}.table thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table caption+thead tr:first-child th,.table 
colgroup+thead tr:first-child th,.table thead:first-child tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed thead>tr>th,.table-condensed tbody>tr>th,.table-condensed tfoot>tr>th,.table-condensed thead>tr>td,.table-condensed tbody>tr>td,.table-condensed tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-child(odd)>td,.table-striped>tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover>tbody>tr:hover>td,.table-hover>tbody>tr:hover>th{background-color:#f5f5f5}table col[class*="col-"]{display:table-column;float:none}table td[class*="col-"],table th[class*="col-"]{display:table-cell;float:none}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#dff0d8;border-color:#d6e9c6}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td{background-color:#d0e9c6;border-color:#c9e2b3}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.table>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#f2dede;border-color:#eed3d7}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td{background-color:#ebcccc;border-color:#e6c1c7}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table>tfoot>tr.warning>th{background-color:#fcf8e3;border-color:#fbeed5}.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:hover>td{background-color:#faf2cc;border-color:#f8e5be}@media(max-width:768px){.table-responsive{width:100%;margin-bottom:15px;overflow-x:scroll;overflow-y:hidden;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0;background-color:#fff}.table-responsive>.table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-responsive>.table-bordered>thead>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>thead>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;margin-bottom:5px;font-weight:bold}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type="file"]{display:block}select[multiple],select[size]{height:auto}select optgroup{font-family:inherit;font-size:inherit;font-style:inherit}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}input[type="number"]::-webkit-outer-spin-button,input[type="number"]::-webkit-inner-spin-button{height:auto}.form-control:-moz-placeholder{color:#999}.form-control::-moz-placeholder{color:#999}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.428571429;color:#555;vertical-align:middle;background-color:#fff;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6)}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{cursor:not-allowed;background-color:#eee}textarea.form-control{height:auto}.form-group{margin-bottom:15px}.radio,.checkbox{display:block;min-height:20px;padding-left:20px;margin-top:10px;margin-bottom:10px;vertical-align:middle}.radio label,.checkbox label{display:inline;margin-bottom:0;font-weight:normal;cursor:pointer}.radio 
input[type="radio"],.radio-inline input[type="radio"],.checkbox input[type="checkbox"],.checkbox-inline input[type="checkbox"]{float:left;margin-left:-20px}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{display:inline-block;padding-left:20px;margin-bottom:0;font-weight:normal;vertical-align:middle;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type="radio"][disabled],input[type="checkbox"][disabled],.radio[disabled],.radio-inline[disabled],.checkbox[disabled],.checkbox-inline[disabled],fieldset[disabled] input[type="radio"],fieldset[disabled] input[type="checkbox"],fieldset[disabled] .radio,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}textarea.input-sm{height:auto}.input-lg{height:45px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-lg{height:45px;line-height:45px}textarea.input-lg{height:auto}.has-warning .help-block,.has-warning .control-label{color:#c09853}.has-warning .form-control{border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-warning .form-control:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e}.has-warning .input-group-addon{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.has-error .help-block,.has-error .control-label{color:#b94a48}.has-error .form-control{border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-error .form-control:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.has-error .input-group-addon{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.has-success .help-block,.has-success .control-label{color:#468847}.has-success .form-control{border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-success .form-control:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.has-success .input-group-addon{color:#468847;background-color:#dff0d8;border-color:#468847}.form-control-static{padding-top:7px;margin-bottom:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media(min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block}.form-inline .radio,.form-inline .checkbox{display:inline-block;padding-left:0;margin-top:0;margin-bottom:0}.form-inline .radio input[type="radio"],.form-inline .checkbox input[type="checkbox"]{float:none;margin-left:0}}.form-horizontal .control-label,.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal .radio-inline,.form-horizontal .checkbox-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}.form-horizontal .form-group:before,.form-horizontal .form-group:after{display:table;content:" "}.form-horizontal .form-group:after{clear:both}.form-horizontal 
.form-group:before,.form-horizontal .form-group:after{display:table;content:" "}.form-horizontal .form-group:after{clear:both}@media(min-width:768px){.form-horizontal .control-label{text-align:right}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:normal;line-height:1.428571429;text-align:center;white-space:nowrap;vertical-align:middle;cursor:pointer;border:1px solid transparent;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.btn:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus{color:#333;text-decoration:none}.btn:active,.btn.active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{pointer-events:none;cursor:not-allowed;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default:hover,.btn-default:focus,.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{color:#333;background-color:#ebebeb;border-color:#adadad}.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#fff;border-color:#ccc}.btn-primary{color:#fff;background-color:#428bca;border-color:#357ebd}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{color:#fff;background-color:#3276b1;border-color:#285e8e}.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#428bca;border-color:#357ebd}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{color:#fff;background-color:#ed9c28;border-color:#d58512}.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] .btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] 
.btn-warning.active{background-color:#f0ad4e;border-color:#eea236}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{color:#fff;background-color:#d2322d;border-color:#ac2925}.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#d9534f;border-color:#d43f3a}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{color:#fff;background-color:#47a447;border-color:#398439}.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] .btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.disabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#5cb85c;border-color:#4cae4c}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{color:#fff;background-color:#39b3d7;border-color:#269abc}.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] .btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] .btn-info.active{background-color:#5bc0de;border-color:#46b8da}.btn-link{font-weight:normal;color:#428bca;cursor:pointer;border-radius:0}.btn-link,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:hover,.btn-link:focus,.btn-link:active{border-color:transparent}.btn-link:hover,.btn-link:focus{color:#2a6496;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,fieldset[disabled] .btn-link:hover,.btn-link[disabled]:focus,fieldset[disabled] .btn-link:focus{color:#999;text-decoration:none}.btn-lg{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-sm,.btn-xs{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-xs{padding:1px 5px}.btn-block{display:block;width:100%;padding-right:0;padding-left:0}.btn-block+.btn-block{margin-top:5px}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;transition:opacity .15s 
linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;transition:height .35s ease}@font-face{font-family:'Glyphicons Halflings';src:url('../fonts/glyphicons-halflings-regular.eot');src:url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'),url('../fonts/glyphicons-halflings-regular.woff') format('woff'),url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'),url('../fonts/glyphicons-halflings-regular.svg#glyphicons-halflingsregular') format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';-webkit-font-smoothing:antialiased;font-style:normal;font-weight:normal;line-height:1}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-print:before{content:"\e045"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{
content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-so
rt-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-briefcase:before{content:"\1f4bc"}.glyphicon-calendar:before{content:"\1f4c5"}.glyphicon-pushpin:before{content:"\1f4cc"}.glyphicon-paperclip:before{content:"\1f4ce"}.glyphicon-camera:before{content:"\1f4f7"}.glyphicon-lock:before{content:"\1f512"}.glyphicon-bell:before{content:"\1f514"}.glyphicon-bookmark:before{content:"\1f516"}.glyphicon-fire:before{content:"\1f525"}.glyphicon-wrench:before{content:"\1f527"}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid #000;border-right:4px solid transparent;border-bottom:0 dotted;border-left:4px solid transparent;content:""}.dropdown{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;list-style:none;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,0.175);box-shadow:0 6px 12px rgba(0,0,0,0.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 
20px;clear:both;font-weight:normal;line-height:1.428571429;color:#333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{color:#fff;text-decoration:none;background-color:#428bca}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#fff;text-decoration:none;background-color:#428bca;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#999}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.428571429;color:#999}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0 dotted;border-bottom:4px solid #000;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}@media(min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}}.btn-default .caret{border-top-color:#333}.btn-primary .caret,.btn-success .caret,.btn-warning .caret,.btn-danger .caret,.btn-info .caret{border-top-color:#fff}.dropup .btn-default .caret{border-bottom-color:#333}.dropup .btn-primary .caret,.dropup .btn-success .caret,.dropup .btn-warning .caret,.dropup .btn-danger .caret,.dropup .btn-info .caret{border-bottom-color:#fff}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;float:left}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover,.btn-group>.btn:focus,.btn-group-vertical>.btn:focus,.btn-group>.btn:active,.btn-group-vertical>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn.active{z-index:2}.btn-group>.btn:focus,.btn-group-vertical>.btn:focus{outline:0}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar:before,.btn-toolbar:after{display:table;content:" "}.btn-toolbar:after{clear:both}.btn-toolbar:before,.btn-toolbar:after{display:table;content:" "}.btn-toolbar:after{clear:both}.btn-toolbar .btn-group{float:left}.btn-toolbar>.btn+.btn,.btn-toolbar>.btn-group+.btn,.btn-toolbar>.btn+.btn-group,.btn-toolbar>.btn-group+.btn-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child>.btn:last-child,.btn-group>.btn-group:first-child>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child>.btn:first-child{border-bottom-left-radius:0;border-top-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group-xs>.btn{padding:5px 10px;padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-sm>.btn{padding:5px 
10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-lg>.btn{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after{display:table;content:" "}.btn-group-vertical>.btn-group:after{clear:both}.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after{display:table;content:" "}.btn-group-vertical>.btn-group:after{clear:both}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-right-radius:0;border-bottom-left-radius:4px;border-top-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child>.btn:last-child,.btn-group-vertical>.btn-group:first-child>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child>.btn:first-child{border-top-right-radius:0;border-top-left-radius:0}.btn-group-justified{display:table;width:100%;border-collapse:separate;table-layout:fixed}.btn-group-justified .btn{display:table-cell;float:none;width:1%}[data-toggle="buttons"]>.btn>input[type="radio"],[data-toggle="buttons"]>.btn>input[type="checkbox"]{display:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group.col{float:none;padding-right:0;padding-left:0}.input-group .form-control{width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:45px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:45px;line-height:45px}textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group-addon,.input-group-btn,.input-group .form-control{display:table-cell}.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child),.input-group 
.form-control:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:normal;line-height:1;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type="radio"],.input-group-addon input[type="checkbox"]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:last-child>.btn,.input-group-btn:last-child>.dropdown-toggle,.input-group-btn:first-child>.btn:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-4px}.input-group-btn>.btn:hover,.input-group-btn>.btn:active{z-index:2}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav:before,.nav:after{display:table;content:" "}.nav:after{clear:both}.nav:before,.nav:after{display:table;content:" "}.nav:after{clear:both}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#999}.nav>li.disabled>a:hover,.nav>li.disabled>a:focus{color:#999;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:hover,.nav .open>a:focus{background-color:#eee;border-color:#428bca}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.428571429;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:hover,.nav-tabs>li.active>a:focus{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{text-align:center}@media(min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}}.nav-tabs.nav-justified>li>a{margin-right:0;border-bottom:1px solid #ddd}.nav-tabs.nav-justified>.active>a{border-bottom-color:#fff}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:5px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:hover,.nav-pills>li.active>a:focus{color:#fff;background-color:#428bca}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{text-align:center}@media(min-width:768px){.nav-justified>li{display:table-cell;width:1%}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-bottom:1px solid 
#ddd}.nav-tabs-justified>.active>a{border-bottom-color:#fff}.tabbable:before,.tabbable:after{display:table;content:" "}.tabbable:after{clear:both}.tabbable:before,.tabbable:after{display:table;content:" "}.tabbable:after{clear:both}.tab-content>.tab-pane,.pill-content>.pill-pane{display:none}.tab-content>.active,.pill-content>.active{display:block}.nav .caret{border-top-color:#428bca;border-bottom-color:#428bca}.nav a:hover .caret{border-top-color:#2a6496;border-bottom-color:#2a6496}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-right-radius:0;border-top-left-radius:0}.navbar{position:relative;z-index:1000;min-height:50px;margin-bottom:20px;border:1px solid transparent}.navbar:before,.navbar:after{display:table;content:" "}.navbar:after{clear:both}.navbar:before,.navbar:after{display:table;content:" "}.navbar:after{clear:both}@media(min-width:768px){.navbar{border-radius:4px}}.navbar-header:before,.navbar-header:after{display:table;content:" "}.navbar-header:after{clear:both}.navbar-header:before,.navbar-header:after{display:table;content:" "}.navbar-header:after{clear:both}@media(min-width:768px){.navbar-header{float:left}}.navbar-collapse{max-height:340px;padding-right:15px;padding-left:15px;overflow-x:visible;border-top:1px solid transparent;box-shadow:inset 0 1px 0 rgba(255,255,255,0.1);-webkit-overflow-scrolling:touch}.navbar-collapse:before,.navbar-collapse:after{display:table;content:" "}.navbar-collapse:after{clear:both}.navbar-collapse:before,.navbar-collapse:after{display:table;content:" "}.navbar-collapse:after{clear:both}.navbar-collapse.in{overflow-y:auto}@media(min-width:768px){.navbar-collapse{width:auto;border-top:0;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-collapse .navbar-nav.navbar-left:first-child{margin-left:-15px}.navbar-collapse .navbar-nav.navbar-right:last-child{margin-right:-15px}.navbar-collapse .navbar-text:last-child{margin-right:0}}.container>.navbar-header,.container>.navbar-collapse{margin-right:-15px;margin-left:-15px}@media(min-width:768px){.container>.navbar-header,.container>.navbar-collapse{margin-right:0;margin-left:0}}.navbar-static-top{border-width:0 0 1px}@media(min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;border-width:0 0 1px}@media(min-width:768px){.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}}.navbar-fixed-top{top:0;z-index:1030}.navbar-fixed-bottom{bottom:0;margin-bottom:0}.navbar-brand{float:left;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}@media(min-width:768px){.navbar>.container .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;border:1px solid transparent;border-radius:4px}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media(min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media(max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;box-shadow:none}.navbar-nav .open .dropdown-menu>li>a,.navbar-nav .open .dropdown-menu .dropdown-header{padding:5px 15px 5px 
25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:hover,.navbar-nav .open .dropdown-menu>li>a:focus{background-image:none}}@media(min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}@media(min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1)}@media(min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block}.navbar-form .radio,.navbar-form .checkbox{display:inline-block;padding-left:0;margin-top:0;margin-bottom:0}.navbar-form .radio input[type="radio"],.navbar-form .checkbox input[type="checkbox"]{float:none;margin-left:0}}@media(max-width:767px){.navbar-form .form-group{margin-bottom:5px}}@media(min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-right-radius:0;border-top-left-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-nav.pull-right>li>.dropdown-menu,.navbar-nav>li>.dropdown-menu.pull-right{right:0;left:auto}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-text{float:left;margin-top:15px;margin-bottom:15px}@media(min-width:768px){.navbar-text{margin-right:15px;margin-left:15px}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:hover,.navbar-default .navbar-brand:focus{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:hover,.navbar-default .navbar-nav>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:hover,.navbar-default .navbar-nav>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:hover,.navbar-default .navbar-nav>.disabled>a:focus{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:hover,.navbar-default .navbar-toggle:focus{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#ccc}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e6e6e6}.navbar-default .navbar-nav>.dropdown>a:hover .caret,.navbar-default .navbar-nav>.dropdown>a:focus .caret{border-top-color:#333;border-bottom-color:#333}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:hover,.navbar-default .navbar-nav>.open>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.open>a .caret,.navbar-default .navbar-nav>.open>a:hover .caret,.navbar-default .navbar-nav>.open>a:focus .caret{border-top-color:#555;border-bottom-color:#555}.navbar-default .navbar-nav>.dropdown>a .caret{border-top-color:#777;border-bottom-color:#777}@media(max-width:767px){.navbar-default .navbar-nav .open 
.dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#999}.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-brand:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#999}.navbar-inverse .navbar-nav>li>a{color:#999}.navbar-inverse .navbar-nav>li>a:hover,.navbar-inverse .navbar-nav>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:hover,.navbar-inverse .navbar-nav>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:hover,.navbar-inverse .navbar-nav>.disabled>a:focus{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:hover,.navbar-inverse .navbar-toggle:focus{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:hover,.navbar-inverse .navbar-nav>.open>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.dropdown>a:hover .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .navbar-nav>.dropdown>a .caret{border-top-color:#999;border-bottom-color:#999}.navbar-inverse .navbar-nav>.open>a .caret,.navbar-inverse .navbar-nav>.open>a:hover .caret,.navbar-inverse .navbar-nav>.open>a:focus .caret{border-top-color:#fff;border-bottom-color:#fff}@media(max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#999}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover{color:#fff}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#999}.pagination{display:inline-block;padding-left:0;margin:20px 
0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.428571429;text-decoration:none;background-color:#fff;border:1px solid #ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-bottom-left-radius:4px;border-top-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:hover,.pagination>li>span:hover,.pagination>li>a:focus,.pagination>li>span:focus{background-color:#eee}.pagination>.active>a,.pagination>.active>span,.pagination>.active>a:hover,.pagination>.active>span:hover,.pagination>.active>a:focus,.pagination>.active>span:focus{z-index:2;color:#fff;cursor:default;background-color:#428bca;border-color:#428bca}.pagination>.disabled>span,.pagination>.disabled>a,.pagination>.disabled>a:hover,.pagination>.disabled>a:focus{color:#999;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-bottom-left-radius:6px;border-top-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-bottom-left-radius:3px;border-top-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager:before,.pager:after{display:table;content:" "}.pager:after{clear:both}.pager:before,.pager:after{display:table;content:" "}.pager:after{clear:both}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:hover,.pager li>a:focus{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#999;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:bold;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}.label[href]:hover,.label[href]:focus{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.label-default{background-color:#999}.label-default[href]:hover,.label-default[href]:focus{background-color:#808080}.label-primary{background-color:#428bca}.label-primary[href]:hover,.label-primary[href]:focus{background-color:#3071a9}.label-success{background-color:#5cb85c}.label-success[href]:hover,.label-success[href]:focus{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:hover,.label-info[href]:focus{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:hover,.label-warning[href]:focus{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:hover,.label-danger[href]:focus{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 
7px;font-size:12px;font-weight:bold;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;background-color:#999;border-radius:10px}.badge:empty{display:none}a.badge:hover,a.badge:focus{color:#fff;text-decoration:none;cursor:pointer}.btn .badge{position:relative;top:-1px}a.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#428bca;background-color:#fff}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px;margin-bottom:30px;font-size:21px;font-weight:200;line-height:2.1428571435;color:inherit;background-color:#eee}.jumbotron h1{line-height:1;color:inherit}.jumbotron p{line-height:1.4}.container .jumbotron{border-radius:6px}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron{padding-right:60px;padding-left:60px}.jumbotron h1{font-size:63px}}.thumbnail{display:inline-block;display:block;height:auto;max-width:100%;padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.thumbnail>img{display:block;height:auto;max-width:100%}a.thumbnail:hover,a.thumbnail:focus{border-color:#428bca}.thumbnail>img{margin-right:auto;margin-left:auto}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:bold}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable{padding-right:35px}.alert-dismissable .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#356635}.alert-info{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#2d6987}.alert-warning{color:#c09853;background-color:#fcf8e3;border-color:#fbeed5}.alert-warning hr{border-top-color:#f8e5be}.alert-warning .alert-link{color:#a47e3c}.alert-danger{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.alert-danger hr{border-top-color:#e6c1c7}.alert-danger .alert-link{color:#953b39}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-moz-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:0 0}to{background-position:40px 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;color:#fff;text-align:center;background-color:#428bca;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-transition:width .6s ease;transition:width .6s ease}.progress-striped .progress-bar{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 
50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-size:40px 40px}.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-moz-animation:progress-bar-stripes 2s linear infinite;-ms-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 
75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.media,.media-body{overflow:hidden;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media>.pull-left{margin-right:10px}.media>.pull-right{margin-left:10px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-right-radius:4px;border-top-left-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:hover,a.list-group-item:focus{text-decoration:none;background-color:#f5f5f5}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca}.list-group-item.active .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:hover .list-group-item-text,.list-group-item.active:focus .list-group-item-text{color:#e1edf7}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,0.05);box-shadow:0 1px 1px rgba(0,0,0,0.05)}.panel-body{padding:15px}.panel-body:before,.panel-body:after{display:table;content:" "}.panel-body:after{clear:both}.panel-body:before,.panel-body:after{display:table;content:" "}.panel-body:after{clear:both}.panel>.list-group{margin-bottom:0}.panel>.list-group .list-group-item{border-width:1px 0}.panel>.list-group .list-group-item:first-child{border-top-right-radius:0;border-top-left-radius:0}.panel>.list-group .list-group-item:last-child{border-bottom:0}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.panel>.table{margin-bottom:0}.panel>.panel-body+.table{border-top:1px solid #ddd}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-right-radius:3px;border-top-left-radius:3px}.panel-title{margin-top:0;margin-bottom:0;font-size:16px}.panel-title>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid 
#ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-group .panel{margin-bottom:0;overflow:hidden;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse .panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse .panel-body{border-top-color:#ddd}.panel-default>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse .panel-body{border-top-color:#428bca}.panel-primary>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse .panel-body{border-top-color:#d6e9c6}.panel-success>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#d6e9c6}.panel-warning{border-color:#fbeed5}.panel-warning>.panel-heading{color:#c09853;background-color:#fcf8e3;border-color:#fbeed5}.panel-warning>.panel-heading+.panel-collapse .panel-body{border-top-color:#fbeed5}.panel-warning>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#fbeed5}.panel-danger{border-color:#eed3d7}.panel-danger>.panel-heading{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.panel-danger>.panel-heading+.panel-collapse .panel-body{border-top-color:#eed3d7}.panel-danger>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#eed3d7}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse .panel-body{border-top-color:#bce8f1}.panel-info>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#bce8f1}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 1px rgba(0,0,0,0.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:bold;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;opacity:.5;filter:alpha(opacity=50)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}body.modal-open,.modal-open .navbar-fixed-top,.modal-open .navbar-fixed-bottom{margin-right:15px}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;display:none;overflow:auto;overflow-y:scroll}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-moz-transition:-moz-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out}.modal.in 
.modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.modal-dialog{z-index:1050;width:auto;padding:10px;margin-right:auto;margin-left:auto}.modal-content{position:relative;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,0.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,0.5);box-shadow:0 3px 9px rgba(0,0,0,0.5);background-clip:padding-box}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1030;background-color:#000}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{min-height:16.428571429px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.428571429}.modal-body{position:relative;padding:20px}.modal-footer{padding:19px 20px 20px;margin-top:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer:before,.modal-footer:after{display:table;content:" "}.modal-footer:after{clear:both}.modal-footer:before,.modal-footer:after{display:table;content:" "}.modal-footer:after{clear:both}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}@media screen and (min-width:768px){.modal-dialog{right:auto;left:50%;width:600px;padding-top:30px;padding-bottom:30px}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,0.5);box-shadow:0 5px 15px rgba(0,0,0,0.5)}}.tooltip{position:absolute;z-index:1030;display:block;font-size:12px;line-height:1.4;opacity:0;filter:alpha(opacity=0);visibility:visible}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.top-right .tooltip-arrow{right:5px;bottom:0;border-top-color:#000;border-width:5px 5px 0}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-right-color:#000;border-width:5px 5px 5px 0}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-left-color:#000;border-width:5px 0 5px 5px}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-bottom-color:#000;border-width:0 5px 5px}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-bottom-color:#000;border-width:0 5px 5px}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-bottom-color:#000;border-width:0 5px 5px}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;max-width:276px;padding:1px;text-align:left;white-space:normal;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);background-clip:padding-box}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:normal;line-height:18px;background-color:#f7f7f7;border-bottom:1px 
solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover .arrow,.popover .arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover .arrow{border-width:11px}.popover .arrow:after{border-width:10px;content:""}.popover.top .arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,0.25);border-bottom-width:0}.popover.top .arrow:after{bottom:1px;margin-left:-10px;border-top-color:#fff;border-bottom-width:0;content:" "}.popover.right .arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,0.25);border-left-width:0}.popover.right .arrow:after{bottom:-10px;left:1px;border-right-color:#fff;border-left-width:0;content:" "}.popover.bottom .arrow{top:-11px;left:50%;margin-left:-11px;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,0.25);border-top-width:0}.popover.bottom .arrow:after{top:1px;margin-left:-10px;border-bottom-color:#fff;border-top-width:0;content:" "}.popover.left .arrow{top:50%;right:-11px;margin-top:-11px;border-left-color:#999;border-left-color:rgba(0,0,0,0.25);border-right-width:0}.popover.left .arrow:after{right:1px;bottom:-10px;border-left-color:#fff;border-right-width:0;content:" "}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;height:auto;max-width:100%;line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6);opacity:.5;filter:alpha(opacity=50)}.carousel-control.left{background-image:-webkit-gradient(linear,0 top,100% top,from(rgba(0,0,0,0.5)),to(rgba(0,0,0,0.0001)));background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,0.5) 0),color-stop(rgba(0,0,0,0.0001) 100%));background-image:-moz-linear-gradient(left,rgba(0,0,0,0.5) 0,rgba(0,0,0,0.0001) 100%);background-image:linear-gradient(to right,rgba(0,0,0,0.5) 0,rgba(0,0,0,0.0001) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000',endColorstr='#00000000',GradientType=1)}.carousel-control.right{right:0;left:auto;background-image:-webkit-gradient(linear,0 top,100% top,from(rgba(0,0,0,0.0001)),to(rgba(0,0,0,0.5)));background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,0.0001) 0),color-stop(rgba(0,0,0,0.5) 100%));background-image:-moz-linear-gradient(left,rgba(0,0,0,0.0001) 0,rgba(0,0,0,0.5) 100%);background-image:linear-gradient(to right,rgba(0,0,0,0.0001) 0,rgba(0,0,0,0.5) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000',endColorstr='#80000000',GradientType=1)}.carousel-control:hover,.carousel-control:focus{color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control 
.glyphicon-chevron-right{position:absolute;top:50%;left:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;margin-top:-10px;margin-left:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;margin-left:-15px;font-size:30px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after{display:table;content:" "}.clearfix:after{clear:both}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.affix{position:fixed}@-ms-viewport{width:device-width}@media screen and (max-width:400px){@-ms-viewport{width:320px}}.hidden{display:none!important;visibility:hidden!important}.visible-xs{display:none!important}tr.visible-xs{display:none!important}th.visible-xs,td.visible-xs{display:none!important}@media(max-width:767px){.visible-xs{display:block!important}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-xs.visible-sm{display:block!important}tr.visible-xs.visible-sm{display:table-row!important}th.visible-xs.visible-sm,td.visible-xs.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-xs.visible-md{display:block!important}tr.visible-xs.visible-md{display:table-row!important}th.visible-xs.visible-md,td.visible-xs.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-xs.visible-lg{display:block!important}tr.visible-xs.visible-lg{display:table-row!important}th.visible-xs.visible-lg,td.visible-xs.visible-lg{display:table-cell!important}}.visible-sm{display:none!important}tr.visible-sm{display:none!important}th.visible-sm,td.visible-sm{display:none!important}@media(max-width:767px){.visible-sm.visible-xs{display:block!important}tr.visible-sm.visible-xs{display:table-row!important}th.visible-sm.visible-xs,td.visible-sm.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-sm{display:block!important}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}@media(min-width:992px) and 
(max-width:1199px){.visible-sm.visible-md{display:block!important}tr.visible-sm.visible-md{display:table-row!important}th.visible-sm.visible-md,td.visible-sm.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-sm.visible-lg{display:block!important}tr.visible-sm.visible-lg{display:table-row!important}th.visible-sm.visible-lg,td.visible-sm.visible-lg{display:table-cell!important}}.visible-md{display:none!important}tr.visible-md{display:none!important}th.visible-md,td.visible-md{display:none!important}@media(max-width:767px){.visible-md.visible-xs{display:block!important}tr.visible-md.visible-xs{display:table-row!important}th.visible-md.visible-xs,td.visible-md.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-md.visible-sm{display:block!important}tr.visible-md.visible-sm{display:table-row!important}th.visible-md.visible-sm,td.visible-md.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-md{display:block!important}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-md.visible-lg{display:block!important}tr.visible-md.visible-lg{display:table-row!important}th.visible-md.visible-lg,td.visible-md.visible-lg{display:table-cell!important}}.visible-lg{display:none!important}tr.visible-lg{display:none!important}th.visible-lg,td.visible-lg{display:none!important}@media(max-width:767px){.visible-lg.visible-xs{display:block!important}tr.visible-lg.visible-xs{display:table-row!important}th.visible-lg.visible-xs,td.visible-lg.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-lg.visible-sm{display:block!important}tr.visible-lg.visible-sm{display:table-row!important}th.visible-lg.visible-sm,td.visible-lg.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-lg.visible-md{display:block!important}tr.visible-lg.visible-md{display:table-row!important}th.visible-lg.visible-md,td.visible-lg.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-lg{display:block!important}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}.hidden-xs{display:block!important}tr.hidden-xs{display:table-row!important}th.hidden-xs,td.hidden-xs{display:table-cell!important}@media(max-width:767px){.hidden-xs{display:none!important}tr.hidden-xs{display:none!important}th.hidden-xs,td.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-xs.hidden-sm{display:none!important}tr.hidden-xs.hidden-sm{display:none!important}th.hidden-xs.hidden-sm,td.hidden-xs.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-xs.hidden-md{display:none!important}tr.hidden-xs.hidden-md{display:none!important}th.hidden-xs.hidden-md,td.hidden-xs.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-xs.hidden-lg{display:none!important}tr.hidden-xs.hidden-lg{display:none!important}th.hidden-xs.hidden-lg,td.hidden-xs.hidden-lg{display:none!important}}.hidden-sm{display:block!important}tr.hidden-sm{display:table-row!important}th.hidden-sm,td.hidden-sm{display:table-cell!important}@media(max-width:767px){.hidden-sm.hidden-xs{display:none!important}tr.hidden-sm.hidden-xs{display:none!important}th.hidden-sm.hidden-xs,td.hidden-sm.hidden-xs{display:none!important}}@media(min-width:768px) and 
(max-width:991px){.hidden-sm{display:none!important}tr.hidden-sm{display:none!important}th.hidden-sm,td.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-sm.hidden-md{display:none!important}tr.hidden-sm.hidden-md{display:none!important}th.hidden-sm.hidden-md,td.hidden-sm.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-sm.hidden-lg{display:none!important}tr.hidden-sm.hidden-lg{display:none!important}th.hidden-sm.hidden-lg,td.hidden-sm.hidden-lg{display:none!important}}.hidden-md{display:block!important}tr.hidden-md{display:table-row!important}th.hidden-md,td.hidden-md{display:table-cell!important}@media(max-width:767px){.hidden-md.hidden-xs{display:none!important}tr.hidden-md.hidden-xs{display:none!important}th.hidden-md.hidden-xs,td.hidden-md.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-md.hidden-sm{display:none!important}tr.hidden-md.hidden-sm{display:none!important}th.hidden-md.hidden-sm,td.hidden-md.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}tr.hidden-md{display:none!important}th.hidden-md,td.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-md.hidden-lg{display:none!important}tr.hidden-md.hidden-lg{display:none!important}th.hidden-md.hidden-lg,td.hidden-md.hidden-lg{display:none!important}}.hidden-lg{display:block!important}tr.hidden-lg{display:table-row!important}th.hidden-lg,td.hidden-lg{display:table-cell!important}@media(max-width:767px){.hidden-lg.hidden-xs{display:none!important}tr.hidden-lg.hidden-xs{display:none!important}th.hidden-lg.hidden-xs,td.hidden-lg.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-lg.hidden-sm{display:none!important}tr.hidden-lg.hidden-sm{display:none!important}th.hidden-lg.hidden-sm,td.hidden-lg.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-lg.hidden-md{display:none!important}tr.hidden-lg.hidden-md{display:none!important}th.hidden-lg.hidden-md,td.hidden-lg.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-lg{display:none!important}tr.hidden-lg{display:none!important}th.hidden-lg,td.hidden-lg{display:none!important}}.visible-print{display:none!important}tr.visible-print{display:none!important}th.visible-print,td.visible-print{display:none!important}@media print{.visible-print{display:block!important}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}.hidden-print{display:none!important}tr.hidden-print{display:none!important}th.hidden-print,td.hidden-print{display:none!important}} \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/main/webapp/css/sentry.css b/sentry-provider/sentry-provider-db/src/main/webapp/css/sentry.css new file mode 100644 index 000000000..e5b3d4374 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/main/webapp/css/sentry.css @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* General styling */ +body { padding-top: 80px; } +.navbar-collapse {margin-top:10px} diff --git a/sentry-provider/sentry-provider-db/src/main/webapp/sentry.png b/sentry-provider/sentry-provider-db/src/main/webapp/sentry.png new file mode 100644 index 000000000..67edd9074 Binary files /dev/null and b/sentry-provider/sentry-provider-db/src/main/webapp/sentry.png differ diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/SentryStoreIntegrationBase.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/SentryStoreIntegrationBase.java index 79510228e..f14b58667 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/SentryStoreIntegrationBase.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/SentryStoreIntegrationBase.java @@ -33,7 +33,7 @@ public abstract class SentryStoreIntegrationBase { protected final static String[] adminGroups = { "adminGroup" }; private static File dataDir; private static File policyFilePath; - private static Configuration conf; + protected static Configuration conf; protected static DelegateSentryStore sentryStore; protected static PolicyFile policyFile; @@ -49,6 +49,7 @@ private static void setup(Configuration conf) throws Exception { conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dataDir.getPath() + ";create=true"); + conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); conf.setStrings(ServerConfig.ADMIN_GROUPS, adminGroups); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestDelegateSentryStore.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestDelegateSentryStore.java index 751bc3ff9..cfcbbd13f 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestDelegateSentryStore.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestDelegateSentryStore.java @@ -17,8 +17,8 @@ */ package org.apache.sentry.provider.db.generic.service.persistent; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import java.util.Set; @@ -59,7 +59,7 @@ public void testCaseInsensitiveCreateDropRole() throws Exception { sentryStore.createRole(SEARCH, roleName1, grantor); try { sentryStore.createRole(SEARCH, roleName2, grantor); - fail("SentryAlreadyExistsException should have been thrown"); + fail("Failed to throw SentryAlreadyExistsException"); } catch (SentryAlreadyExistsException e) { //ignore the exception } @@ -67,7 +67,7 @@ public void
testCaseInsensitiveCreateDropRole() throws Exception { try { sentryStore.dropRole(SEARCH, roleName2, grantor); } catch (SentryNoSuchObjectException e) { - fail("SentryNoSuchObjectException shouldn't have been thrown"); + fail("Shouldn't throw SentryNoSuchObjectException"); } } diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestPrivilegeOperatePersistence.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestPrivilegeOperatePersistence.java index 88933911a..9cbd1bd98 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestPrivilegeOperatePersistence.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestPrivilegeOperatePersistence.java @@ -17,19 +17,27 @@ */ package org.apache.sentry.provider.db.generic.service.persistent; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import java.util.Arrays; +import java.util.Collections; import java.util.List; +import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; +import org.apache.sentry.SentryUserException; import org.apache.sentry.core.common.Authorizable; +import org.apache.sentry.core.common.BitFieldAction; +import org.apache.sentry.core.common.BitFieldActionFactory; import org.apache.sentry.core.model.search.Collection; import org.apache.sentry.core.model.search.Field; import org.apache.sentry.core.model.search.SearchConstants; +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; import org.apache.sentry.provider.db.SentryGrantDeniedException; import org.apache.sentry.provider.db.generic.service.persistent.PrivilegeObject.Builder; import org.apache.sentry.provider.file.PolicyFile; +import org.apache.sentry.service.thrift.ServiceConstants; import org.junit.Before; import org.junit.Test; @@ -68,50 +76,7 @@ public void configure() throws Exception { */ @Test public void testGrantPrivilege() throws Exception { - String roleName = "r1"; - /** - * grantor is admin, there is no need to check grant option - */ - String grantor = ADMIN_USER; - PrivilegeObject queryPrivilege = new Builder() - .setComponent(SEARCH) - .setAction(SearchConstants.QUERY) - .setService(SERVICE) - .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME))) - .withGrantOption(null) - .build(); - - sentryStore.createRole(SEARCH, roleName, grantor); - sentryStore.alterRoleGrantPrivilege(SEARCH, roleName, queryPrivilege, grantor); - - assertEquals(Sets.newHashSet(queryPrivilege), - sentryStore.getPrivilegesByRole(SEARCH, Sets.newHashSet(roleName))); - - PrivilegeObject queryPrivilegeWithOption = new Builder() - .setComponent(SEARCH) - .setAction(SearchConstants.QUERY) - .setService(SERVICE) - .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME))) - .withGrantOption(true) - .build(); - - sentryStore.alterRoleGrantPrivilege(SEARCH, roleName, queryPrivilegeWithOption, grantor); - - assertEquals(Sets.newHashSet(queryPrivilege, queryPrivilegeWithOption), - sentryStore.getPrivilegesByRole(SEARCH, Sets.newHashSet(roleName))); - - PrivilegeObject queryPrivilegeWithNoOption = new Builder() - .setComponent(SEARCH) - .setAction(SearchConstants.QUERY) - .setService(SERVICE) - .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME))) - 
.withGrantOption(false) - .build(); - - sentryStore.alterRoleGrantPrivilege(SEARCH, roleName, queryPrivilegeWithNoOption, grantor); - - assertEquals(Sets.newHashSet(queryPrivilege, queryPrivilegeWithOption, queryPrivilegeWithNoOption), - sentryStore.getPrivilegesByRole(SEARCH, Sets.newHashSet(roleName))); + testGrantPrivilege(sentryStore, SEARCH); } @Test @@ -497,7 +462,7 @@ public void testRevokePrivilegeWithAllPrivilegeExist() throws Exception { String grantor = ADMIN_USER; PrivilegeObject allPrivilege = new Builder() .setComponent(SEARCH) - .setAction(SearchConstants.QUERY) + .setAction(SearchConstants.ALL) .setService(SERVICE) .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME), new Field(FIELD_NAME))) .build(); @@ -525,6 +490,58 @@ public void testRevokePrivilegeWithAllPrivilegeExist() throws Exception { sentryStore.getPrivilegesByRole(SEARCH, Sets.newHashSet(roleName))); } + /** + * Grant the update, query and all privileges to role r1, + * then revoke the update privilege from role r1; + * only the query privilege should remain for role r1 + */ + @Test + public void testRevokePrivilegeWithAllPrivilegesGranted() throws Exception { + String roleName = "r1"; + /** + * grantor is admin, so there is no need to check the grant option + */ + String grantor = ADMIN_USER; + PrivilegeObject allPrivilege = new Builder() + .setComponent(SEARCH) + .setAction(SearchConstants.ALL) + .setService(SERVICE) + .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME), new Field(FIELD_NAME))) + .build(); + + PrivilegeObject updatePrivilege = new Builder(allPrivilege) + .setAction(SearchConstants.UPDATE) + .build(); + + PrivilegeObject queryPrivilege = new Builder(allPrivilege) + .setAction(SearchConstants.QUERY) + .build(); + + sentryStore.createRole(SEARCH, roleName, grantor); + //grant query to role r1 + sentryStore.alterRoleGrantPrivilege(SEARCH, roleName, queryPrivilege, grantor); + assertEquals(Sets.newHashSet(queryPrivilege), + sentryStore.getPrivilegesByRole(SEARCH, Sets.newHashSet(roleName))); + + //grant update to role r1 + sentryStore.alterRoleGrantPrivilege(SEARCH, roleName, updatePrivilege, grantor); + assertEquals(Sets.newHashSet(queryPrivilege, updatePrivilege), + sentryStore.getPrivilegesByRole(SEARCH, Sets.newHashSet(roleName))); + /** + * grant the all privilege to role r1; because the all action subsumes the query and update actions, + * role r1 ends up holding only the all privilege + */ + sentryStore.alterRoleGrantPrivilege(SEARCH, roleName, allPrivilege, grantor); + assertEquals(Sets.newHashSet(allPrivilege), + sentryStore.getPrivilegesByRole(SEARCH, Sets.newHashSet(roleName))); + /** + * revoke the update privilege from role r1; the query privilege remains + */ + sentryStore.alterRoleRevokePrivilege(SEARCH, roleName, updatePrivilege, grantor); + assertEquals(Sets.newHashSet(queryPrivilege), + sentryStore.getPrivilegesByRole(SEARCH, Sets.newHashSet(roleName))); + } + @Test public void testRevokeParentPrivilegeWithChildsExist() throws Exception { String roleName = "r1"; @@ -900,4 +917,221 @@ public void testGetPrivilegesByProvider() throws Exception { sentryStore.getPrivilegesByProvider(SEARCH, service1, Sets.newHashSet(roleName1,roleName2), Sets.newHashSet(group), authorizables)); } + + @Test + public void testGetPrivilegesByAuthorizable() throws Exception { + String roleName1 = "r1"; + String roleName2 = "r2"; + String roleName3 = "r3"; + String grantor = ADMIN_USER; + + String service1 = "service1"; + + PrivilegeObject queryPrivilege1 = new Builder() + .setComponent(SEARCH) + 
.setAction(SearchConstants.QUERY) + .setService(service1) + .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME))) + .build(); + + PrivilegeObject updatePrivilege1 = new Builder() + .setComponent(SEARCH) + .setAction(SearchConstants.UPDATE) + .setService(service1) + .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME), new Field(FIELD_NAME))) + .build(); + + PrivilegeObject queryPrivilege2 = new Builder() + .setComponent(SEARCH) + .setAction(SearchConstants.QUERY) + .setService(service1) + .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME))) + .build(); + + PrivilegeObject updatePrivilege2 = new Builder() + .setComponent(SEARCH) + .setAction(SearchConstants.UPDATE) + .setService(service1) + .setAuthorizables(Arrays.asList(new Collection(COLLECTION_NAME), new Field(FIELD_NAME))) + .build(); + + sentryStore.createRole(SEARCH, roleName1, grantor); + sentryStore.createRole(SEARCH, roleName2, grantor); + sentryStore.createRole(SEARCH, roleName3, grantor); + + sentryStore.alterRoleGrantPrivilege(SEARCH, roleName1, queryPrivilege1, grantor); + sentryStore.alterRoleGrantPrivilege(SEARCH, roleName1, updatePrivilege1, grantor); + sentryStore.alterRoleGrantPrivilege(SEARCH, roleName2, queryPrivilege2, grantor); + sentryStore.alterRoleGrantPrivilege(SEARCH, roleName3, updatePrivilege2, grantor); + + assertEquals(0, sentryStore.getPrivilegesByAuthorizable(SEARCH, service1, null, + Arrays.asList(new Collection(COLLECTION_NAME), new Field(FIELD_NAME))).size()); + assertEquals(2, sentryStore.getPrivilegesByAuthorizable(SEARCH, service1, + Sets.newHashSet(roleName1), null).size()); + assertEquals(2, sentryStore.getPrivilegesByAuthorizable(SEARCH, service1, + Sets.newHashSet(roleName1,roleName2), null).size()); + assertEquals(2, sentryStore.getPrivilegesByAuthorizable(SEARCH, service1, + Sets.newHashSet(roleName1,roleName2, roleName3), null).size()); + } + + @Test(expected = RuntimeException.class) + public void testGrantPrivilegeExternalComponentMissingConf() throws SentryUserException { + testGrantPrivilege(sentryStore, "externalComponent"); + } + + @Test(expected = RuntimeException.class) + public void testGrantPrivilegeExternalComponentInvalidConf() throws Exception { + String externalComponent = "mycomponent"; + Configuration confCopy = new Configuration(conf); + confCopy.set(String.format(ServiceConstants.ServerConfig.SENTRY_COMPONENT_ACTION_FACTORY_FORMAT, externalComponent), + InvalidActionFactory.class.getName()); + SentryStoreLayer store = new DelegateSentryStore(confCopy); + testGrantPrivilege(store, externalComponent); + } + + @Test + public void testGrantPrivilegeExternalComponent() throws Exception { + String externalComponent = "mycomponent"; + Configuration confCopy = new Configuration(conf); + confCopy.set(String.format(ServiceConstants.ServerConfig.SENTRY_COMPONENT_ACTION_FACTORY_FORMAT, externalComponent), + MyComponentActionFactory.class.getName()); + SentryStoreLayer store = new DelegateSentryStore(confCopy); + testGrantPrivilege(store, externalComponent); + } + + @Test + public void testGrantPrivilegeExternalComponentCaseInsensitivity() throws Exception { + String externalComponent = "MyCoMpOnEnT"; + Configuration confCopy = new Configuration(conf); + confCopy.set(String.format(ServiceConstants.ServerConfig.SENTRY_COMPONENT_ACTION_FACTORY_FORMAT, "mycomponent"), + MyComponentActionFactory.class.getName()); + SentryStoreLayer store = new DelegateSentryStore(confCopy); + testGrantPrivilege(store, externalComponent); + } + + private void 
testGrantPrivilege(SentryStoreLayer sentryStore, String component) throws SentryUserException { + String roleName = "r1"; + /** + * grantor is admin, so there is no need to check the grant option + */ + String grantor = ADMIN_USER; + PrivilegeObject queryPrivilege = new Builder() + .setComponent(component) + .setAction(SearchConstants.QUERY) + .setService(SERVICE) + .setAuthorizables(Collections.singletonList(new Collection(COLLECTION_NAME))) + .withGrantOption(null) + .build(); + + sentryStore.createRole(component, roleName, grantor); + sentryStore.alterRoleGrantPrivilege(component, roleName, queryPrivilege, grantor); + + assertEquals(Sets.newHashSet(queryPrivilege), + sentryStore.getPrivilegesByRole(component, Sets.newHashSet(roleName))); + + PrivilegeObject queryPrivilegeWithOption = new Builder() + .setComponent(component) + .setAction(SearchConstants.QUERY) + .setService(SERVICE) + .setAuthorizables(Collections.singletonList(new Collection(COLLECTION_NAME))) + .withGrantOption(true) + .build(); + + sentryStore.alterRoleGrantPrivilege(component, roleName, queryPrivilegeWithOption, grantor); + + assertEquals(Sets.newHashSet(queryPrivilege, queryPrivilegeWithOption), + sentryStore.getPrivilegesByRole(component, Sets.newHashSet(roleName))); + + PrivilegeObject queryPrivilegeWithNoOption = new Builder() + .setComponent(component) + .setAction(SearchConstants.QUERY) + .setService(SERVICE) + .setAuthorizables(Collections.singletonList(new Collection(COLLECTION_NAME))) + .withGrantOption(false) + .build(); + + sentryStore.alterRoleGrantPrivilege(component, roleName, queryPrivilegeWithNoOption, grantor); + + assertEquals(Sets.newHashSet(queryPrivilege, queryPrivilegeWithOption, queryPrivilegeWithNoOption), + sentryStore.getPrivilegesByRole(component, Sets.newHashSet(roleName))); + } + + public static final class InvalidActionFactory { + + } + + public static final class MyComponentActionFactory extends BitFieldActionFactory { + + public enum MyComponentActionType { + FOO("foo", 1), + BAR("bar", 2), + QUERY(SearchConstants.QUERY, 4), + ALL("*", FOO.getCode() | BAR.getCode() | QUERY.getCode()); + + private String name; + private int code; + MyComponentActionType(String name, int code) { + this.name = name; + this.code = code; + } + + public int getCode() { + return code; + } + + public String getName() { + return name; + } + + static MyComponentActionType getActionByName(String name) { + for (MyComponentActionType action : MyComponentActionType.values()) { + if (action.name.equalsIgnoreCase(name)) { + return action; + } + } + throw new RuntimeException("can't get MyComponentActionType by name:" + name); + } + + static List<MyComponentActionType> getActionByCode(int code) { + List<MyComponentActionType> actions = Lists.newArrayList(); + for (MyComponentActionType action : MyComponentActionType.values()) { + if ((action.code & code) == action.code && action != MyComponentActionType.ALL) { + // MyComponentActionType.ALL should not be returned in the list + actions.add(action); + } + } + if (actions.isEmpty()) { + throw new RuntimeException("can't get MyComponentActionType by code:" + code); + } + return actions; + } + } + + public static class MyComponentAction extends BitFieldAction { + public MyComponentAction(String name) { + this(MyComponentActionType.getActionByName(name)); + } + public MyComponentAction(MyComponentActionType myComponentActionType) { + super(myComponentActionType.name, myComponentActionType.code); + } + } + + @Override + public List<MyComponentAction> getActionsByCode(int actionCode) { + List<MyComponentAction> actions = Lists.newArrayList();
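+ // Decompose the bit-field code into its base actions: the code of ALL is FOO|BAR|QUERY = 1|2|4 = 7, + // so getActionsByCode(7) yields FOO, BAR and QUERY (getActionByCode filters out ALL itself). +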
for (MyComponentActionType action : MyComponentActionType.getActionByCode(actionCode)) { + actions.add(new MyComponentAction(action)); + } + return actions; + } + + @Override + public BitFieldAction getActionByName(String name) { + // Check whether the name is ALL + if (SqoopActionConstant.ALL_NAME.equalsIgnoreCase(name)) { + return new MyComponentAction(MyComponentActionType.ALL); + } + return new MyComponentAction(name); + } + } } diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestSentryGMPrivilege.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestSentryGMPrivilege.java index 141169285..258721e90 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestSentryGMPrivilege.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestSentryGMPrivilege.java @@ -17,9 +17,9 @@ */ package org.apache.sentry.provider.db.generic.service.persistent; -import static junit.framework.Assert.assertTrue; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.fail; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; import java.util.Arrays; @@ -35,21 +35,21 @@ public class TestSentryGMPrivilege { @Test public void testValidateAuthorizables() throws Exception { try { - MSentryGMPrivilege fieldPrivilege = new MSentryGMPrivilege("solr", + new MSentryGMPrivilege("solr", "service1", Arrays.asList(new Collection("c1"), new Field("f1")),SearchConstants.QUERY, false); } catch (IllegalStateException e) { fail("unexpected exception: it is a valid privilege"); } try { - MSentryGMPrivilege collectionPrivilege = new MSentryGMPrivilege("solr", + new MSentryGMPrivilege("solr", "service1", Arrays.asList(new Collection(""), new Field("f1")),SearchConstants.QUERY, false); fail("expected an exception: the privilege is not valid, the name of an authorizable can't be empty"); } catch (IllegalStateException e) { } try { - MSentryGMPrivilege fieldPrivilege = new MSentryGMPrivilege("solr", + new MSentryGMPrivilege("solr", "service1", Arrays.asList(null, new Field("f1")),SearchConstants.QUERY, false); fail("expected an exception: the privilege is not valid, an authorizable can't be null"); } catch (IllegalStateException e) { } diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestSentryRole.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestSentryRole.java index f8eecd98f..7b4d3b8ac 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestSentryRole.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/persistent/TestSentryRole.java @@ -17,9 +17,9 @@ */ package org.apache.sentry.provider.db.generic.service.persistent; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertTrue; -import static junit.framework.Assert.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.util.Arrays; @@ -39,9 +39,7 @@ import org.apache.sentry.provider.db.service.persistent.SentryStore; import
org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; import org.junit.After; -import org.junit.AfterClass; import org.junit.Before; -import org.junit.BeforeClass; import org.junit.Test; import com.google.common.base.Preconditions; diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceIntegrationBase.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceIntegrationBase.java new file mode 100644 index 000000000..cec925b69 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/SentryGenericServiceIntegrationBase.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.generic.service.thrift; + +import java.security.PrivilegedExceptionAction; +import java.util.Set; + +import javax.security.auth.Subject; + +import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; +import org.junit.After; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SentryGenericServiceIntegrationBase extends SentryServiceIntegrationBase { + private static final Logger LOGGER = LoggerFactory.getLogger(SentryGenericServiceIntegrationBase.class); + protected static final String SOLR = "SOLR"; + protected SentryGenericServiceClient client; + + /** + * use the generic client to connect to the sentry service + */ + @Override + public void connectToSentryService() throws Exception { + // The client should already be logged in when running in solr, + // therefore we must log in manually in the integration tests + if (kerberos) { + this.client = Subject.doAs(clientSubject, new PrivilegedExceptionAction<SentryGenericServiceClient>() { + @Override + public SentryGenericServiceClient run() throws Exception { + return SentryGenericServiceClientFactory.create(conf); + } + }); + } else { + this.client = SentryGenericServiceClientFactory.create(conf); + } + } + + @After + public void after() { + try { + runTestAsSubject(new TestOperation(){ + @Override + public void runTestAsSubject() throws Exception { + Set<TSentryRole> tRoles = client.listAllRoles(ADMIN_USER, SOLR); + for (TSentryRole tRole : tRoles) { + client.dropRole(ADMIN_USER, tRole.getRoleName(), SOLR); + } + if (client != null) { + client.close(); + } + } + }); + } catch (Exception e) { + LOGGER.error(e.getMessage(), e); + } finally { + policyFilePath.delete(); + } + } +} diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestAuditLogForSentryGenericService.java
b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestAuditLogForSentryGenericService.java new file mode 100644 index 000000000..c3adacf52 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestAuditLogForSentryGenericService.java @@ -0,0 +1,299 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.provider.db.generic.service.thrift; + +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import javax.security.auth.Subject; + +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.sentry.provider.db.log.appender.AuditLoggerTestAppender; +import org.apache.sentry.provider.db.log.util.CommandUtil; +import org.apache.sentry.provider.db.log.util.Constants; +import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; +import org.codehaus.jettison.json.JSONObject; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; + +public class TestAuditLogForSentryGenericService extends SentryServiceIntegrationBase { + + private SentryGenericServiceClient client; + private static final String COMPONENT = "SQOOP"; + private static final org.slf4j.Logger LOGGER = LoggerFactory + .getLogger(TestAuditLogForSentryGenericService.class); + + @BeforeClass + public static void setup() throws Exception { + SentryServiceIntegrationBase.setup(); + Logger logger = Logger.getLogger("sentry.generic.authorization.ddl.logger"); + AuditLoggerTestAppender testAppender = new AuditLoggerTestAppender(); + logger.addAppender(testAppender); + logger.setLevel(Level.INFO); + } + + @Override + @After + public void after() { + try { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Set<TSentryRole> tRoles = client.listAllRoles(ADMIN_USER, COMPONENT); + for (TSentryRole tRole : tRoles) { + client.dropRole(ADMIN_USER, tRole.getRoleName(), COMPONENT); + } + if (client != null) { + client.close(); + } + } + }); + } catch (Exception e) { + // log the exception + LOGGER.warn("Exception happened after test case.", e); + } finally { + policyFilePath.delete(); + } + } + + /** + * use the generic client to connect to the sentry service + */ + @Override + public void connectToSentryService() throws Exception { + if (kerberos) { + this.client = Subject.doAs(clientSubject, + new
PrivilegedExceptionAction<SentryGenericServiceClient>() { + @Override + public SentryGenericServiceClient run() throws Exception { + return SentryGenericServiceClientFactory.create(conf); + } + }); + } else { + this.client = SentryGenericServiceClientFactory.create(conf); + } + } + + @Test + public void testAuditLogForGenericModel() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + String requestorUserName = ADMIN_USER; + Set<String> requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP); + String roleName = "admin_r"; + String testGroupName = "g1"; + String action = "all"; + String service = "sentryService"; + setLocalGroupMapping(requestorUserName, requestorUserGroupNames); + writePolicyFile(); + + // test the audit log for create role, success + client.createRole(requestorUserName, roleName, COMPONENT); + Map<String, String> fieldValueMap = new HashMap<String, String>(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_CREATE_ROLE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "CREATE ROLE " + roleName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + + // test the audit log for create role, failed + try { + client.createRole(requestorUserName, roleName, COMPONENT); + fail("Exception should have been thrown"); + } catch (Exception e) { + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_CREATE_ROLE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "CREATE ROLE " + roleName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + } + + // test the audit log for add role to group, success + client.addRoleToGroups(requestorUserName, roleName, COMPONENT, + Sets.newHashSet(testGroupName)); + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_ADD_ROLE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ROLE " + roleName + + " TO GROUP " + testGroupName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + + // test the audit log for add role to group, failed + try { + client.addRoleToGroups(requestorUserName, "invalidRole", COMPONENT, + Sets.newHashSet(testGroupName)); + fail("Exception should have been thrown"); + } catch (Exception e) { + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_ADD_ROLE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ROLE invalidRole TO GROUP " + + testGroupName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + } + + // test the audit log for grant privilege, success + TSentryPrivilege privilege = new TSentryPrivilege(COMPONENT, service, Lists.newArrayList( + new TAuthorizable("resourceType1", "resourceName1"), new TAuthorizable("resourceType2", + "resourceName2")), action); + client.grantPrivilege(requestorUserName, roleName, COMPONENT, privilege); + fieldValueMap.clear();
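+ // A null LOG_FIELD_IP_ADDRESS in these expected-field maps means assertAuditLog only + // verifies that the logged event carries a well-formed IP address, not an exact value. +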
fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, + "GRANT ALL ON resourceType1 resourceName1 resourceType2 resourceName2 TO ROLE " + + roleName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + + // invalid privilege used to exercise the error audit log + TSentryPrivilege invalidPrivilege = new TSentryPrivilege(COMPONENT, service, + Lists.newArrayList(new TAuthorizable("resourceType1", "resourceName1")), + "invalidAction"); + // test the audit log for grant privilege, failed + try { + client.grantPrivilege(requestorUserName, roleName, COMPONENT, invalidPrivilege); + fail("Exception should have been thrown"); + } catch (Exception e) { + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, + "GRANT INVALIDACTION ON resourceType1 resourceName1 TO ROLE " + roleName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + } + + // test the audit log for revoke privilege, success + client.revokePrivilege(requestorUserName, roleName, COMPONENT, privilege); + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_REVOKE_PRIVILEGE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, + "REVOKE ALL ON resourceType1 resourceName1 resourceType2 resourceName2 FROM ROLE " + + roleName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + + // test the audit log for revoke privilege, failed + try { + client.revokePrivilege(requestorUserName, "invalidRole", COMPONENT, invalidPrivilege); + fail("Exception should have been thrown"); + } catch (Exception e) { + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_REVOKE_PRIVILEGE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, + "REVOKE INVALIDACTION ON resourceType1 resourceName1 FROM ROLE invalidRole"); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + } + + // test the audit log for delete role from group, success + client.deleteRoleToGroups(requestorUserName, roleName, COMPONENT, + Sets.newHashSet(testGroupName)); + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DELETE_ROLE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE ROLE " + roleName + + " FROM GROUP " + testGroupName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + // test the audit log for delete role from group, failed + try { + client.deleteRoleToGroups(requestorUserName, "invalidRole", COMPONENT, + Sets.newHashSet(testGroupName)); + fail("Exception should have been thrown"); + } catch (Exception e) { + 
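+ // Even a rejected call is expected to leave an audit trail: the assertions below mirror
+ // the success case above, with LOG_FIELD_ALLOWED flipped to Constants.FALSE.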
fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DELETE_ROLE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, + "REVOKE ROLE invalidRole FROM GROUP " + testGroupName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + } + // test the audit log for drop role, success + client.dropRole(requestorUserName, roleName, COMPONENT); + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DROP_ROLE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "DROP ROLE " + roleName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + // test the audit log for drop role, failed + try { + client.dropRole(requestorUserName, roleName, COMPONENT); + fail("Exception should have been thrown"); + } catch (Exception e) { + fieldValueMap.clear(); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DROP_ROLE); + fieldValueMap.put(Constants.LOG_FIELD_COMPONENT, COMPONENT); + fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "DROP ROLE " + roleName); + fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); + } + } + }); + } + + private void assertAuditLog(Map<String, String> fieldValueMap) throws Exception { + assertThat(AuditLoggerTestAppender.getLastLogLevel(), is(Level.INFO)); + JSONObject jsonObject = new JSONObject(AuditLoggerTestAppender.getLastLogEvent()); + if (fieldValueMap != null) { + for (Map.Entry<String, String> entry : fieldValueMap.entrySet()) { + String entryKey = entry.getKey(); + if (Constants.LOG_FIELD_IP_ADDRESS.equals(entryKey)) { + assertTrue(CommandUtil.assertIPInAuditLog(jsonObject.get(entryKey).toString())); + } else { + assertTrue(entry.getValue().equalsIgnoreCase(jsonObject.get(entryKey).toString())); + } + } + } + } +} diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestSentryGenericPolicyProcessor.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestSentryGenericPolicyProcessor.java index b86c6b2c1..84eeb8216 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestSentryGenericPolicyProcessor.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestSentryGenericPolicyProcessor.java @@ -17,18 +17,12 @@ */ package org.apache.sentry.provider.db.generic.service.thrift; -import static junit.framework.Assert.assertEquals; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyListOf; import static org.mockito.Matchers.anySetOf; import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Set; -import java.util.UUID; +import java.util.*; import org.apache.hadoop.conf.Configuration; import org.apache.sentry.core.common.Authorizable; @@ -43,7 +37,8 @@ import org.apache.sentry.provider.db.generic.service.persistent.PrivilegeObject; import 
org.apache.sentry.provider.db.generic.service.persistent.SentryStoreLayer; import org.apache.sentry.provider.db.generic.service.persistent.PrivilegeObject.Builder; -import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericPolicyProcessor; +import org.apache.sentry.provider.db.service.model.MSentryGMPrivilege; +import org.apache.sentry.provider.db.service.model.MSentryRole; import org.apache.sentry.provider.db.service.persistent.CommitContext; import org.apache.sentry.provider.db.service.thrift.PolicyStoreConstants; import org.apache.sentry.provider.db.service.thrift.SentryConfigurationException; @@ -52,16 +47,17 @@ import org.apache.sentry.service.thrift.TSentryResponseStatus; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import com.google.common.collect.Sets; -public class TestSentryGenericPolicyProcessor { +public class TestSentryGenericPolicyProcessor extends org.junit.Assert { private static final String ADMIN_GROUP = "admin_group"; private static final String ADMIN_USER = "admin_user"; private static final UUID SERVER_UUID = UUID.randomUUID(); private static final long SEQ_ID = 10000; - private SentryStoreLayer mockStore = mock(SentryStoreLayer.class); + private SentryStoreLayer mockStore = Mockito.mock(SentryStoreLayer.class); private SentryGenericPolicyProcessor processor; @Before @@ -118,22 +114,22 @@ private Status fromTSentryStatus(TSentryResponseStatus status) { @Test public void testAdminOperation() throws Exception { - when(mockStore.createRole(anyString(), anyString(), anyString())) + Mockito.when(mockStore.createRole(anyString(), anyString(), anyString())) .thenReturn(new CommitContext(SERVER_UUID, SEQ_ID)); - when(mockStore.dropRole(anyString(), anyString(), anyString())) + Mockito.when(mockStore.dropRole(anyString(), anyString(), anyString())) .thenReturn(new CommitContext(SERVER_UUID, SEQ_ID + 1)); - when(mockStore.alterRoleAddGroups(anyString(), anyString(), anySetOf(String.class),anyString())) + Mockito.when(mockStore.alterRoleAddGroups(anyString(), anyString(), anySetOf(String.class),anyString())) .thenReturn(new CommitContext(SERVER_UUID, SEQ_ID + 2)); - when(mockStore.alterRoleDeleteGroups(anyString(), anyString(),anySetOf(String.class), anyString())) + Mockito.when(mockStore.alterRoleDeleteGroups(anyString(), anyString(),anySetOf(String.class), anyString())) .thenReturn(new CommitContext(SERVER_UUID, SEQ_ID + 3)); - when(mockStore.dropPrivilege(anyString(), any(PrivilegeObject.class), anyString())) + Mockito.when(mockStore.dropPrivilege(anyString(), any(PrivilegeObject.class), anyString())) .thenReturn(new CommitContext(SERVER_UUID, SEQ_ID + 4)); - when(mockStore.renamePrivilege(anyString(), anyString(), anyListOf(Authorizable.class), + Mockito.when(mockStore.renamePrivilege(anyString(), anyString(), anyListOf(Authorizable.class), anyListOf(Authorizable.class), anyString())) .thenReturn(new CommitContext(SERVER_UUID, SEQ_ID + 5)); testOperation(ADMIN_USER, Status.OK); @@ -141,10 +137,10 @@ public void testAdminOperation() throws Exception { @Test public void testGrantAndRevokePrivilege() throws Exception { - when(mockStore.alterRoleGrantPrivilege(anyString(), anyString(), any(PrivilegeObject.class), anyString())) + Mockito.when(mockStore.alterRoleGrantPrivilege(anyString(), anyString(), any(PrivilegeObject.class), anyString())) .thenReturn(new CommitContext(SERVER_UUID, SEQ_ID + 6)); - when(mockStore.alterRoleRevokePrivilege(anyString(), anyString(),any(PrivilegeObject.class), anyString())) + 
Mockito.when(mockStore.alterRoleRevokePrivilege(anyString(), anyString(),any(PrivilegeObject.class), anyString())) .thenReturn(new CommitContext(SERVER_UUID, SEQ_ID + 7)); setup(); @@ -166,28 +162,34 @@ public void testGrantAndRevokePrivilege() throws Exception { @Test public void testOperationWithException() throws Exception { - when(mockStore.createRole(anyString(), anyString(), anyString())) - .thenThrow(new SentryAlreadyExistsException("role already exists")); + String roleName = anyString(); + Mockito.when(mockStore.createRole(anyString(), roleName, anyString())) + .thenThrow(new SentryAlreadyExistsException("Role: " + roleName + " already exists")); - when(mockStore.dropRole(anyString(), anyString(), anyString())) - .thenThrow(new SentryNoSuchObjectException("role isn't exist")); + roleName = anyString(); + Mockito.when(mockStore.dropRole(anyString(), roleName, anyString())) + .thenThrow(new SentryNoSuchObjectException("Role: " + roleName + " doesn't exist")); - when(mockStore.alterRoleAddGroups(anyString(), anyString(), anySetOf(String.class),anyString())) - .thenThrow(new SentryNoSuchObjectException("role isn't exist")); + roleName = anyString(); + Mockito.when(mockStore.alterRoleAddGroups(anyString(), roleName, anySetOf(String.class),anyString())) + .thenThrow(new SentryNoSuchObjectException("Role: " + roleName + " doesn't exist")); - when(mockStore.alterRoleDeleteGroups(anyString(), anyString(),anySetOf(String.class), anyString())) - .thenThrow(new SentryNoSuchObjectException("role isn't exist")); + roleName = anyString(); + Mockito.when(mockStore.alterRoleDeleteGroups(anyString(), roleName, anySetOf(String.class), anyString())) + .thenThrow(new SentryNoSuchObjectException("Role: " + roleName + " doesn't exist")); - when(mockStore.alterRoleGrantPrivilege(anyString(), anyString(), any(PrivilegeObject.class), anyString())) - .thenThrow(new SentryGrantDeniedException("has no grant")); + roleName = anyString(); + Mockito.when(mockStore.alterRoleGrantPrivilege(anyString(), roleName, any(PrivilegeObject.class), anyString())) + .thenThrow(new SentryGrantDeniedException("Role: " + roleName + " is not allowed to do grant")); - when(mockStore.alterRoleRevokePrivilege(anyString(), anyString(),any(PrivilegeObject.class), anyString())) - .thenThrow(new SentryGrantDeniedException("has no grant")); + roleName = anyString(); + Mockito.when(mockStore.alterRoleRevokePrivilege(anyString(), roleName, any(PrivilegeObject.class), anyString())) + .thenThrow(new SentryGrantDeniedException("Role: " + roleName + " is not allowed to do grant")); - when(mockStore.dropPrivilege(anyString(), any(PrivilegeObject.class), anyString())) - .thenThrow(new SentryInvalidInputException("nvalid input privilege object")); + Mockito.when(mockStore.dropPrivilege(anyString(), any(PrivilegeObject.class), anyString())) + .thenThrow(new SentryInvalidInputException("Invalid input privilege object")); - when(mockStore.renamePrivilege(anyString(), anyString(), anyListOf(Authorizable.class), + Mockito.when(mockStore.renamePrivilege(anyString(), anyString(), anyListOf(Authorizable.class), anyListOf(Authorizable.class), anyString())) .thenThrow(new RuntimeException("Unknown error")); @@ -254,16 +256,29 @@ public void testGetRolesAndPrivileges() throws Exception { .setAction(SearchConstants.UPDATE) .build(); - when(mockStore.getRolesByGroups(anyString(), anySetOf(String.class))) + MSentryGMPrivilege mSentryGMPrivilege = new MSentryGMPrivilege("SOLR", "service1", + Arrays.asList(new Collection("c1"), new Field("f1")), + 
SearchConstants.QUERY, true); + + MSentryRole role = new MSentryRole("r1", 290); + mSentryGMPrivilege.setRoles(Sets.newHashSet(role)); + + Mockito.when(mockStore.getRolesByGroups(anyString(), anySetOf(String.class))) .thenReturn(Sets.newHashSet(roleName)); - when(mockStore.getPrivilegesByProvider(anyString(), anyString(), anySetOf(String.class), + Mockito.when(mockStore.getPrivilegesByProvider(anyString(), anyString(), anySetOf(String.class), anySetOf(String.class), anyListOf(Authorizable.class))) .thenReturn(Sets.newHashSet(queryPrivilege, updatePrivilege)); - when(mockStore.getGroupsByRoles(anyString(), anySetOf(String.class))) + Mockito.when(mockStore.getGroupsByRoles(anyString(), anySetOf(String.class))) .thenReturn(Sets.newHashSet(groupName)); + Mockito.when(mockStore.getPrivilegesByAuthorizable(anyString(), anyString(), anySetOf(String.class), anyListOf(Authorizable.class))) + .thenReturn(Sets.newHashSet(mSentryGMPrivilege)); + + Mockito.when(mockStore.getAllRoleNames()) + .thenReturn(Sets.newHashSet(roleName)); + TListSentryPrivilegesRequest request1 = new TListSentryPrivilegesRequest(); request1.setRoleName(roleName); request1.setRequestorUserName(ADMIN_USER); @@ -284,6 +299,18 @@ public void testGetRolesAndPrivileges() throws Exception { TListSentryPrivilegesForProviderResponse response3 = processor.list_sentry_privileges_for_provider(request3); assertEquals(Status.OK, fromTSentryStatus(response3.getStatus())); assertEquals(2, response3.getPrivileges().size()); + + TListSentryPrivilegesByAuthRequest request4 = new TListSentryPrivilegesByAuthRequest(); + request4.setGroups(Sets.newHashSet(groupName)); + request4.setRoleSet(new TSentryActiveRoleSet(true, null)); + request4.setRequestorUserName(ADMIN_USER); + + Set authorizablesSet = Sets.newHashSet("Collection=c1->Field=f1"); + request4.setAuthorizablesSet(authorizablesSet); + + TListSentryPrivilegesByAuthResponse response4 = processor.list_sentry_privileges_by_authorizable(request4); + assertEquals(Status.OK, fromTSentryStatus(response4.getStatus())); + assertEquals(1, response4.getPrivilegesMapByAuth().size()); } @Test(expected=SentryConfigurationException.class) @@ -301,7 +328,7 @@ public void testConfigCannotCreateSentryStore() throws Exception { } public static class MockGroupMapping implements GroupMappingService { - public MockGroupMapping(Configuration conf, String resource) { + public MockGroupMapping(Configuration conf, String resource) { //NOPMD } @Override public Set getGroups(String user) { diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestSentryGenericServiceIntegration.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestSentryGenericServiceIntegration.java index ae354d9fa..fcf0e7b9d 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestSentryGenericServiceIntegration.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/service/thrift/TestSentryGenericServiceIntegration.java @@ -17,64 +17,26 @@ */ package org.apache.sentry.provider.db.generic.service.thrift; -import static junit.framework.Assert.assertEquals; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; import java.util.Set; -import 
javax.security.auth.Subject; - import org.apache.sentry.SentryUserException; import org.apache.sentry.core.common.ActiveRoleSet; import org.apache.sentry.core.common.Authorizable; import org.apache.sentry.core.model.search.Collection; import org.apache.sentry.core.model.search.Field; import org.apache.sentry.core.model.search.SearchConstants; -import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; -import org.junit.After; import org.junit.Test; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -public class TestSentryGenericServiceIntegration extends SentryServiceIntegrationBase { - private static final String SOLR = "SOLR"; - private SentryGenericServiceClient client; - - /** - * use the generic client to connect sentry service - */ - @Override - public void connectToSentryService() throws Exception { - // The client should already be logged in when running in solr - // therefore we must manually login in the integration tests - if (kerberos) { - this.client = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public SentryGenericServiceClient run() throws Exception { - return new SentryGenericServiceClient(conf); - } - }); - } else { - this.client = new SentryGenericServiceClient(conf); - } - } - - @After - public void after() throws SentryUserException { - Set tRoles = client.listAllRoles(ADMIN_USER, SOLR); - for (TSentryRole tRole : tRoles) { - client.dropRole(ADMIN_USER, tRole.getRoleName(), SOLR); - } - if(client != null) { - client.close(); - } - policyFilePath.delete(); - } +public class TestSentryGenericServiceIntegration extends SentryGenericServiceIntegrationBase { @Test public void testCreateDropShowRole() throws Exception { diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/tools/TestSentryShellSolr.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/tools/TestSentryShellSolr.java new file mode 100644 index 000000000..8eab0281b --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/generic/tools/TestSentryShellSolr.java @@ -0,0 +1,525 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.sentry.provider.db.generic.tools; + +import com.google.common.io.Files; +import com.google.common.collect.Sets; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.PrintStream; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.SentryUserException; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceIntegrationBase; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryRole; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.tools.SentryShellCommon; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestSentryShellSolr extends SentryGenericServiceIntegrationBase { + private File confDir; + private File confPath; + private static String TEST_ROLE_NAME_1 = "testRole1"; + private static String TEST_ROLE_NAME_2 = "testRole2"; + private String requestorName = ""; + private String service = "service1"; + + @Before + public void prepareForTest() throws Exception { + confDir = Files.createTempDir(); + confPath = new File(confDir, "sentry-site.xml"); + if (confPath.createNewFile()) { + FileOutputStream to = new FileOutputStream(confPath); + conf.writeXml(to); + to.close(); + } + requestorName = System.getProperty("user.name", ""); + Set requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP); + setLocalGroupMapping(requestorName, requestorUserGroupNames); + // add ADMIN_USER for the after() in SentryServiceIntegrationBase + setLocalGroupMapping(ADMIN_USER, requestorUserGroupNames); + writePolicyFile(); + } + + @After + public void clearTestData() throws Exception { + FileUtils.deleteQuietly(confDir); + } + + @Test + public void testCreateDropRole() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + // test: create role with -cr + String[] args = { "-cr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + // test: create role with --create_role + args = new String[] { "--create_role", "-r", TEST_ROLE_NAME_2, "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + + // validate the result, list roles with -lr + args = new String[] { "-lr", "-conf", confPath.getAbsolutePath() }; + SentryShellSolr sentryShell = new SentryShellSolr(); + Set roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1, TEST_ROLE_NAME_2); + + // validate the result, list roles with --list_role + args = new String[] { "--list_role", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1, TEST_ROLE_NAME_2); + + // test: drop role with -dr + args = new String[] { "-dr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + // test: drop role with --drop_role + args = new String[] { "--drop_role", "-r", TEST_ROLE_NAME_2, "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + + // validate the result + Set roles = client.listAllRoles(requestorName, SOLR); + assertEquals("Incorrect number of 
roles", 0, roles.size()); + } + }); + } + + @Test + public void testAddDeleteRoleForGroup() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + // Group names are case sensitive - mixed case names should work + String TEST_GROUP_1 = "testGroup1"; + String TEST_GROUP_2 = "testGroup2"; + String TEST_GROUP_3 = "testGroup3"; + + // create the role for test + client.createRole(requestorName, TEST_ROLE_NAME_1, SOLR); + client.createRole(requestorName, TEST_ROLE_NAME_2, SOLR); + // test: add role to group with -arg + String[] args = { "-arg", "-r", TEST_ROLE_NAME_1, "-g", TEST_GROUP_1, "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + // test: add role to multiple groups + args = new String[] { "-arg", "-r", TEST_ROLE_NAME_1, "-g", TEST_GROUP_2 + "," + TEST_GROUP_3, + "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + // test: add role to group with --add_role_group + args = new String[] { "--add_role_group", "-r", TEST_ROLE_NAME_2, "-g", TEST_GROUP_1, + "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + + // validate the result list roles with -lr and -g + args = new String[] { "-lr", "-g", TEST_GROUP_1, "-conf", confPath.getAbsolutePath() }; + SentryShellSolr sentryShell = new SentryShellSolr(); + Set<String> roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1, TEST_ROLE_NAME_2); + + // list roles with --list_role and -g + args = new String[] { "--list_role", "-g", TEST_GROUP_2, "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1); + + args = new String[] { "--list_role", "-g", TEST_GROUP_3, "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1); + + // test: delete role from group with -drg + args = new String[] { "-drg", "-r", TEST_ROLE_NAME_1, "-g", TEST_GROUP_1, "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + // test: delete role from multiple groups + args = new String[] { "-drg", "-r", TEST_ROLE_NAME_1, "-g", TEST_GROUP_2 + "," + TEST_GROUP_3, + "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + // test: delete role from group with --delete_role_group + args = new String[] { "--delete_role_group", "-r", TEST_ROLE_NAME_2, "-g", TEST_GROUP_1, + "-conf", confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + + // validate the result + Set<TSentryRole> roles = client.listRolesByGroupName(requestorName, TEST_GROUP_1, SOLR); + assertEquals("Incorrect number of roles", 0, roles.size()); + roles = client.listRolesByGroupName(requestorName, TEST_GROUP_2, SOLR); + assertEquals("Incorrect number of roles", 0, roles.size()); + roles = client.listRolesByGroupName(requestorName, TEST_GROUP_3, SOLR); + assertEquals("Incorrect number of roles", 0, roles.size()); + // clear the test data + client.dropRole(requestorName, TEST_ROLE_NAME_1, SOLR); + client.dropRole(requestorName, TEST_ROLE_NAME_2, SOLR); + } + }); + } + + @Test + public void testCaseSensitiveGroupName() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + + // create the role for test + client.createRole(requestorName, TEST_ROLE_NAME_1, SOLR); + // add role 
to a group (lower case) + String[] args = { "-arg", "-r", TEST_ROLE_NAME_1, "-g", "group1", "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + + // validate the roles when group name is same case as above + args = new String[] { "-lr", "-g", "group1", "-conf", confPath.getAbsolutePath() }; + SentryShellSolr sentryShell = new SentryShellSolr(); + Set roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1); + + // roles should be empty when group name is different case than above + args = new String[] { "-lr", "-g", "GROUP1", "-conf", confPath.getAbsolutePath() }; + roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames); + } + }); + } + + public static String grant(boolean shortOption) { + return shortOption ? "-gpr" : "--grant_privilege_role"; + } + + public static String revoke(boolean shortOption) { + return shortOption ? "-rpr" : "--revoke_privilege_role"; + } + + public static String list(boolean shortOption) { + return shortOption ? "-lp" : "--list_privilege"; + } + + private void assertGrantRevokePrivilege(final boolean shortOption) throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + // create the role for test + client.createRole(requestorName, TEST_ROLE_NAME_1, SOLR); + client.createRole(requestorName, TEST_ROLE_NAME_2, SOLR); + + String [] privs = { + "Collection=*->action=*", + "Collection=collection2->action=update", + "Collection=collection3->action=query", + }; + for (int i = 0; i < privs.length; ++i) { + // test: grant privilege to role + String [] args = new String [] { grant(shortOption), "-r", TEST_ROLE_NAME_1, "-p", + privs[ i ], + "-conf", confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + } + + // test the list privilege + String [] args = new String[] { list(shortOption), "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + SentryShellSolr sentryShell = new SentryShellSolr(); + Set privilegeStrs = getShellResultWithOSRedirect(sentryShell, args, true); + assertEquals("Incorrect number of privileges", privs.length, privilegeStrs.size()); + for (int i = 0; i < privs.length; ++i) { + assertTrue("Expected privilege: " + privs[ i ], privilegeStrs.contains(privs[ i ])); + } + + for (int i = 0; i < privs.length; ++i) { + args = new String[] { revoke(shortOption), "-r", TEST_ROLE_NAME_1, "-p", + privs[ i ], "-conf", + confPath.getAbsolutePath() }; + SentryShellSolr.main(args); + Set privileges = client.listPrivilegesByRoleName(requestorName, + TEST_ROLE_NAME_1, SOLR, service); + assertEquals("Incorrect number of privileges", privs.length - (i + 1), privileges.size()); + } + + // clear the test data + client.dropRole(requestorName, TEST_ROLE_NAME_1, SOLR); + client.dropRole(requestorName, TEST_ROLE_NAME_2, SOLR); + } + }); + } + + + @Test + public void testGrantRevokePrivilegeWithShortOption() throws Exception { + assertGrantRevokePrivilege(true); + } + + @Test + public void testGrantRevokePrivilegeWithLongOption() throws Exception { + assertGrantRevokePrivilege(false); + } + + + @Test + public void testNegativeCaseWithInvalidArgument() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + client.createRole(requestorName, TEST_ROLE_NAME_1, SOLR); + // test: create duplicate role with -cr + String[] args = { "-cr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + 
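+ // The negative cases below exercise two distinct failure channels (editor's reading of
+ // this test, stated as an assumption): server-side rejections surface as
+ // SentryUserException from executeShell(args), while malformed privilege strings fail
+ // client-side with IllegalArgumentException.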
SentryShellSolr sentryShell = new SentryShellSolr(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for creating duplicate role"); + } catch (SentryUserException e) { + // expected exception + } + + // test: drop non-existent role with -dr + args = new String[] { "-dr", "-r", TEST_ROLE_NAME_2, "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for dropping non-existent role"); + } catch (SentryUserException e) { + // expected exception + } + + // test: add non-existent role to group with -arg + args = new String[] { "-arg", "-r", TEST_ROLE_NAME_2, "-g", "testGroup1", "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for granting non-existent role to group"); + } catch (SentryUserException e) { + // expected exception + } + + // test: drop group from non-existent role with -drg + args = new String[] { "-drg", "-r", TEST_ROLE_NAME_2, "-g", "testGroup1", "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for drop group from non-existent role"); + } catch (SentryUserException e) { + // expected exception + } + + // test: grant privilege to role with an invalid privilege format + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", "serverserver1->action=*", + "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for the invalid privilege format, invalid key value."); + } catch (IllegalArgumentException e) { + // expected exception + } + + // test: grant privilege to role with an invalid privilege hierarchy + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->table=tbl1->column=col2->action=insert", "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for the invalid privilege hierarchy."); + } catch (IllegalArgumentException e) { + // expected exception + } + + // clear the test data + client.dropRole(requestorName, TEST_ROLE_NAME_1, SOLR); + } + }); + } + + @Test + public void testNegativeCaseWithoutRequiredArgument() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + String strOptionConf = "conf"; + client.createRole(requestorName, TEST_ROLE_NAME_1, SOLR); + // test: the conf is a required argument + String[] args = { "-cr", "-r", TEST_ROLE_NAME_1 }; + SentryShellSolr sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + strOptionConf); + + // test: -r is required when create role + args = new String[] { "-cr", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -r is required when drop role + args = new String[] { "-dr", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -r is required when add role to group + 
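+ // The matched prefix is assumed to follow the commons-cli convention wrapped by
+ // SentryShellCommon, i.e. PREFIX_MESSAGE_MISSING_OPTION followed by the description
+ // constant of the absent option.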
args = new String[] { "-arg", "-g", "testGroup1", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -g is required when add role to group + args = new String[] { "-arg", "-r", TEST_ROLE_NAME_2, "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_GROUP_NAME); + + // test: -r is required when delete role from group + args = new String[] { "-drg", "-g", "testGroup1", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -g is required when delete role from group + args = new String[] { "-drg", "-r", TEST_ROLE_NAME_2, "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_GROUP_NAME); + + // test: -r is required when grant privilege to role + args = new String[] { "-gpr", "-p", "server=server1", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -p is required when grant privilege to role + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_PRIVILEGE); + + // test: action is required in privilege + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath(), "-p", "collection=collection1" }; + sentryShell = new SentryShellSolr(); + try { + getShellResultWithOSRedirect(sentryShell, args, false); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals("Privilege is invalid: action required but not specified.", e.getMessage()); + } + + // test: -r is required when revoke privilege from role + args = new String[] { "-rpr", "-p", "server=server1", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -p is required when revoke privilege from role + args = new String[] { "-rpr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_PRIVILEGE); + + // test: command option is required for shell + args = new String[] {"-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellSolr(); + validateMissingParameterMsgsContains(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + "[", + "-arg Add role to group", + "-cr Create role", + "-rpr Revoke privilege from role", + "-drg Delete role from group", + "-lr List role", + "-lp List privilege", + "-gpr Grant privilege to role", + "-dr Drop role"); + + // 
clear the test data + client.dropRole(requestorName, TEST_ROLE_NAME_1, SOLR); + } + }); + } + + // redirect the System.out to ByteArrayOutputStream, then execute the command and parse the result. + private Set<String> getShellResultWithOSRedirect(SentryShellSolr sentryShell, + String[] args, boolean expectedExecuteResult) throws Exception { + PrintStream oldOut = System.out; + ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + System.setOut(new PrintStream(outContent)); + assertEquals(expectedExecuteResult, sentryShell.executeShell(args)); + Set<String> resultSet = Sets.newHashSet(outContent.toString().split("\n")); + System.setOut(oldOut); + return resultSet; + } + + private void validateRoleNames(Set<String> roleNames, String ... expectedRoleNames) { + if (expectedRoleNames != null && expectedRoleNames.length > 0) { + assertEquals("Found: " + roleNames.size() + " roles, expected: " + expectedRoleNames.length, + expectedRoleNames.length, roleNames.size()); + Set<String> lowerCaseRoles = new HashSet<String>(); + for (String role : roleNames) { + lowerCaseRoles.add(role.toLowerCase()); + } + + for (String expectedRole : expectedRoleNames) { + assertTrue("Expected role: " + expectedRole, + lowerCaseRoles.contains(expectedRole.toLowerCase())); + } + } + } + + private void validateMissingParameterMsg(SentryShellSolr sentryShell, String[] args, + String expectedErrorMsg) throws Exception { + Set<String> errorMsgs = getShellResultWithOSRedirect(sentryShell, args, false); + assertTrue("Expected error message: " + expectedErrorMsg, errorMsgs.contains(expectedErrorMsg)); + } + + private void validateMissingParameterMsgsContains(SentryShellSolr sentryShell, String[] args, + String ... expectedErrorMsgsContains) throws Exception { + Set<String> errorMsgs = getShellResultWithOSRedirect(sentryShell, args, false); + boolean foundAllMessages = false; + Iterator<String> it = errorMsgs.iterator(); + while (it.hasNext()) { + String errorMessage = it.next(); + boolean missingExpected = false; + for (String expectedContains : expectedErrorMsgsContains) { + if (!errorMessage.contains(expectedContains)) { + missingExpected = true; + break; + } + } + if (!missingExpected) { + foundAllMessages = true; + break; + } + } + assertTrue(foundAllMessages); + } +} diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/appender/TestRollingFileWithoutDeleteAppender.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/appender/TestRollingFileWithoutDeleteAppender.java index 15393da84..ca9062bca 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/appender/TestRollingFileWithoutDeleteAppender.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/appender/TestRollingFileWithoutDeleteAppender.java @@ -18,13 +18,12 @@ package org.apache.sentry.provider.db.log.appender; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.junit.Assert.assertTrue; import java.io.File; -import junit.framework.ComparisonFailure; - import org.apache.commons.io.FileUtils; import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout; @@ -74,6 +73,30 @@ public void testRollOver() throws Throwable { } + /*** + * Generate enough log output to cause a single rollover. 
Verify the file name format + * @throws Throwable + */ + @Test + public void testFileNamePattern() throws Throwable { + if (dataDir == null) { + fail("Expected temp folder for audit log to be created."); + } + RollingFileWithoutDeleteAppender appender = new RollingFileWithoutDeleteAppender( + new PatternLayout("%m%n"), dataDir.getPath() + "/auditLog.log"); + appender.setMaximumFileSize(10); + sentryLogger.addAppender(appender); + sentryLogger.debug("123456789012345"); + File[] files = dataDir.listFiles(); + if (files != null) { + assertEquals(2, files.length); + assertTrue(files[0].getName().contains("auditLog.log.")); + assertTrue(files[1].getName().contains("auditLog.log.")); + } else { + fail("Expected 2 log files."); + } + } + @After public void destroy() { if (dataDir != null) { diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestAuditMetadataLogEntity.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestDbAuditMetadataLogEntity.java similarity index 87% rename from sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestAuditMetadataLogEntity.java rename to sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestDbAuditMetadataLogEntity.java index 95b51e9a4..3d336af94 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestAuditMetadataLogEntity.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestDbAuditMetadataLogEntity.java @@ -18,21 +18,21 @@ package org.apache.sentry.provider.db.log.entity; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import org.apache.sentry.provider.db.log.util.Constants; import org.codehaus.jackson.JsonNode; import org.codehaus.jackson.node.ContainerNode; import org.junit.Test; -public class TestAuditMetadataLogEntity extends TestCase { +public class TestDbAuditMetadataLogEntity { @Test public void testToJsonFormatLog() throws Throwable { - AuditMetadataLogEntity amle = new AuditMetadataLogEntity("serviceName", - "userName", "impersonator", "ipAddress", "operation", "eventTime", - "operationText", "allowed", "databaseName", "tableName", "columnName", - "resourcePath", "objectType"); + DBAuditMetadataLogEntity amle = new DBAuditMetadataLogEntity("serviceName", "userName", + "impersonator", "ipAddress", "operation", "eventTime", "operationText", "allowed", + "objectType", "component", "databaseName", "tableName", "columnName", "resourcePath"); String jsonAuditLog = amle.toJsonFormatLog(); ContainerNode rootNode = AuditMetadataLogEntity.parse(jsonAuditLog); assertEntryEquals(rootNode, Constants.LOG_FIELD_SERVICE_NAME, "serviceName"); diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestGMAuditMetadataLogEntity.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestGMAuditMetadataLogEntity.java new file mode 100644 index 000000000..bbee1b489 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestGMAuditMetadataLogEntity.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.provider.db.log.entity; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.sentry.provider.db.log.util.Constants; +import org.codehaus.jackson.JsonNode; +import org.codehaus.jackson.node.ContainerNode; +import org.junit.Test; + +public class TestGMAuditMetadataLogEntity { + @Test + public void testToJsonFormatLog() throws Throwable { + + Map privilegesMap = new HashMap(); + privilegesMap.put("resourceType1", "resourceName1"); + privilegesMap.put("resourceType2", "resourceName2"); + privilegesMap.put("resourceType3", "resourceName3"); + privilegesMap.put("resourceType4", "resourceName4"); + GMAuditMetadataLogEntity gmamle = new GMAuditMetadataLogEntity("serviceName", "userName", + "impersonator", "ipAddress", "operation", "eventTime", "operationText", "allowed", + "objectType", "component", privilegesMap); + String jsonAuditLog = gmamle.toJsonFormatLog(); + ContainerNode rootNode = AuditMetadataLogEntity.parse(jsonAuditLog); + assertEntryEquals(rootNode, Constants.LOG_FIELD_SERVICE_NAME, "serviceName"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_USER_NAME, "userName"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_IMPERSONATOR, "impersonator"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_IP_ADDRESS, "ipAddress"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_OPERATION, "operation"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_EVENT_TIME, "eventTime"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_OPERATION_TEXT, "operationText"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_ALLOWED, "allowed"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_OBJECT_TYPE, "objectType"); + assertEntryEquals(rootNode, Constants.LOG_FIELD_COMPONENT, "component"); + assertEntryEquals(rootNode, "resourceType1", "resourceName1"); + assertEntryEquals(rootNode, "resourceType2", "resourceName2"); + assertEntryEquals(rootNode, "resourceType3", "resourceName3"); + assertEntryEquals(rootNode, "resourceType4", "resourceName4"); + } + + void assertEntryEquals(ContainerNode rootNode, String key, String value) { + JsonNode node = assertNodeContains(rootNode, key); + assertEquals(value, node.getTextValue()); + } + + private JsonNode assertNodeContains(ContainerNode rootNode, String key) { + JsonNode node = rootNode.get(key); + if (node == null) { + fail("No entry of name \"" + key + "\" found in " + rootNode.toString()); + } + return node; + } +} diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestJsonLogEntityFactory.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestJsonLogEntityFactory.java index bce471792..1ec884041 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestJsonLogEntityFactory.java +++ 
b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestJsonLogEntityFactory.java @@ -18,18 +18,14 @@ package org.apache.sentry.provider.db.log.entity; -import static junit.framework.Assert.assertEquals; +import static org.junit.Assert.assertEquals; -import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; import org.apache.sentry.core.model.db.AccessConstants; -import org.apache.sentry.provider.db.log.util.CommandUtil; import org.apache.sentry.provider.db.log.util.Constants; -import org.apache.sentry.provider.db.service.model.MSentryPrivilege; import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsRequest; import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsResponse; import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleDeleteGroupsRequest; @@ -44,6 +40,7 @@ import org.apache.sentry.provider.db.service.thrift.TDropSentryRoleResponse; import org.apache.sentry.provider.db.service.thrift.TSentryGroup; import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.service.thrift.ThriftUtil; import org.apache.sentry.service.thrift.ServiceConstants.PrivilegeScope; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; import org.apache.sentry.service.thrift.Status; @@ -55,7 +52,6 @@ public class TestJsonLogEntityFactory { private static Configuration conf; - private Logger sentryLogger = Logger.getRootLogger(); private static String TEST_IP = "localhost/127.0.0.1"; private static String TEST_IMPERSONATOR = "impersonator"; @@ -70,8 +66,8 @@ public static void init() { conf = new Configuration(); conf.set(ServerConfig.SENTRY_SERVICE_NAME, ServerConfig.SENTRY_SERVICE_NAME_DEFAULT); - CommandUtil.setIpAddress(TEST_IP); - CommandUtil.setImpersonator(TEST_IMPERSONATOR); + ThriftUtil.setIpAddress(TEST_IP); + ThriftUtil.setImpersonator(TEST_IMPERSONATOR); } @Test @@ -81,18 +77,16 @@ public void testCreateRole() { request.setRequestorUserName(TEST_USER_NAME); request.setRoleName(TEST_ROLE_NAME); response.setStatus(Status.OK()); - AuditMetadataLogEntity amle = (AuditMetadataLogEntity) JsonLogEntityFactory + DBAuditMetadataLogEntity amle = (DBAuditMetadataLogEntity) JsonLogEntityFactory .getInstance().createJsonLogEntity(request, response, conf); assertCommon(amle, Constants.TRUE, Constants.OPERATION_CREATE_ROLE, "CREATE ROLE testRole", null, null, null, Constants.OBJECT_TYPE_ROLE); - sentryLogger.debug(amle.toJsonFormatLog()); response.setStatus(Status.InvalidInput("", null)); - amle = (AuditMetadataLogEntity) JsonLogEntityFactory.getInstance() + amle = (DBAuditMetadataLogEntity) JsonLogEntityFactory.getInstance() .createJsonLogEntity(request, response, conf); assertCommon(amle, Constants.FALSE, Constants.OPERATION_CREATE_ROLE, "CREATE ROLE testRole", null, null, null, Constants.OBJECT_TYPE_ROLE); - sentryLogger.debug(amle.toJsonFormatLog()); } @Test @@ -102,18 +96,16 @@ public void testDropRole() { request.setRequestorUserName(TEST_USER_NAME); request.setRoleName(TEST_ROLE_NAME); response.setStatus(Status.OK()); - AuditMetadataLogEntity amle = (AuditMetadataLogEntity) JsonLogEntityFactory + DBAuditMetadataLogEntity amle = (DBAuditMetadataLogEntity) JsonLogEntityFactory .getInstance().createJsonLogEntity(request, response, conf); assertCommon(amle, Constants.TRUE, Constants.OPERATION_DROP_ROLE, "DROP ROLE testRole", null, null, null, 
Constants.OBJECT_TYPE_ROLE); - sentryLogger.debug(amle.toJsonFormatLog()); response.setStatus(Status.InvalidInput("", null)); - amle = (AuditMetadataLogEntity) JsonLogEntityFactory.getInstance() + amle = (DBAuditMetadataLogEntity) JsonLogEntityFactory.getInstance() .createJsonLogEntity(request, response, conf); assertCommon(amle, Constants.FALSE, Constants.OPERATION_DROP_ROLE, "DROP ROLE testRole", null, null, null, Constants.OBJECT_TYPE_ROLE); - sentryLogger.debug(amle.toJsonFormatLog()); } @Test @@ -130,18 +122,15 @@ public void testGrantRole() { privileges.add(privilege); request.setPrivileges(privileges); response.setStatus(Status.OK()); - AuditMetadataLogEntity amle = new AuditMetadataLogEntity(); + DBAuditMetadataLogEntity amle = new DBAuditMetadataLogEntity(); Set amles = JsonLogEntityFactory .getInstance().createJsonLogEntitys(request, response, conf); assertEquals(amles.size(),1); - for (JsonLogEntity amle1 : amles) { - amle = (AuditMetadataLogEntity) amle1; - break; - } + amle = (DBAuditMetadataLogEntity) amles.iterator().next(); + assertCommon(amle, Constants.TRUE, Constants.OPERATION_GRANT_PRIVILEGE, "GRANT ALL ON DATABASE testDB TO ROLE testRole", TEST_DATABASE_NAME, null, null, Constants.OBJECT_TYPE_PRINCIPAL); - sentryLogger.debug(amle.toJsonFormatLog()); privilege = getPrivilege(AccessConstants.ALL, PrivilegeScope.TABLE.name(), null, TEST_TABLE_NAME, null, null); @@ -152,14 +141,11 @@ public void testGrantRole() { amles = JsonLogEntityFactory.getInstance() .createJsonLogEntitys(request, response, conf); assertEquals(amles.size(),1); - for (JsonLogEntity amle1 : amles) { - amle = (AuditMetadataLogEntity) amle1; - break; - } + amle = (DBAuditMetadataLogEntity) amles.iterator().next(); + assertCommon(amle, Constants.FALSE, Constants.OPERATION_GRANT_PRIVILEGE, "GRANT ALL ON TABLE testTable TO ROLE testRole", null, TEST_TABLE_NAME, null, Constants.OBJECT_TYPE_PRINCIPAL); - sentryLogger.debug(amle.toJsonFormatLog()); } @Test @@ -175,18 +161,15 @@ public void testRevokeRole() { privileges.add(privilege); request.setPrivileges(privileges); response.setStatus(Status.OK()); - AuditMetadataLogEntity amle = new AuditMetadataLogEntity(); + DBAuditMetadataLogEntity amle = new DBAuditMetadataLogEntity(); Set amles = JsonLogEntityFactory .getInstance().createJsonLogEntitys(request, response, conf); assertEquals(amles.size(),1); - for (JsonLogEntity amle1 : amles) { - amle = (AuditMetadataLogEntity) amle1; - break; - } + amle = (DBAuditMetadataLogEntity) amles.iterator().next(); + assertCommon(amle, Constants.TRUE, Constants.OPERATION_REVOKE_PRIVILEGE, "REVOKE ALL ON DATABASE testDB FROM ROLE testRole", TEST_DATABASE_NAME, null, null, Constants.OBJECT_TYPE_PRINCIPAL); - sentryLogger.debug(amle.toJsonFormatLog()); privilege = getPrivilege(AccessConstants.ALL, PrivilegeScope.TABLE.name(), null, TEST_TABLE_NAME, null, null); @@ -197,14 +180,11 @@ public void testRevokeRole() { amles = JsonLogEntityFactory.getInstance() .createJsonLogEntitys(request, response, conf); assertEquals(amles.size(),1); - for (JsonLogEntity amle1 : amles) { - amle = (AuditMetadataLogEntity) amle1; - break; - } + amle = (DBAuditMetadataLogEntity) amles.iterator().next(); + assertCommon(amle, Constants.FALSE, Constants.OPERATION_REVOKE_PRIVILEGE, "REVOKE ALL ON TABLE testTable FROM ROLE testRole", null, TEST_TABLE_NAME, null, Constants.OBJECT_TYPE_PRINCIPAL); - sentryLogger.debug(amle.toJsonFormatLog()); } @Test @@ -215,20 +195,18 @@ public void testAddRole() { request.setRoleName(TEST_ROLE_NAME); 
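+ // getGroups() is defined elsewhere in this class (outside this hunk); it is assumed to
+ // return a single TSentryGroup named "testGroup", which is what the expected operation
+ // text "GRANT ROLE testRole TO GROUP testGroup" below relies on.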
request.setGroups(getGroups()); response.setStatus(Status.OK()); - AuditMetadataLogEntity amle = (AuditMetadataLogEntity) JsonLogEntityFactory + DBAuditMetadataLogEntity amle = (DBAuditMetadataLogEntity) JsonLogEntityFactory .getInstance().createJsonLogEntity(request, response, conf); assertCommon(amle, Constants.TRUE, Constants.OPERATION_ADD_ROLE, "GRANT ROLE testRole TO GROUP testGroup", null, null, null, Constants.OBJECT_TYPE_ROLE); - sentryLogger.debug(amle.toJsonFormatLog()); response.setStatus(Status.InvalidInput("", null)); - amle = (AuditMetadataLogEntity) JsonLogEntityFactory.getInstance() + amle = (DBAuditMetadataLogEntity) JsonLogEntityFactory.getInstance() .createJsonLogEntity(request, response, conf); assertCommon(amle, Constants.FALSE, Constants.OPERATION_ADD_ROLE, "GRANT ROLE testRole TO GROUP testGroup", null, null, null, Constants.OBJECT_TYPE_ROLE); - sentryLogger.debug(amle.toJsonFormatLog()); } @Test @@ -239,23 +217,21 @@ public void testDeleteRole() { request.setRoleName(TEST_ROLE_NAME); request.setGroups(getGroups()); response.setStatus(Status.OK()); - AuditMetadataLogEntity amle = (AuditMetadataLogEntity) JsonLogEntityFactory + DBAuditMetadataLogEntity amle = (DBAuditMetadataLogEntity) JsonLogEntityFactory .getInstance().createJsonLogEntity(request, response, conf); assertCommon(amle, Constants.TRUE, Constants.OPERATION_DELETE_ROLE, "REVOKE ROLE testRole FROM GROUP testGroup", null, null, null, Constants.OBJECT_TYPE_ROLE); - sentryLogger.debug(amle.toJsonFormatLog()); response.setStatus(Status.InvalidInput("", null)); - amle = (AuditMetadataLogEntity) JsonLogEntityFactory.getInstance() + amle = (DBAuditMetadataLogEntity) JsonLogEntityFactory.getInstance() .createJsonLogEntity(request, response, conf); assertCommon(amle, Constants.FALSE, Constants.OPERATION_DELETE_ROLE, "REVOKE ROLE testRole FROM GROUP testGroup", null, null, null, Constants.OBJECT_TYPE_ROLE); - sentryLogger.debug(amle.toJsonFormatLog()); } - private void assertCommon(AuditMetadataLogEntity amle, + private void assertCommon(DBAuditMetadataLogEntity amle, String allowedExcepted, String operationExcepted, String operationTextExcepted, String databaseNameExcepted, String tableNameExcepted, String resourcePathExcepted, @@ -274,37 +250,6 @@ private void assertCommon(AuditMetadataLogEntity amle, assertEquals(objectTypeExcepted, amle.getObjectType()); } - // private TAlterSentryRoleGrantPrivilegeRequest getGrantPrivilegeRequest() { - // TAlterSentryRoleGrantPrivilegeRequest request = new - // TAlterSentryRoleGrantPrivilegeRequest(); - // request.setRoleName(TEST_ROLE_NAME); - // return request; - // } - // - // private TAlterSentryRoleGrantPrivilegeResponse getGrantPrivilegeResponse( - // TSentryResponseStatus status) { - // TAlterSentryRoleGrantPrivilegeResponse response = new - // TAlterSentryRoleGrantPrivilegeResponse(); - // response.setStatus(status); - // return response; - // } - - // private TAlterSentryRoleRevokePrivilegeRequest getRevokePrivilegeRequest() - // { - // TAlterSentryRoleRevokePrivilegeRequest request = new - // TAlterSentryRoleRevokePrivilegeRequest(); - // request.setRoleName(TEST_ROLE_NAME); - // return request; - // } - // - // private TAlterSentryRoleRevokePrivilegeResponse getRevokePrivilegeResponse( - // TSentryResponseStatus status) { - // TAlterSentryRoleRevokePrivilegeResponse response = new - // TAlterSentryRoleRevokePrivilegeResponse(); - // response.setStatus(status); - // return response; - // } - private TSentryPrivilege getPrivilege(String action, String privilegeScope, 
String dbName, String tableName, String serverName, String URI) { TSentryPrivilege privilege = new TSentryPrivilege(); diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestJsonLogEntityFactoryGM.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestJsonLogEntityFactoryGM.java new file mode 100644 index 000000000..dfae5abaf --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/entity/TestJsonLogEntityFactoryGM.java @@ -0,0 +1,259 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.provider.db.log.entity; + +import static org.junit.Assert.assertEquals; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsRequest; +import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleAddGroupsResponse; +import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsRequest; +import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleDeleteGroupsResponse; +import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest; +import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeResponse; +import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest; +import org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeResponse; +import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable; +import org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleRequest; +import org.apache.sentry.provider.db.generic.service.thrift.TCreateSentryRoleResponse; +import org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleRequest; +import org.apache.sentry.provider.db.generic.service.thrift.TDropSentryRoleResponse; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.log.util.Constants; +import org.apache.sentry.provider.db.service.thrift.ThriftUtil; +import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; +import org.apache.sentry.service.thrift.Status; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestJsonLogEntityFactoryGM { + + private static Configuration conf; + private static String TEST_IP = "localhost/127.0.0.1"; + private static String TEST_IMPERSONATOR = "impersonator"; + private static String TEST_ROLE_NAME = "testRole"; + 
private static String TEST_USER_NAME = "requestUser"; + private static String TEST_GROUP = "testGroup"; + private static String TEST_ACTION = "action"; + private static String TEST_COMPONENT = "component"; + private static Map TEST_PRIVILEGES_MAP = new HashMap(); + + @BeforeClass + public static void init() { + conf = new Configuration(); + conf.set(ServerConfig.SENTRY_SERVICE_NAME, ServerConfig.SENTRY_SERVICE_NAME_DEFAULT); + ThriftUtil.setIpAddress(TEST_IP); + ThriftUtil.setImpersonator(TEST_IMPERSONATOR); + TEST_PRIVILEGES_MAP.put("resourceType1", "resourceName1"); + TEST_PRIVILEGES_MAP.put("resourceType2", "resourceName2"); + TEST_PRIVILEGES_MAP.put("resourceType3", "resourceName3"); + } + + @Test + public void testCreateRole() { + TCreateSentryRoleRequest request = new TCreateSentryRoleRequest(); + TCreateSentryRoleResponse response = new TCreateSentryRoleResponse(); + request.setRequestorUserName(TEST_USER_NAME); + request.setRoleName(TEST_ROLE_NAME); + response.setStatus(Status.OK()); + GMAuditMetadataLogEntity amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, response, conf); + assertCommon(amle, Constants.TRUE, Constants.OPERATION_CREATE_ROLE, "CREATE ROLE testRole", + Constants.OBJECT_TYPE_ROLE, new HashMap()); + + response.setStatus(Status.InvalidInput("", null)); + amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance().createJsonLogEntity( + request, response, conf); + assertCommon(amle, Constants.FALSE, Constants.OPERATION_CREATE_ROLE, "CREATE ROLE testRole", + Constants.OBJECT_TYPE_ROLE, new HashMap()); + } + + @Test + public void testDropRole() { + TDropSentryRoleRequest request = new TDropSentryRoleRequest(); + TDropSentryRoleResponse response = new TDropSentryRoleResponse(); + request.setRequestorUserName(TEST_USER_NAME); + request.setRoleName(TEST_ROLE_NAME); + response.setStatus(Status.OK()); + GMAuditMetadataLogEntity amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory + .getInstance().createJsonLogEntity(request, response, conf); + assertCommon(amle, Constants.TRUE, Constants.OPERATION_DROP_ROLE, "DROP ROLE testRole", + Constants.OBJECT_TYPE_ROLE, new HashMap()); + + response.setStatus(Status.InvalidInput("", null)); + amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance().createJsonLogEntity( + request, response, conf); + assertCommon(amle, Constants.FALSE, Constants.OPERATION_DROP_ROLE, "DROP ROLE testRole", + Constants.OBJECT_TYPE_ROLE, new HashMap()); + } + + @Test + public void testGrantRole() { + TAlterSentryRoleGrantPrivilegeRequest request = new TAlterSentryRoleGrantPrivilegeRequest(); + request.setRequestorUserName(TEST_USER_NAME); + request.setRoleName(TEST_ROLE_NAME); + + TAlterSentryRoleGrantPrivilegeResponse response = new TAlterSentryRoleGrantPrivilegeResponse(); + + TSentryPrivilege privilege = getPrivilege(); + request.setPrivilege(privilege); + response.setStatus(Status.OK()); + GMAuditMetadataLogEntity amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance() + .createJsonLogEntity( + request, response, conf); + assertCommon( + amle, + Constants.TRUE, + Constants.OPERATION_GRANT_PRIVILEGE, + "GRANT ACTION ON resourceType1 resourceName1 resourceType2 resourceName2 resourceType3 resourceName3 TO ROLE testRole", + Constants.OBJECT_TYPE_PRINCIPAL, TEST_PRIVILEGES_MAP); + + response.setStatus(Status.InvalidInput("", null)); + amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance().createJsonLogEntity( + request, response, conf); + assertCommon( + amle, 
+ Constants.FALSE, + Constants.OPERATION_GRANT_PRIVILEGE, + "GRANT ACTION ON resourceType1 resourceName1 resourceType2 resourceName2 resourceType3 resourceName3 TO ROLE testRole", + Constants.OBJECT_TYPE_PRINCIPAL, TEST_PRIVILEGES_MAP); + } + + @Test + public void testRevokeRole() { + TAlterSentryRoleRevokePrivilegeRequest request = new TAlterSentryRoleRevokePrivilegeRequest(); + TAlterSentryRoleRevokePrivilegeResponse response = new TAlterSentryRoleRevokePrivilegeResponse(); + request.setRequestorUserName(TEST_USER_NAME); + request.setRoleName(TEST_ROLE_NAME); + + TSentryPrivilege privilege = getPrivilege(); + request.setPrivilege(privilege); + response.setStatus(Status.OK()); + GMAuditMetadataLogEntity amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, response, conf); + assertCommon( + amle, + Constants.TRUE, + Constants.OPERATION_REVOKE_PRIVILEGE, + "REVOKE ACTION ON resourceType1 resourceName1 resourceType2 resourceName2 resourceType3 resourceName3 FROM ROLE testRole", + Constants.OBJECT_TYPE_PRINCIPAL, TEST_PRIVILEGES_MAP); + + response.setStatus(Status.InvalidInput("", null)); + amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance().createJsonLogEntity( + request, response, conf); + + assertCommon( + amle, + Constants.FALSE, + Constants.OPERATION_REVOKE_PRIVILEGE, + "REVOKE ACTION ON resourceType1 resourceName1 resourceType2 resourceName2 resourceType3 resourceName3 FROM ROLE testRole", + Constants.OBJECT_TYPE_PRINCIPAL, TEST_PRIVILEGES_MAP); + } + + @Test + public void testAddRole() { + TAlterSentryRoleAddGroupsRequest request = new TAlterSentryRoleAddGroupsRequest(); + TAlterSentryRoleAddGroupsResponse response = new TAlterSentryRoleAddGroupsResponse(); + request.setRequestorUserName(TEST_USER_NAME); + request.setRoleName(TEST_ROLE_NAME); + request.setGroups(getGroups()); + response.setStatus(Status.OK()); + GMAuditMetadataLogEntity amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance() + .createJsonLogEntity(request, response, conf); + assertCommon(amle, Constants.TRUE, Constants.OPERATION_ADD_ROLE, + "GRANT ROLE testRole TO GROUP testGroup", Constants.OBJECT_TYPE_ROLE, + new HashMap()); + + response.setStatus(Status.InvalidInput("", null)); + amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance().createJsonLogEntity( + request, response, conf); + assertCommon(amle, Constants.FALSE, Constants.OPERATION_ADD_ROLE, + "GRANT ROLE testRole TO GROUP testGroup", Constants.OBJECT_TYPE_ROLE, + new HashMap()); + } + + @Test + public void testDeleteRole() { + TAlterSentryRoleDeleteGroupsRequest request = new TAlterSentryRoleDeleteGroupsRequest(); + TAlterSentryRoleDeleteGroupsResponse response = new TAlterSentryRoleDeleteGroupsResponse(); + request.setRequestorUserName(TEST_USER_NAME); + request.setRoleName(TEST_ROLE_NAME); + request.setGroups(getGroups()); + response.setStatus(Status.OK()); + GMAuditMetadataLogEntity amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory + .getInstance().createJsonLogEntity(request, response, conf); + assertCommon(amle, Constants.TRUE, Constants.OPERATION_DELETE_ROLE, + "REVOKE ROLE testRole FROM GROUP testGroup", Constants.OBJECT_TYPE_ROLE, + new HashMap()); + + response.setStatus(Status.InvalidInput("", null)); + amle = (GMAuditMetadataLogEntity) JsonLogEntityFactory.getInstance().createJsonLogEntity( + request, response, conf); + assertCommon(amle, Constants.FALSE, Constants.OPERATION_DELETE_ROLE, + "REVOKE ROLE testRole FROM GROUP testGroup", 
Constants.OBJECT_TYPE_ROLE, + new HashMap()); + } + + private void assertCommon(GMAuditMetadataLogEntity amle, String allowedExcepted, + String operationExcepted, String operationTextExcepted, String objectTypeExcepted, + Map privilegesExcepted) { + assertEquals(ServerConfig.SENTRY_SERVICE_NAME_DEFAULT, amle.getServiceName()); + assertEquals(TEST_IP, amle.getIpAddress()); + assertEquals(TEST_USER_NAME, amle.getUserName()); + assertEquals(TEST_IMPERSONATOR, amle.getImpersonator()); + assertEquals(allowedExcepted, amle.getAllowed()); + assertEquals(operationExcepted, amle.getOperation()); + assertEquals(operationTextExcepted, amle.getOperationText()); + assertEquals(objectTypeExcepted, amle.getObjectType()); + assertPrivilegesMap(privilegesExcepted, amle.getPrivilegesMap()); + } + + private void assertPrivilegesMap(Map privilegesExcepted, + Map privilegesActual) { + assertEquals(privilegesExcepted.size(), privilegesActual.size()); + for (Map.Entry privilege : privilegesExcepted.entrySet()) { + assertEquals(privilege.getValue(), privilegesActual.get(privilege.getKey())); + } + } + + private TSentryPrivilege getPrivilege() { + TSentryPrivilege privilege = new TSentryPrivilege(); + privilege.setAction(TEST_ACTION); + privilege.setComponent(TEST_COMPONENT); + List authorizables = new ArrayList(); + authorizables.add(new TAuthorizable("resourceType1", "resourceName1")); + authorizables.add(new TAuthorizable("resourceType2", "resourceName2")); + authorizables.add(new TAuthorizable("resourceType3", "resourceName3")); + privilege.setAuthorizables(authorizables); + return privilege; + } + + private Set getGroups() { + Set groups = new HashSet(); + groups.add(TEST_GROUP); + return groups; + } +} diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/util/TestCommandUtil.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/util/TestCommandUtil.java index 0a2b0b23f..8620f6297 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/util/TestCommandUtil.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/log/util/TestCommandUtil.java @@ -18,25 +18,24 @@ package org.apache.sentry.provider.db.log.util; -import java.util.LinkedHashSet; +import java.util.ArrayList; +import java.util.List; import java.util.Set; -import junit.framework.TestCase; +import org.junit.Assert; import org.apache.sentry.core.model.db.AccessConstants; -import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsRequest; -import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleDeleteGroupsRequest; +import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable; import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleGrantPrivilegeRequest; import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleRevokePrivilegeRequest; import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; -import org.apache.sentry.provider.db.service.thrift.TSentryGroup; import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; import org.apache.sentry.service.thrift.ServiceConstants.PrivilegeScope; import org.junit.Test; import com.google.common.collect.Sets; -public class TestCommandUtil extends TestCase { +public class TestCommandUtil extends Assert { @Test public void testCreateCmdForCreateOrDropRole() { @@ -56,18 +55,11 @@ public void testCreateCmdForCreateOrDropRole() { @Test public void 
testCreateCmdForRoleAddOrDeleteGroup1() { - TAlterSentryRoleAddGroupsRequest requestAdd = getRoleAddGroupsRequest(); - TAlterSentryRoleDeleteGroupsRequest requestDelete = getRoleDeleteGroupsRequest(); - - Set groups = getGroups(1); - requestAdd.setGroups(groups); - requestDelete.setGroups(groups); - - String createRoleAddGroupCmdResult = CommandUtil - .createCmdForRoleAddGroup(requestAdd); + String createRoleAddGroupCmdResult = CommandUtil.createCmdForRoleAddGroup("testRole", + getGroupStr(1)); String createRoleAddGroupCmdExcepted = "GRANT ROLE testRole TO GROUP testGroup1"; - String createRoleDeleteGroupCmdResult = CommandUtil - .createCmdForRoleDeleteGroup(requestDelete); + String createRoleDeleteGroupCmdResult = CommandUtil.createCmdForRoleDeleteGroup("testRole", + getGroupStr(1)); String createRoleDeleteGroupCmdExcepted = "REVOKE ROLE testRole FROM GROUP testGroup1"; assertEquals(createRoleAddGroupCmdExcepted, createRoleAddGroupCmdResult); @@ -77,19 +69,11 @@ public void testCreateCmdForRoleAddOrDeleteGroup1() { @Test public void testCreateCmdForRoleAddOrDeleteGroup2() { - - TAlterSentryRoleAddGroupsRequest requestAdd = getRoleAddGroupsRequest(); - TAlterSentryRoleDeleteGroupsRequest requestDelete = getRoleDeleteGroupsRequest(); - - Set groups = getGroups(3); - requestAdd.setGroups(groups); - requestDelete.setGroups(groups); - - String createRoleAddGroupCmdResult = CommandUtil - .createCmdForRoleAddGroup(requestAdd); + String createRoleAddGroupCmdResult = CommandUtil.createCmdForRoleAddGroup("testRole", + getGroupStr(3)); String createRoleAddGroupCmdExcepted = "GRANT ROLE testRole TO GROUP testGroup1, testGroup2, testGroup3"; - String createRoleDeleteGroupCmdResult = CommandUtil - .createCmdForRoleDeleteGroup(requestDelete); + String createRoleDeleteGroupCmdResult = CommandUtil.createCmdForRoleDeleteGroup("testRole", + getGroupStr(3)); String createRoleDeleteGroupCmdExcepted = "REVOKE ROLE testRole FROM GROUP testGroup1, testGroup2, testGroup3"; assertEquals(createRoleAddGroupCmdExcepted, createRoleAddGroupCmdResult); @@ -294,26 +278,55 @@ public void testCreateCmdForGrantOrRevokePrivilege8() { assertEquals(createRevokePrivilegeCmdExcepted, createRevokePrivilegeCmdResult); } - private TAlterSentryRoleAddGroupsRequest getRoleAddGroupsRequest() { - TAlterSentryRoleAddGroupsRequest request = new TAlterSentryRoleAddGroupsRequest(); - request.setRoleName("testRole"); - return request; + // generate the command without grant option + @Test + public void testCreateCmdForGrantOrRevokeGMPrivilege1() { + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest grantRequest = getGrantGMPrivilegeRequest(); + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest revokeRequest = getRevokeGMPrivilegeRequest(); + org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege privilege = getGMPrivilege(); + grantRequest.setPrivilege(privilege); + revokeRequest.setPrivilege(privilege); + + String createGrantPrivilegeCmdResult = CommandUtil.createCmdForGrantGMPrivilege(grantRequest); + String createGrantPrivilegeCmdExcepted = "GRANT ACTION ON resourceType1 resourceName1 resourceType2 resourceName2 TO ROLE testRole"; + String createRevokePrivilegeCmdResult = CommandUtil + .createCmdForRevokeGMPrivilege(revokeRequest); + String createRevokePrivilegeCmdExcepted = "REVOKE ACTION ON resourceType1 resourceName1 resourceType2 resourceName2 FROM ROLE testRole"; + + assertEquals(createGrantPrivilegeCmdExcepted, 
createGrantPrivilegeCmdResult); + assertEquals(createRevokePrivilegeCmdExcepted, createRevokePrivilegeCmdResult); } - private TAlterSentryRoleDeleteGroupsRequest getRoleDeleteGroupsRequest() { - TAlterSentryRoleDeleteGroupsRequest request = new TAlterSentryRoleDeleteGroupsRequest(); - request.setRoleName("testRole"); - return request; + // generate the command with grant option + @Test + public void testCreateCmdForGrantOrRevokeGMPrivilege2() { + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest grantRequest = getGrantGMPrivilegeRequest(); + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest revokeRequest = getRevokeGMPrivilegeRequest(); + org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege privilege = getGMPrivilege(); + privilege + .setGrantOption(org.apache.sentry.provider.db.generic.service.thrift.TSentryGrantOption.TRUE); + grantRequest.setPrivilege(privilege); + revokeRequest.setPrivilege(privilege); + + String createGrantPrivilegeCmdResult = CommandUtil.createCmdForGrantGMPrivilege(grantRequest); + String createGrantPrivilegeCmdExcepted = "GRANT ACTION ON resourceType1 resourceName1 resourceType2 resourceName2 TO ROLE testRole WITH GRANT OPTION"; + String createRevokePrivilegeCmdResult = CommandUtil + .createCmdForRevokeGMPrivilege(revokeRequest); + String createRevokePrivilegeCmdExcepted = "REVOKE ACTION ON resourceType1 resourceName1 resourceType2 resourceName2 FROM ROLE testRole WITH GRANT OPTION"; + + assertEquals(createGrantPrivilegeCmdExcepted, createGrantPrivilegeCmdResult); + assertEquals(createRevokePrivilegeCmdExcepted, createRevokePrivilegeCmdResult); } - private Set getGroups(int num) { - Set groups = new LinkedHashSet(); + private String getGroupStr(int num) { + StringBuilder sb = new StringBuilder(); for (int i = 0; i < num; i++) { - TSentryGroup group = new TSentryGroup(); - group.setGroupName("testGroup" + (i + 1)); - groups.add(group); + if (i > 0) { + sb.append(", "); + } + sb.append("testGroup" + (i + 1)); } - return groups; + return sb.toString(); } private TAlterSentryRoleGrantPrivilegeRequest getGrantPrivilegeRequest() { @@ -328,6 +341,18 @@ private TAlterSentryRoleRevokePrivilegeRequest getRevokePrivilegeRequest() { return request; } + private org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest getGrantGMPrivilegeRequest() { + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest request = new org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleGrantPrivilegeRequest(); + request.setRoleName("testRole"); + return request; + } + + private org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest getRevokeGMPrivilegeRequest() { + org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest request = new org.apache.sentry.provider.db.generic.service.thrift.TAlterSentryRoleRevokePrivilegeRequest(); + request.setRoleName("testRole"); + return request; + } + private TSentryPrivilege getPrivilege(String action, String privilegeScope, String dbName, String tableName, String serverName, String URI) { TSentryPrivilege privilege = new TSentryPrivilege(); @@ -339,4 +364,15 @@ private TSentryPrivilege getPrivilege(String action, String privilegeScope, privilege.setURI(URI); return privilege; } + + private org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege getGMPrivilege() { + 
org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege privilege = new org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege(); + privilege.setAction("ACTION"); + privilege.setComponent("COMPONENT"); + List authorizables = new ArrayList(); + authorizables.add(new TAuthorizable("resourceType1", "resourceName1")); + authorizables.add(new TAuthorizable("resourceType2", "resourceName2")); + privilege.setAuthorizables(authorizables); + return privilege; + } } diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryPrivilege.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryPrivilege.java index 31cca67ae..c31233b68 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryPrivilege.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryPrivilege.java @@ -18,8 +18,8 @@ package org.apache.sentry.provider.db.service.persistent; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import org.apache.sentry.core.model.db.AccessConstants; import org.apache.sentry.provider.db.service.model.MSentryPrivilege; diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryServiceDiscovery.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryServiceDiscovery.java index 2773a9e48..7cbcc111d 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryServiceDiscovery.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryServiceDiscovery.java @@ -25,13 +25,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.curator.test.TestingServer; -import org.apache.curator.utils.CloseableUtils; import org.apache.curator.x.discovery.ServiceInstance; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; import org.junit.After; import org.junit.Before; import org.junit.Test; -import static junit.framework.Assert.assertEquals; +import static org.junit.Assert.assertEquals; public class TestSentryServiceDiscovery { diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStore.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStore.java index 35319dbf7..02f98e2f3 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStore.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStore.java @@ -18,11 +18,6 @@ package org.apache.sentry.provider.db.service.persistent; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; -import static junit.framework.Assert.fail; - import java.io.File; import java.util.Arrays; import java.util.Collections; @@ -31,6 +26,9 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; +import 
org.apache.hadoop.security.alias.CredentialProvider; +import org.apache.hadoop.security.alias.CredentialProviderFactory; +import org.apache.hadoop.security.alias.UserProvider; import org.apache.sentry.core.model.db.AccessConstants; import org.apache.sentry.provider.db.SentryAlreadyExistsException; import org.apache.sentry.provider.db.SentryGrantDeniedException; @@ -46,6 +44,7 @@ import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; import org.junit.After; import org.junit.AfterClass; +import static org.junit.Assert.assertArrayEquals; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Ignore; @@ -55,7 +54,7 @@ import com.google.common.collect.Sets; import com.google.common.io.Files; -public class TestSentryStore { +public class TestSentryStore extends org.junit.Assert { private static File dataDir; private static SentryStore sentryStore; @@ -63,14 +62,24 @@ public class TestSentryStore { private static PolicyFile policyFile; private static File policyFilePath; final long NUM_PRIVS = 60; // > SentryStore.PrivCleaner.NOTIFY_THRESHOLD + private static Configuration conf = null; + private static char[] passwd = new char[] { '1', '2', '3'}; @BeforeClass public static void setup() throws Exception { + conf = new Configuration(false); + final String ourUrl = UserProvider.SCHEME_NAME + ":///"; + conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl); + CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0); + provider.createCredentialEntry(ServerConfig. + SENTRY_STORE_JDBC_PASS, passwd); + provider.flush(); + dataDir = new File(Files.createTempDir(), "sentry_policy_db"); - Configuration conf = new Configuration(false); conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dataDir.getPath() + ";create=true"); + conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); conf.setStrings(ServerConfig.ADMIN_GROUPS, adminGroups); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); @@ -102,6 +111,13 @@ public static void teardown() { FileUtils.deleteQuietly(dataDir); } } + + @Test + public void testCredentialProvider() throws Exception { + assertArrayEquals(passwd, conf.getPassword(ServerConfig. 
+ SENTRY_STORE_JDBC_PASS)); + } + @Test public void testCaseInsensitiveRole() throws Exception { String roleName = "newRole"; @@ -513,7 +529,7 @@ public void testGrantRevokeTablePrivilegeDowngradeByDb() throws Exception { privilegeTable1.setTableName(table1); privilegeTable1.setAction(AccessConstants.ALL); privilegeTable1.setCreateTime(System.currentTimeMillis()); - TSentryPrivilege privilegeTable2 = privilegeTable1.deepCopy();; + TSentryPrivilege privilegeTable2 = privilegeTable1.deepCopy(); privilegeTable2.setTableName(table2); // Grant ALL on table1 and table2 @@ -592,7 +608,7 @@ public void testGrantRevokeColumnPrivilegeDowngradeByDb() throws Exception { privilegeCol1.setColumnName(column1); privilegeCol1.setAction(AccessConstants.ALL); privilegeCol1.setCreateTime(System.currentTimeMillis()); - TSentryPrivilege privilegeCol2 = privilegeCol1.deepCopy();; + TSentryPrivilege privilegeCol2 = privilegeCol1.deepCopy(); privilegeCol2.setColumnName(column2); // Grant ALL on column1 and column2 @@ -1544,17 +1560,17 @@ public void testSentryPrivilegeSize() throws Exception { String grantor = "g1"; - assertEquals(new Long(0), sentryStore.getPrivilegeCountGauge().getValue()); + assertEquals(Long.valueOf(0), sentryStore.getPrivilegeCountGauge().getValue()); sentryStore.alterSentryRoleGrantPrivilege(grantor, role1, privilege); - assertEquals(new Long(1), sentryStore.getPrivilegeCountGauge().getValue()); + assertEquals(Long.valueOf(1), sentryStore.getPrivilegeCountGauge().getValue()); sentryStore.alterSentryRoleGrantPrivilege(grantor, role2, privilege); - assertEquals(new Long(1), sentryStore.getPrivilegeCountGauge().getValue()); + assertEquals(Long.valueOf(1), sentryStore.getPrivilegeCountGauge().getValue()); privilege.setTableName("tb2"); sentryStore.alterSentryRoleGrantPrivilege(grantor, role2, privilege); - assertEquals(new Long(2), sentryStore.getPrivilegeCountGauge().getValue()); + assertEquals(Long.valueOf(2), sentryStore.getPrivilegeCountGauge().getValue()); } @Test @@ -1573,14 +1589,14 @@ public void testSentryGroupsSize() throws Exception { String grantor = "g1"; sentryStore.alterSentryRoleAddGroups(grantor, role1, groups); - assertEquals(new Long(1), sentryStore.getGroupCountGauge().getValue()); + assertEquals(Long.valueOf(1), sentryStore.getGroupCountGauge().getValue()); sentryStore.alterSentryRoleAddGroups(grantor, role2, groups); - assertEquals(new Long(1), sentryStore.getGroupCountGauge().getValue()); + assertEquals(Long.valueOf(1), sentryStore.getGroupCountGauge().getValue()); groups.add(new TSentryGroup("group2")); sentryStore.alterSentryRoleAddGroups(grantor, role2, groups); - assertEquals(new Long(2), sentryStore.getGroupCountGauge().getValue()); + assertEquals(Long.valueOf(2), sentryStore.getGroupCountGauge().getValue()); } @@ -1639,6 +1655,83 @@ public void testRenameTableWithColumn() throws Exception { assertEquals(1, privilegeSet.size()); } + @Test + public void testSentryTablePrivilegeSome() throws Exception { + String roleName = "test-table-privilege-some"; + String grantor = "g1"; + String dbName = "db1"; + String table = "tb1"; + sentryStore.createSentryRole(roleName); + TSentryPrivilege tSentryPrivilege = new TSentryPrivilege("TABLE", "server1", "ALL"); + tSentryPrivilege.setDbName(dbName); + tSentryPrivilege.setTableName(table); + sentryStore.alterSentryRoleGrantPrivilege(grantor, roleName, tSentryPrivilege); + + TSentryAuthorizable tSentryAuthorizable = new TSentryAuthorizable(); + tSentryAuthorizable.setDb(dbName); + tSentryAuthorizable.setTable(AccessConstants.SOME); + 
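+    // AccessConstants.SOME serves as a wildcard table name in this lookup:
+    // the query below must still return the table-level ALL grant made above.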
tSentryAuthorizable.setServer("server1"); + + Set privileges = + sentryStore.getTSentryPrivileges(new HashSet(Arrays.asList(roleName)), tSentryAuthorizable); + + assertTrue(privileges.size() == 1); + + Set tSentryGroups = new HashSet(); + tSentryGroups.add(new TSentryGroup("group1")); + sentryStore.alterSentryRoleAddGroups(grantor, roleName, tSentryGroups); + + TSentryActiveRoleSet thriftRoleSet = new TSentryActiveRoleSet(true, new HashSet(Arrays.asList(roleName))); + + Set privs = + sentryStore.listSentryPrivilegesForProvider(new HashSet(Arrays.asList("group1")), thriftRoleSet, tSentryAuthorizable); + + assertTrue(privs.size()==1); + assertTrue(privs.contains("server=server1->db=" + dbName + "->table=" + table + "->action=all")); + + } + + + @Test + public void testSentryColumnPrivilegeSome() throws Exception { + String roleName = "test-column-privilege-some"; + String grantor = "g1"; + String dbName = "db1"; + String table = "tb1"; + String column = "col1"; + sentryStore.createSentryRole(roleName); + TSentryPrivilege tSentryPrivilege = new TSentryPrivilege("TABLE", "server1", "ALL"); + tSentryPrivilege.setDbName(dbName); + tSentryPrivilege.setTableName(table); + tSentryPrivilege.setColumnName(column); + sentryStore.alterSentryRoleGrantPrivilege(grantor, roleName, tSentryPrivilege); + + TSentryAuthorizable tSentryAuthorizable = new TSentryAuthorizable(); + tSentryAuthorizable.setDb(dbName); + tSentryAuthorizable.setTable(table); + tSentryAuthorizable.setColumn(AccessConstants.SOME); + tSentryAuthorizable.setServer("server1"); + + Set privileges = + sentryStore.getTSentryPrivileges(new HashSet(Arrays.asList(roleName)), tSentryAuthorizable); + + assertTrue(privileges.size() == 1); + + Set tSentryGroups = new HashSet(); + tSentryGroups.add(new TSentryGroup("group1")); + sentryStore.alterSentryRoleAddGroups(grantor, roleName, tSentryGroups); + + TSentryActiveRoleSet thriftRoleSet = new TSentryActiveRoleSet(true, new HashSet(Arrays.asList(roleName))); + + Set privs = + sentryStore.listSentryPrivilegesForProvider(new HashSet(Arrays.asList("group1")), thriftRoleSet, tSentryAuthorizable); + + assertTrue(privs.size() == 1); + assertTrue(privs.contains("server=server1->db=" + dbName + "->table=" + table + "->column=" + + column + "->action=all")); + + } + protected static void addGroupsToUser(String user, String... groupNames) { policyFile.addGroupsToUser(user, groupNames); } diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStoreImportExport.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStoreImportExport.java new file mode 100644 index 000000000..a9e4ed6ca --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStoreImportExport.java @@ -0,0 +1,915 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.provider.db.service.persistent; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.sentry.core.model.db.AccessConstants; +import org.apache.sentry.provider.db.service.model.MSentryGroup; +import org.apache.sentry.provider.db.service.model.MSentryPrivilege; +import org.apache.sentry.provider.db.service.model.MSentryRole; +import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.service.thrift.TSentryMappingData; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.file.PolicyFile; +import org.apache.sentry.service.thrift.ServiceConstants.PrivilegeScope; +import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public class TestSentryStoreImportExport { + + private static File dataDir; + private static SentryStore sentryStore; + private static String[] adminGroups = { "adminGroup1" }; + private static PolicyFile policyFile; + private static File policyFilePath; + private TSentryPrivilege tSentryPrivilege1; + private TSentryPrivilege tSentryPrivilege2; + private TSentryPrivilege tSentryPrivilege3; + private TSentryPrivilege tSentryPrivilege4; + private TSentryPrivilege tSentryPrivilege5; + private TSentryPrivilege tSentryPrivilege6; + private TSentryPrivilege tSentryPrivilege7; + private TSentryPrivilege tSentryPrivilege8; + + @BeforeClass + public static void setupEnv() throws Exception { + dataDir = new File(Files.createTempDir(), "sentry_policy_db"); + Configuration conf = new Configuration(false); + conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); + conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dataDir.getPath() + + ";create=true"); + conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "sentry"); + conf.setStrings(ServerConfig.ADMIN_GROUPS, adminGroups); + conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); + policyFilePath = new File(dataDir, "local_policy_file.ini"); + conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFilePath.getPath()); + policyFile = new PolicyFile(); + sentryStore = new SentryStore(conf); + + String adminUser = "g1"; + addGroupsToUser(adminUser, adminGroups); + writePolicyFile(); + } + + @Before + public void setupPrivilege() { + preparePrivilege(); + } + + @After + public void clearStore() { + sentryStore.clearAllTables(); + } + + // create the privileges instance for test case: + // privilege1=[server=server1] + // privilege2=[server=server1, action=select, grantOption=false] + // privilege3=[server=server1, db=db2, 
action=insert, grantOption=true]
+  // privilege4=[server=server1, db=db1, table=tbl1, action=insert, grantOption=false]
+  // privilege5=[server=server1, db=db1, table=tbl2, column=col1, action=insert, grantOption=false]
+  // privilege6=[server=server1, db=db1, table=tbl3, column=col1, action=*, grantOption=true]
+  // privilege7=[server=server1, db=db1, table=tbl4, column=col1, action=all, grantOption=true]
+  // privilege8=[server=server1, uri=hdfs://testserver:9999/path1, action=insert, grantOption=false]
+  private void preparePrivilege() {
+    tSentryPrivilege1 = createTSentryPrivilege(PrivilegeScope.SERVER.name(), "server1", "", "", "",
+        "", "", TSentryGrantOption.UNSET);
+    tSentryPrivilege2 = createTSentryPrivilege(PrivilegeScope.SERVER.name(), "server1", "", "", "",
+        "", AccessConstants.SELECT, TSentryGrantOption.FALSE);
+    tSentryPrivilege3 = createTSentryPrivilege(PrivilegeScope.DATABASE.name(), "server1", "db2",
+        "", "", "", AccessConstants.INSERT, TSentryGrantOption.TRUE);
+    tSentryPrivilege4 = createTSentryPrivilege(PrivilegeScope.TABLE.name(), "server1", "db1",
+        "tbl1", "", "", AccessConstants.INSERT, TSentryGrantOption.FALSE);
+    tSentryPrivilege5 = createTSentryPrivilege(PrivilegeScope.COLUMN.name(), "server1", "db1",
+        "tbl2", "col1", "", AccessConstants.INSERT, TSentryGrantOption.FALSE);
+    tSentryPrivilege6 = createTSentryPrivilege(PrivilegeScope.COLUMN.name(), "server1", "db1",
+        "tbl3", "col1", "", AccessConstants.ALL, TSentryGrantOption.TRUE);
+    tSentryPrivilege7 = createTSentryPrivilege(PrivilegeScope.COLUMN.name(), "server1", "db1",
+        "tbl4", "col1", "", AccessConstants.ACTION_ALL, TSentryGrantOption.TRUE);
+    tSentryPrivilege8 = createTSentryPrivilege(PrivilegeScope.URI.name(), "server1", "", "", "",
+        "hdfs://testserver:9999/path1", AccessConstants.INSERT, TSentryGrantOption.FALSE);
+  }
+
+  @AfterClass
+  public static void teardown() {
+    if (sentryStore != null) {
+      sentryStore.stop();
+    }
+    if (dataDir != null) {
+      FileUtils.deleteQuietly(dataDir);
+    }
+  }
+
+  protected static void addGroupsToUser(String user, String... groupNames) {
+    policyFile.addGroupsToUser(user, groupNames);
+  }
+
+  protected static void writePolicyFile() throws Exception {
+    policyFile.write(policyFilePath);
+  }
+
+  // Before import, the database is empty.
+  // The following information is imported:
+  // group1=role1,role2,role3
+  // group2=role1,role2,role3
+  // group3=role1,role2,role3
+  // role1=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8
+  // role2=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8
+  // role3=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8
+  // Both the import API importSentryMetaData and the export APIs getRolesMap, getGroupsMap,
+  // getPrivilegesList are tested.
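Before the individual cases, it helps to see the shape of the import payload: a TSentryMappingData pairs a map from group name to role names with a map from role name to privilege sets. A minimal standalone sketch of that shape (plain strings stand in for TSentryPrivilege objects to keep it self-contained; the arrow notation mirrors the privilege strings used elsewhere in these tests):

import java.util.Map;
import java.util.Set;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;

public class MappingDataShape {
  public static void main(String[] args) {
    // group name -> roles granted to that group
    Map<String, Set<String>> groupRoles =
        ImmutableMap.of("group1", ImmutableSet.of("Role1", "role2"));

    // role name -> privileges held by that role; strings stand in for
    // TSentryPrivilege so the sketch runs on its own
    Map<String, Set<String>> rolePrivileges = ImmutableMap.of(
        "Role1", ImmutableSet.of("server=server1->db=db2->action=insert"));

    System.out.println(groupRoles);
    System.out.println(rolePrivileges);
  }
}

Note that testImportExportPolicy1 below imports the mixed-case name "Role1" but verifies "role1": role names are evidently normalized to lower case on import, while group names are preserved as given. The false flag on importSentryMetaData selects merge semantics; overwrite mode is exercised from testImportExportPolicy7 on.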
+  @Test
+  public void testImportExportPolicy1() throws Exception {
+    TSentryMappingData tSentryMappingData = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap = Maps.newHashMap();
+    sentryGroupRolesMap.put("group1", Sets.newHashSet("Role1", "role2", "role3"));
+    sentryGroupRolesMap.put("group2", Sets.newHashSet("Role1", "role2", "role3"));
+    sentryGroupRolesMap.put("group3", Sets.newHashSet("Role1", "role2", "role3"));
+    sentryRolePrivilegesMap.put("Role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+    sentryRolePrivilegesMap.put("role2", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+    sentryRolePrivilegesMap.put("role3", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+    tSentryMappingData.setGroupRolesMap(sentryGroupRolesMap);
+    tSentryMappingData.setRolePrivilegesMap(sentryRolePrivilegesMap);
+    sentryStore.importSentryMetaData(tSentryMappingData, false);
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1", "role2", "role3"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1", "group2", "group3"));
+
+    // test the result data for the privilege
+    verifyPrivileges(privilegesList, Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> exceptedGroupRolesMap = Maps.newHashMap();
+    exceptedGroupRolesMap.put("group1", Sets.newHashSet("role1", "role2", "role3"));
+    exceptedGroupRolesMap.put("group2", Sets.newHashSet("role1", "role2", "role3"));
+    exceptedGroupRolesMap.put("group3", Sets.newHashSet("role1", "role2", "role3"));
+    verifyGroupRolesMap(actualGroupRolesMap, exceptedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    Map<String, Set<TSentryPrivilege>> exceptedRolePrivilegesMap = Maps.newHashMap();
+    exceptedRolePrivilegesMap.put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+    exceptedRolePrivilegesMap.put("role2", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+    exceptedRolePrivilegesMap.put("role3", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+
+    verifyRolePrivilegesMap(actualRolePrivilegesMap, exceptedRolePrivilegesMap);
+  }
+
+  // call import twice; there is no duplicate data:
+  // The data for 1st import:
+  // group1=role1
+  // role1=privilege1,privilege2,privilege3,privilege4
+  // The data for 2nd import:
+  // group2=role2,role3
+  // group3=role2,role3
+  // role2=privilege5,privilege6,privilege7,privilege8
+  // role3=privilege5,privilege6,privilege7,privilege8
+  // Both the import API importSentryMetaData and the export APIs getRolesMap, getGroupsMap,
+  // getPrivilegesList are tested.
+  @Test
+  public void testImportExportPolicy2() throws Exception {
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1"));
+    sentryRolePrivilegesMap1
+        .put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2, tSentryPrivilege3,
+            tSentryPrivilege4));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    sentryStore.importSentryMetaData(tSentryMappingData1, false);
+
+    TSentryMappingData tSentryMappingData2 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap2 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap2 = Maps.newHashMap();
+    sentryGroupRolesMap2.put("group2", Sets.newHashSet("role2", "role3"));
+    sentryGroupRolesMap2.put("group3", Sets.newHashSet("role2", "role3"));
+    sentryRolePrivilegesMap2
+        .put("role2", Sets.newHashSet(tSentryPrivilege5, tSentryPrivilege6, tSentryPrivilege7,
+            tSentryPrivilege8));
+    sentryRolePrivilegesMap2
+        .put("role3", Sets.newHashSet(tSentryPrivilege5, tSentryPrivilege6, tSentryPrivilege7,
+            tSentryPrivilege8));
+    tSentryMappingData2.setGroupRolesMap(sentryGroupRolesMap2);
+    tSentryMappingData2.setRolePrivilegesMap(sentryRolePrivilegesMap2);
+    sentryStore.importSentryMetaData(tSentryMappingData2, false);
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1", "role2", "role3"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1", "group2", "group3"));
+
+    // test the result data for the privilege
+    verifyPrivileges(privilegesList, Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> exceptedGroupRolesMap = Maps.newHashMap();
+    exceptedGroupRolesMap.put("group1", Sets.newHashSet("role1"));
+    exceptedGroupRolesMap.put("group2", Sets.newHashSet("role2", "role3"));
+    exceptedGroupRolesMap.put("group3", Sets.newHashSet("role2", "role3"));
+    verifyGroupRolesMap(actualGroupRolesMap, exceptedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    Map<String, Set<TSentryPrivilege>> exceptedRolePrivilegesMap = Maps.newHashMap();
+    exceptedRolePrivilegesMap
+        .put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2, tSentryPrivilege3,
+            tSentryPrivilege4));
+    exceptedRolePrivilegesMap
+        .put("role2", Sets.newHashSet(tSentryPrivilege5, tSentryPrivilege6, tSentryPrivilege7,
+            tSentryPrivilege8));
+    exceptedRolePrivilegesMap
+        .put("role3", Sets.newHashSet(tSentryPrivilege5, tSentryPrivilege6, tSentryPrivilege7,
+            tSentryPrivilege8));
+    verifyRolePrivilegesMap(actualRolePrivilegesMap, exceptedRolePrivilegesMap);
+  }
+
+  // call import twice; there is data overlap:
+  // The data for 1st import:
+  // group1=role1, role2
+  // group2=role1, role2
+  // group3=role1, role2
+  // role1=privilege1,privilege2,privilege3,privilege4,privilege5
+  // role2=privilege1,privilege2,privilege3,privilege4,privilege5
+  // The data for 2nd import:
+  // group1=role2,role3
+  // group2=role2,role3
+  // group3=role2,role3
+  // role2=privilege4,privilege5,privilege6,privilege7,privilege8
+  // role3=privilege4,privilege5,privilege6,privilege7,privilege8
+  // Both the import API importSentryMetaData and the export APIs getRolesMap, getGroupsMap,
+  // getPrivilegesList are tested.
+  @Test
+  public void testImportExportPolicy3() throws Exception {
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1", "role2"));
+    sentryGroupRolesMap1.put("group2", Sets.newHashSet("role1", "role2"));
+    sentryGroupRolesMap1.put("group3", Sets.newHashSet("role1", "role2"));
+    sentryRolePrivilegesMap1.put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5));
+    sentryRolePrivilegesMap1.put("role2", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    sentryStore.importSentryMetaData(tSentryMappingData1, false);
+
+    TSentryMappingData tSentryMappingData2 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap2 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap2 = Maps.newHashMap();
+    sentryGroupRolesMap2.put("group1", Sets.newHashSet("role2", "role3"));
+    sentryGroupRolesMap2.put("group2", Sets.newHashSet("role2", "role3"));
+    sentryGroupRolesMap2.put("group3", Sets.newHashSet("role2", "role3"));
+    sentryRolePrivilegesMap2.put("role2", Sets.newHashSet(tSentryPrivilege4, tSentryPrivilege5,
+        tSentryPrivilege6, tSentryPrivilege7, tSentryPrivilege8));
+    sentryRolePrivilegesMap2.put("role3", Sets.newHashSet(tSentryPrivilege4, tSentryPrivilege5,
+        tSentryPrivilege6, tSentryPrivilege7, tSentryPrivilege8));
+    tSentryMappingData2.setGroupRolesMap(sentryGroupRolesMap2);
+    tSentryMappingData2.setRolePrivilegesMap(sentryRolePrivilegesMap2);
+    sentryStore.importSentryMetaData(tSentryMappingData2, false);
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1", "role2", "role3"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1", "group2", "group3"));
+
+    // test the result data for the privilege
+    verifyPrivileges(privilegesList, Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> exceptedGroupRolesMap = Maps.newHashMap();
+    exceptedGroupRolesMap.put("group1", Sets.newHashSet("role1", "role2", "role3"));
+    exceptedGroupRolesMap.put("group2", Sets.newHashSet("role1", "role2", "role3"));
+    exceptedGroupRolesMap.put("group3", Sets.newHashSet("role1", "role2", "role3"));
+    verifyGroupRolesMap(actualGroupRolesMap, exceptedGroupRolesMap);
+
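+    // Overlapping imports merge rather than replace: role2 below must end up
+    // with the union of the privilege sets from the two imports (privilege1
+    // through privilege8), while role1 and role3 keep only their own sets.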
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    Map<String, Set<TSentryPrivilege>> exceptedRolePrivilegesMap = Maps.newHashMap();
+    exceptedRolePrivilegesMap.put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5));
+    exceptedRolePrivilegesMap.put("role2", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+    exceptedRolePrivilegesMap.put("role3", Sets.newHashSet(tSentryPrivilege4, tSentryPrivilege5,
+        tSentryPrivilege6, tSentryPrivilege7, tSentryPrivilege8));
+    verifyRolePrivilegesMap(actualRolePrivilegesMap, exceptedRolePrivilegesMap);
+  }
+
+  // call import twice; one role has no group.
+  // The data for 1st import:
+  // group1=role1, role2
+  // role1=privilege1,privilege2
+  // role2=privilege3,privilege4
+  // The data for 2nd import:
+  // group2=role2
+  // role2=privilege5,privilege6
+  // role3=privilege7,privilege8
+  // role3 has no group, but will be imported as well
+  @Test
+  public void testImportExportPolicy4() throws Exception {
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1", "role2"));
+    sentryRolePrivilegesMap1.put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2));
+    sentryRolePrivilegesMap1.put("role2", Sets.newHashSet(tSentryPrivilege3, tSentryPrivilege4));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    sentryStore.importSentryMetaData(tSentryMappingData1, false);
+
+    TSentryMappingData tSentryMappingData2 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap2 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap2 = Maps.newHashMap();
+    sentryGroupRolesMap2.put("group2", Sets.newHashSet("role2"));
+    sentryRolePrivilegesMap2.put("role2", Sets.newHashSet(tSentryPrivilege5, tSentryPrivilege6));
+    sentryRolePrivilegesMap2.put("role3", Sets.newHashSet(tSentryPrivilege7, tSentryPrivilege8));
+    tSentryMappingData2.setGroupRolesMap(sentryGroupRolesMap2);
+    tSentryMappingData2.setRolePrivilegesMap(sentryRolePrivilegesMap2);
+    sentryStore.importSentryMetaData(tSentryMappingData2, false);
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1", "role2", "role3"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1", "group2"));
+
+    // test the result data for the privilege
+    verifyPrivileges(privilegesList, Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> exceptedGroupRolesMap = Maps.newHashMap();
+    exceptedGroupRolesMap.put("group1", Sets.newHashSet("role1", "role2"));
+    exceptedGroupRolesMap.put("group2", Sets.newHashSet("role2"));
+    verifyGroupRolesMap(actualGroupRolesMap, exceptedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    Map<String, Set<TSentryPrivilege>> exceptedRolePrivilegesMap = Maps.newHashMap();
+    exceptedRolePrivilegesMap.put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2));
+    exceptedRolePrivilegesMap
+        .put("role2", Sets.newHashSet(tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5,
+            tSentryPrivilege6));
+    exceptedRolePrivilegesMap.put("role3", Sets.newHashSet(tSentryPrivilege7, tSentryPrivilege8));
+    verifyRolePrivilegesMap(actualRolePrivilegesMap, exceptedRolePrivilegesMap);
+  }
+
+  // test importing mapping data for [group,role] only:
+  // group1=role1, role2
+  @Test
+  public void testImportExportPolicy5() throws Exception {
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1", "role2"));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    sentryStore.importSentryMetaData(tSentryMappingData1, false);
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1", "role2"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1"));
+
+    // test the result data for the privilege
+    assertTrue(privilegesList.isEmpty());
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> exceptedGroupRolesMap = Maps.newHashMap();
+    exceptedGroupRolesMap.put("group1", Sets.newHashSet("role1", "role2"));
+    verifyGroupRolesMap(actualGroupRolesMap, exceptedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    assertTrue(actualRolePrivilegesMap.isEmpty());
+  }
+
+  // test filtering out the orphaned group:
+  // group1=role1, role2
+  // group2=role2
+  @Test
+  public void testImportExportPolicy6() throws Exception {
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1", "role2"));
+    sentryGroupRolesMap1.put("group2", Sets.newHashSet("role2"));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    sentryStore.importSentryMetaData(tSentryMappingData1, false);
+
+    // drop role2, leaving group2 as an orphaned group
+    sentryStore.dropSentryRole("role2");
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1", "group2"));
+
+    // test the result data for the privilege
+    assertTrue(privilegesList.isEmpty());
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> exceptedGroupRolesMap = Maps.newHashMap();
+    exceptedGroupRolesMap.put("group1", Sets.newHashSet("role1"));
+    verifyGroupRolesMap(actualGroupRolesMap, exceptedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+
+  // test filtering out the orphaned group:
+  // group1=role1, role2
+  // group2=role2
+  @Test
+  public void testImportExportPolicy6() throws Exception {
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1", "role2"));
+    sentryGroupRolesMap1.put("group2", Sets.newHashSet("role2"));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    sentryStore.importSentryMetaData(tSentryMappingData1, false);
+
+    // drop role2; group2 becomes an orphaned group
+    sentryStore.dropSentryRole("role2");
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1", "group2"));
+
+    // test the result data for the privilege
+    assertTrue(privilegesList.isEmpty());
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> expectedGroupRolesMap = Maps.newHashMap();
+    expectedGroupRolesMap.put("group1", Sets.newHashSet("role1"));
+    verifyGroupRolesMap(actualGroupRolesMap, expectedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    assertTrue(actualRolePrivilegesMap.isEmpty());
+  }
+
+  // call import twice with no overlapping data; both imports run in overwrite mode:
+  // The data for 1st import:
+  // group1=role1
+  // role1=privilege1
+  // The data for 2nd import:
+  // group2=role2,role3
+  // group3=role2,role3
+  // role2=privilege2
+  // role3=privilege2
+  // Both the import API importSentryMetaData and the export APIs getRolesMap,
+  // getGroupsMap, and getPrivilegesList are tested.
+  @Test
+  public void testImportExportPolicy7() throws Exception {
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1"));
+    sentryRolePrivilegesMap1.put("role1", Sets.newHashSet(tSentryPrivilege1));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    // the import with overwrite mode
+    sentryStore.importSentryMetaData(tSentryMappingData1, true);
+
+    TSentryMappingData tSentryMappingData2 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap2 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap2 = Maps.newHashMap();
+    sentryGroupRolesMap2.put("group2", Sets.newHashSet("role2", "role3"));
+    sentryGroupRolesMap2.put("group3", Sets.newHashSet("role2", "role3"));
+    sentryRolePrivilegesMap2.put("role2", Sets.newHashSet(tSentryPrivilege2));
+    sentryRolePrivilegesMap2.put("role3", Sets.newHashSet(tSentryPrivilege2));
+    tSentryMappingData2.setGroupRolesMap(sentryGroupRolesMap2);
+    tSentryMappingData2.setRolePrivilegesMap(sentryRolePrivilegesMap2);
+    // the import with overwrite mode
+    sentryStore.importSentryMetaData(tSentryMappingData2, true);
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1", "role2", "role3"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1", "group2", "group3"));
+
+    // test the result data for the privilege
+    verifyPrivileges(privilegesList, Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2));
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> expectedGroupRolesMap = Maps.newHashMap();
+    expectedGroupRolesMap.put("group1", Sets.newHashSet("role1"));
+    expectedGroupRolesMap.put("group2", Sets.newHashSet("role2", "role3"));
+    expectedGroupRolesMap.put("group3", Sets.newHashSet("role2", "role3"));
+    verifyGroupRolesMap(actualGroupRolesMap, expectedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    Map<String, Set<TSentryPrivilege>> expectedRolePrivilegesMap = Maps.newHashMap();
+    expectedRolePrivilegesMap.put("role1", Sets.newHashSet(tSentryPrivilege1));
+    expectedRolePrivilegesMap.put("role2", Sets.newHashSet(tSentryPrivilege2));
+    expectedRolePrivilegesMap.put("role3", Sets.newHashSet(tSentryPrivilege2));
+    verifyRolePrivilegesMap(actualRolePrivilegesMap, expectedRolePrivilegesMap);
+  }
+
+  // call import twice with overlapping data; both imports run in overwrite mode:
+  // The data for 1st import:
+  // group1=role1, role2
+  // group2=role1, role2
+  // group3=role1, role2
+  // role1=privilege1,privilege2,privilege3,privilege4,privilege5
+  // role2=privilege1,privilege2,privilege3,privilege4,privilege5
+  // The data for 2nd import:
+  // group1=role2,role3
+  // group2=role2,role3
+  // group3=role2,role3
+  // role2=privilege4,privilege5,privilege6,privilege7,privilege8
+  // role3=privilege4,privilege5,privilege6,privilege7,privilege8
+  // Both the import API importSentryMetaData and the export APIs getRolesMap,
+  // getGroupsMap, and getPrivilegesList are tested.
+  @Test
+  public void testImportExportPolicy8() throws Exception {
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1", "role2"));
+    sentryGroupRolesMap1.put("group2", Sets.newHashSet("role1", "role2"));
+    sentryGroupRolesMap1.put("group3", Sets.newHashSet("role1", "role2"));
+    sentryRolePrivilegesMap1.put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5));
+    sentryRolePrivilegesMap1.put("role2", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    // the import with overwrite mode
+    sentryStore.importSentryMetaData(tSentryMappingData1, true);
+
+    TSentryMappingData tSentryMappingData2 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap2 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap2 = Maps.newHashMap();
+    sentryGroupRolesMap2.put("group1", Sets.newHashSet("role2", "role3"));
+    sentryGroupRolesMap2.put("group2", Sets.newHashSet("role2", "role3"));
+    sentryGroupRolesMap2.put("group3", Sets.newHashSet("role2", "role3"));
+    sentryRolePrivilegesMap2.put("role2", Sets.newHashSet(tSentryPrivilege4, tSentryPrivilege5,
+        tSentryPrivilege6, tSentryPrivilege7, tSentryPrivilege8));
+    sentryRolePrivilegesMap2.put("role3", Sets.newHashSet(tSentryPrivilege4, tSentryPrivilege5,
+        tSentryPrivilege6, tSentryPrivilege7, tSentryPrivilege8));
+    tSentryMappingData2.setGroupRolesMap(sentryGroupRolesMap2);
+    tSentryMappingData2.setRolePrivilegesMap(sentryRolePrivilegesMap2);
+    // the import with overwrite mode
+    sentryStore.importSentryMetaData(tSentryMappingData2, true);
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+    List<MSentryPrivilege> privilegesList = sentryStore.getPrivilegesList();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1", "role2", "role3"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1", "group2", "group3"));
+
+    // test the result data for the privilege
+    verifyPrivileges(privilegesList, Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5, tSentryPrivilege6,
+        tSentryPrivilege7, tSentryPrivilege8));
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> expectedGroupRolesMap = Maps.newHashMap();
+    expectedGroupRolesMap.put("group1", Sets.newHashSet("role1", "role2", "role3"));
+    expectedGroupRolesMap.put("group2", Sets.newHashSet("role1", "role2", "role3"));
+    expectedGroupRolesMap.put("group3", Sets.newHashSet("role1", "role2", "role3"));
+    verifyGroupRolesMap(actualGroupRolesMap, expectedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    Map<String, Set<TSentryPrivilege>> expectedRolePrivilegesMap = Maps.newHashMap();
+    expectedRolePrivilegesMap.put("role1", Sets.newHashSet(tSentryPrivilege1, tSentryPrivilege2,
+        tSentryPrivilege3, tSentryPrivilege4, tSentryPrivilege5));
+    // role2 should be overwritten
+    expectedRolePrivilegesMap.put("role2", Sets.newHashSet(tSentryPrivilege4, tSentryPrivilege5,
+        tSentryPrivilege6, tSentryPrivilege7, tSentryPrivilege8));
+    expectedRolePrivilegesMap.put("role3", Sets.newHashSet(tSentryPrivilege4, tSentryPrivilege5,
+        tSentryPrivilege6, tSentryPrivilege7, tSentryPrivilege8));
+    verifyRolePrivilegesMap(actualRolePrivilegesMap, expectedRolePrivilegesMap);
+  }
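+
+  // In overwrite mode a re-imported role keeps only the privileges carried by
+  // the latest payload (role2 above ends with privilege4-privilege8), while a
+  // role absent from the payload (role1) is left untouched.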
+
+  // test importing privileges with the actions ALL, *, select, and insert;
+  // ALL and * should replace select and insert
+  // The data for import:
+  // group1=role1, role2
+  // role1=testPrivilege1,testPrivilege2,testPrivilege3,testPrivilege4
+  // role2=testPrivilege5,testPrivilege6,testPrivilege7,testPrivilege8
+  @Test
+  public void testImportExportPolicy9() throws Exception {
+    TSentryPrivilege testPrivilege1 = createTSentryPrivilege(PrivilegeScope.TABLE.name(),
+        "server1", "db1", "tbl1", "", "", AccessConstants.SELECT, TSentryGrantOption.TRUE);
+    TSentryPrivilege testPrivilege2 = createTSentryPrivilege(PrivilegeScope.TABLE.name(),
+        "server1", "db1", "tbl1", "", "", AccessConstants.INSERT, TSentryGrantOption.FALSE);
+    TSentryPrivilege testPrivilege3 = createTSentryPrivilege(PrivilegeScope.TABLE.name(),
+        "server1", "db1", "tbl1", "", "", AccessConstants.ACTION_ALL, TSentryGrantOption.TRUE);
+    TSentryPrivilege testPrivilege4 = createTSentryPrivilege(PrivilegeScope.TABLE.name(),
+        "server1", "db1", "tbl1", "", "", AccessConstants.INSERT, TSentryGrantOption.TRUE);
+    TSentryPrivilege testPrivilege5 = createTSentryPrivilege(PrivilegeScope.TABLE.name(),
+        "server1", "db1", "tbl2", "", "", AccessConstants.SELECT, TSentryGrantOption.TRUE);
+    TSentryPrivilege testPrivilege6 = createTSentryPrivilege(PrivilegeScope.TABLE.name(),
+        "server1", "db1", "tbl2", "", "", AccessConstants.INSERT, TSentryGrantOption.FALSE);
+    TSentryPrivilege testPrivilege7 = createTSentryPrivilege(PrivilegeScope.TABLE.name(),
+        "server1", "db1", "tbl2", "", "", AccessConstants.ALL, TSentryGrantOption.TRUE);
+    TSentryPrivilege testPrivilege8 = createTSentryPrivilege(PrivilegeScope.TABLE.name(),
+        "server1", "db1", "tbl2", "", "", AccessConstants.INSERT, TSentryGrantOption.TRUE);
+
+    TSentryMappingData tSentryMappingData1 = new TSentryMappingData();
+    Map<String, Set<String>> sentryGroupRolesMap1 = Maps.newHashMap();
+    Map<String, Set<TSentryPrivilege>> sentryRolePrivilegesMap1 = Maps.newHashMap();
+    sentryGroupRolesMap1.put("group1", Sets.newHashSet("role1", "role2"));
+    // after import there should be only testPrivilege2, testPrivilege3
+    sentryRolePrivilegesMap1.put("role1",
+        Sets.newHashSet(testPrivilege1, testPrivilege2, testPrivilege3, testPrivilege4));
+    // after import there should be only testPrivilege6, testPrivilege7
+    sentryRolePrivilegesMap1.put("role2",
+        Sets.newHashSet(testPrivilege5, testPrivilege6, testPrivilege7, testPrivilege8));
+    tSentryMappingData1.setGroupRolesMap(sentryGroupRolesMap1);
+    tSentryMappingData1.setRolePrivilegesMap(sentryRolePrivilegesMap1);
+    // the import with overwrite mode
+    sentryStore.importSentryMetaData(tSentryMappingData1, true);
+
+    Map<String, MSentryRole> rolesMap = sentryStore.getRolesMap();
+    Map<String, MSentryGroup> groupsMap = sentryStore.getGroupNameTGroupMap();
+
+    // test the result data for the role
+    verifyRoles(rolesMap, Sets.newHashSet("role1", "role2"));
+
+    // test the result data for the group
+    verifyGroups(groupsMap, Sets.newHashSet("group1"));
+
+    // test the mapping data for group and role
+    Map<String, Set<String>> actualGroupRolesMap = sentryStore.getGroupNameRoleNamesMap();
+    Map<String, Set<String>> expectedGroupRolesMap = Maps.newHashMap();
+    expectedGroupRolesMap.put("group1", Sets.newHashSet("role1", "role2"));
+    verifyGroupRolesMap(actualGroupRolesMap, expectedGroupRolesMap);
+
+    // test the mapping data for role and privilege
+    Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap = sentryStore
+        .getRoleNameTPrivilegesMap();
+    Map<String, Set<TSentryPrivilege>> expectedRolePrivilegesMap = Maps.newHashMap();
+    expectedRolePrivilegesMap.put("role1", Sets.newHashSet(testPrivilege2, testPrivilege3));
+    expectedRolePrivilegesMap.put("role2", Sets.newHashSet(testPrivilege6, testPrivilege7));
+    verifyRolePrivilegesMap(actualRolePrivilegesMap, expectedRolePrivilegesMap);
+  }
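+
+  // The collapsing rule exercised above, as implied by the expected results: on
+  // import, an ALL (or *) privilege absorbs select/insert privileges on the same
+  // authorizable with the same grant option. For tbl1 with grantOption=TRUE,
+  // {select, insert, all} collapses to {all}; testPrivilege2 survives only
+  // because its grant option (FALSE) differs.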
+
+  private void verifyRoles(Map<String, MSentryRole> actualRoleMap, Set<String> expectedRoleNameSet) {
+    assertEquals(expectedRoleNameSet.size(), actualRoleMap.keySet().size());
+    for (String roleName : actualRoleMap.keySet()) {
+      assertTrue(expectedRoleNameSet.contains(roleName));
+    }
+  }
+
+  private void verifyGroups(Map<String, MSentryGroup> actualGroupsMap,
+      Set<String> expectedGroupNameSet) {
+    assertEquals(expectedGroupNameSet.size(), actualGroupsMap.keySet().size());
+    for (String groupName : actualGroupsMap.keySet()) {
+      assertTrue(expectedGroupNameSet.contains(groupName));
+    }
+  }
+
+  private void verifyPrivileges(List<MSentryPrivilege> actualPrivileges,
+      Set<TSentryPrivilege> expectedTSentryPrivilegeSet) {
+    assertEquals(expectedTSentryPrivilegeSet.size(), actualPrivileges.size());
+    for (MSentryPrivilege mSentryPrivilege : actualPrivileges) {
+      boolean isFound = false;
+      for (TSentryPrivilege tSentryPrivilege : expectedTSentryPrivilegeSet) {
+        isFound = compareTSentryPrivilege(sentryStore.convertToTSentryPrivilege(mSentryPrivilege),
+            tSentryPrivilege);
+        if (isFound) {
+          break;
+        }
+      }
+      assertTrue(isFound);
+    }
+  }
+
+  private void verifyGroupRolesMap(Map<String, Set<String>> actualGroupRolesMap,
+      Map<String, Set<String>> expectedGroupRolesMap) {
+    assertEquals(expectedGroupRolesMap.keySet().size(), actualGroupRolesMap.keySet().size());
+    for (String groupName : actualGroupRolesMap.keySet()) {
+      Set<String> expectedRoles = expectedGroupRolesMap.get(groupName);
+      Set<String> actualRoles = actualGroupRolesMap.get(groupName);
+      assertEquals(expectedRoles.size(), actualRoles.size());
+      assertEquals(expectedRoles, actualRoles);
+    }
+  }
+
+  private void verifyRolePrivilegesMap(Map<String, Set<TSentryPrivilege>> actualRolePrivilegesMap,
+      Map<String, Set<TSentryPrivilege>> expectedRolePrivilegesMap) {
+    assertEquals(expectedRolePrivilegesMap.keySet().size(), actualRolePrivilegesMap.keySet().size());
+    for (String roleName : expectedRolePrivilegesMap.keySet()) {
+      Set<TSentryPrivilege> expectedTSentryPrivileges = expectedRolePrivilegesMap.get(roleName);
+      Set<TSentryPrivilege> actualTSentryPrivileges = actualRolePrivilegesMap.get(roleName);
+      assertEquals(expectedTSentryPrivileges.size(), actualTSentryPrivileges.size());
+      for (TSentryPrivilege actualPrivilege : actualTSentryPrivileges) {
+        boolean isFound = false;
+        for (TSentryPrivilege expectedPrivilege : expectedTSentryPrivileges) {
+          isFound = compareTSentryPrivilege(expectedPrivilege, actualPrivilege);
+          if (isFound) {
+            break;
+          }
+        }
+        assertTrue(isFound);
+      }
+    }
+  }
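+
+  // The helpers above match privileges pairwise through compareTSentryPrivilege
+  // rather than relying on Set.equals, presumably because the Thrift-generated
+  // TSentryPrivilege#equals also compares the create time, which is stamped at
+  // import and so differs from the locally built expected objects.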
+
+  private TSentryPrivilege createTSentryPrivilege(String scope, String server, String dbName,
+      String tableName, String columnName, String uri, String action, TSentryGrantOption grantOption) {
+    TSentryPrivilege tSentryPrivilege = new TSentryPrivilege();
+    tSentryPrivilege.setPrivilegeScope(scope);
+    tSentryPrivilege.setServerName(server);
+    tSentryPrivilege.setDbName(dbName);
+    tSentryPrivilege.setTableName(tableName);
+    tSentryPrivilege.setColumnName(columnName);
+    tSentryPrivilege.setURI(uri);
+    tSentryPrivilege.setAction(action);
+    tSentryPrivilege.setGrantOption(grantOption);
+    return tSentryPrivilege;
+  }
+
+  // compare two TSentryPrivilege objects, ignoring the create time
+  private boolean compareTSentryPrivilege(TSentryPrivilege tSentryPrivilege1,
+      TSentryPrivilege tSentryPrivilege2) {
+    if (tSentryPrivilege1 == null) {
+      return tSentryPrivilege2 == null;
+    }
+    if (tSentryPrivilege2 == null) {
+      return false;
+    }
+
+    boolean this_present_privilegeScope = tSentryPrivilege1.isSetPrivilegeScope();
+    boolean that_present_privilegeScope = tSentryPrivilege2.isSetPrivilegeScope();
+    if (this_present_privilegeScope || that_present_privilegeScope) {
+      if (!(this_present_privilegeScope && that_present_privilegeScope)) {
+        return false;
+      }
+      if (!tSentryPrivilege1.getPrivilegeScope().equalsIgnoreCase(
+          tSentryPrivilege2.getPrivilegeScope())) {
+        return false;
+      }
+    }
+
+    boolean this_present_serverName = tSentryPrivilege1.isSetServerName();
+    boolean that_present_serverName = tSentryPrivilege2.isSetServerName();
+    if (this_present_serverName || that_present_serverName) {
+      if (!(this_present_serverName && that_present_serverName)) {
+        return false;
+      }
+      if (!tSentryPrivilege1.getServerName().equalsIgnoreCase(tSentryPrivilege2.getServerName())) {
+        return false;
+      }
+    }
+
+    boolean this_present_dbName = tSentryPrivilege1.isSetDbName();
+    boolean that_present_dbName = tSentryPrivilege2.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName)) {
+        return false;
+      }
+      if (!tSentryPrivilege1.getDbName().equalsIgnoreCase(tSentryPrivilege2.getDbName())) {
+        return false;
+      }
+    }
+
+    boolean this_present_tableName = tSentryPrivilege1.isSetTableName();
+    boolean that_present_tableName = tSentryPrivilege2.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName)) {
+        return false;
+      }
+      if (!tSentryPrivilege1.getTableName().equalsIgnoreCase(tSentryPrivilege2.getTableName())) {
+        return false;
+      }
+    }
+
+    boolean this_present_URI = tSentryPrivilege1.isSetURI();
+    boolean that_present_URI = tSentryPrivilege2.isSetURI();
+    if (this_present_URI || that_present_URI) {
+      if (!(this_present_URI && that_present_URI)) {
+        return false;
+      }
+      if (!tSentryPrivilege1.getURI().equalsIgnoreCase(tSentryPrivilege2.getURI())) {
+        return false;
+      }
+    }
+
+    boolean this_present_action = tSentryPrivilege1.isSetAction();
+    boolean that_present_action = tSentryPrivilege2.isSetAction();
+    if (this_present_action || that_present_action) {
+      if (!(this_present_action && that_present_action)) {
+        return false;
+      }
+      if (!tSentryPrivilege1.getAction().equalsIgnoreCase(tSentryPrivilege2.getAction())) {
+        return false;
+      }
+    }
+
+    boolean this_present_grantOption = tSentryPrivilege1.isSetGrantOption();
+    boolean that_present_grantOption = tSentryPrivilege2.isSetGrantOption();
+    if (this_present_grantOption || that_present_grantOption) {
+      if (!(this_present_grantOption && that_present_grantOption)) {
+        return false;
+      }
+      if (!tSentryPrivilege1.getGrantOption().equals(tSentryPrivilege2.getGrantOption())) {
+        return false;
+      }
+    }
+
+    boolean this_present_columnName = tSentryPrivilege1.isSetColumnName();
+    boolean that_present_columnName = tSentryPrivilege2.isSetColumnName();
+    if (this_present_columnName || that_present_columnName) {
+      if (!(this_present_columnName && that_present_columnName)) {
+        return false;
+      }
+      if (!tSentryPrivilege1.getColumnName().equalsIgnoreCase(tSentryPrivilege2.getColumnName())) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStoreToAuthorizable.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStoreToAuthorizable.java
index 922cbc277..25f94fa05 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStoreToAuthorizable.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryStoreToAuthorizable.java
@@ -18,7 +18,7 @@
 
 package org.apache.sentry.provider.db.service.persistent;
 
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
 
 import org.apache.sentry.core.model.db.AccessConstants;
 import org.apache.sentry.provider.db.service.model.MSentryPrivilege;
@@ -30,10 +30,10 @@ public class TestSentryStoreToAuthorizable {
 
   @Test
   public void testServer() {
-    privilege = new MSentryPrivilege(null, null, "server1", null, null, null, null, null);
+    privilege = new MSentryPrivilege(null, "server1", null, null, null, null, null);
     assertEquals("server=server1",
         SentryStore.toAuthorizable(privilege));
-    privilege = new MSentryPrivilege(null, null, "server1", null, null, null, null,
+    privilege = new MSentryPrivilege(null, "server1", null, null, null, null,
         AccessConstants.ALL);
     assertEquals("server=server1",
         SentryStore.toAuthorizable(privilege));
@@ -41,18 +41,18 @@ public void testServer() {
 
   @Test
   public void testTable() {
-    privilege = new MSentryPrivilege(null, null, "server1", "db1", "tbl1", null, null, null);
+    privilege = new MSentryPrivilege(null, "server1", "db1", "tbl1", null, null, null);
     assertEquals("server=server1->db=db1->table=tbl1",
         SentryStore.toAuthorizable(privilege));
-    privilege = new MSentryPrivilege(null, null, "server1", "db1", "tbl1", null, null,
+    privilege = new MSentryPrivilege(null, "server1", "db1", "tbl1", null, null,
         AccessConstants.INSERT);
     assertEquals("server=server1->db=db1->table=tbl1->action=insert",
         SentryStore.toAuthorizable(privilege));
-    privilege = new MSentryPrivilege(null, null, "server1", "db1", "tbl1", null, null,
+    privilege = new MSentryPrivilege(null, "server1", "db1", "tbl1", null, null,
         AccessConstants.SELECT);
     assertEquals("server=server1->db=db1->table=tbl1->action=select",
         SentryStore.toAuthorizable(privilege));
-    privilege = new MSentryPrivilege(null, null, "server1", "db1", "tbl1", null, null,
+    privilege = new MSentryPrivilege(null, "server1", "db1", "tbl1", null, null,
        AccessConstants.ALL);
     assertEquals("server=server1->db=db1->table=tbl1",
         SentryStore.toAuthorizable(privilege));
@@ -60,10 +60,10 @@ public void testTable() {
 
   @Test
   public void testDb() {
-    privilege = new MSentryPrivilege(null, null, "server1", "db1", null, null, null, null);
+    privilege = new MSentryPrivilege(null, "server1", "db1", null, null, null, null);
     assertEquals("server=server1->db=db1",
         SentryStore.toAuthorizable(privilege));
-    privilege = new MSentryPrivilege(null, null, "server1", "db1", null, null, null,
+    privilege = new MSentryPrivilege(null, "server1", "db1", null, null, null,
         AccessConstants.ALL);
     assertEquals("server=server1->db=db1",
         SentryStore.toAuthorizable(privilege));
@@ -71,14 +71,14 @@ public void testDb() {
 
   @Test
   public void testUri() {
-    privilege = new MSentryPrivilege(null, null, "server1", null, null, null, "file:///", null);
+    privilege = new MSentryPrivilege(null, "server1", null, null, null, "file:///", null);
     assertEquals("server=server1->uri=file:///",
         SentryStore.toAuthorizable(privilege));
-    privilege = new MSentryPrivilege(null, null, "server1", null, null, null, "file:///",
+    privilege = new MSentryPrivilege(null, "server1", null, null, null, "file:///",
         AccessConstants.SELECT);
     assertEquals("server=server1->uri=file:///->action=select",
         SentryStore.toAuthorizable(privilege));
-    privilege = new MSentryPrivilege(null, null, "server1", null, null, null, "file:///",
+    privilege = new MSentryPrivilege(null, "server1", null, null, null, "file:///",
         AccessConstants.ALL);
     assertEquals("server=server1->uri=file:///",
         SentryStore.toAuthorizable(privilege));
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryVersion.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryVersion.java
index 0add58b37..3e2f64fd2 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryVersion.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/persistent/TestSentryVersion.java
@@ -18,7 +18,7 @@
 
 package org.apache.sentry.provider.db.service.persistent;
 
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
 
 import java.io.File;
 
@@ -41,6 +41,7 @@ public void setup() throws Exception {
     conf = new Configuration(false);
     conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dataDir.getPath()
         + ";create=true");
+    conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy");
   }
 
   /**
@@ -60,12 +61,12 @@ public void testVerifySentryVersionCheck() throws Exception {
 
   /**
    * Verify that store is not initialized by default without schema pre-created
-   * 
+   *
    * @throws Exception
    */
   @Test(expected = SentryNoSuchObjectException.class)
   public void testNegSentrySchemaDefault() throws Exception {
-    SentryStore sentryStore = new SentryStore(conf);
+    new SentryStore(conf);
   }
 
   /**
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestAuthorizingDDLAuditLogWithKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestAuthorizingDDLAuditLogWithKerberos.java
new file mode 100644
index 000000000..426b2f7ab
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestAuthorizingDDLAuditLogWithKerberos.java
@@ -0,0 +1,295 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db.service.thrift;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.sentry.provider.db.log.appender.AuditLoggerTestAppender;
+import org.apache.sentry.provider.db.log.util.CommandUtil;
+import org.apache.sentry.provider.db.log.util.Constants;
+import org.apache.sentry.service.thrift.SentryServiceIntegrationBase;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Sets;
+
+public class TestAuthorizingDDLAuditLogWithKerberos extends SentryServiceIntegrationBase {
+
+  @BeforeClass
+  public static void setupLog4j() throws Exception {
+    Logger logger = Logger.getLogger("sentry.hive.authorization.ddl.logger");
+    AuditLoggerTestAppender testAppender = new AuditLoggerTestAppender();
+    logger.addAppender(testAppender);
+    logger.setLevel(Level.INFO);
+  }
+
+  @Test
+  public void testBasic() throws Exception {
+    runTestAsSubject(new TestOperation() {
+      @Override
+      public void runTestAsSubject() throws Exception {
+        String requestorUserName = ADMIN_USER;
+        Set<String> requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP);
+        setLocalGroupMapping(requestorUserName, requestorUserGroupNames);
+        writePolicyFile();
+
+        String roleName = "testRole";
+        String errorRoleName = "errorRole";
+        String serverName = "server1";
+        String groupName = "testGroup";
+        String dbName = "dbTest";
+        String tableName = "tableTest";
+        Map<String, String> fieldValueMap = new HashMap<String, String>();
+
+        // for successful audit log
+        client.createRole(requestorUserName, roleName);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_CREATE_ROLE);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "CREATE ROLE " + roleName);
+        fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE);
+        // the IP address is validated separately inside assertAuditLog
+        fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+        assertAuditLog(fieldValueMap);
+
+        client.grantRoleToGroup(requestorUserName, groupName, roleName);
+        fieldValueMap.clear();
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_ADD_ROLE);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ROLE " + roleName
+            + " TO GROUP " + groupName);
+        fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE);
+        fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+        assertAuditLog(fieldValueMap);
+
+        client.grantDatabasePrivilege(requestorUserName, roleName, serverName, dbName, "ALL");
+        fieldValueMap.clear();
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ALL ON DATABASE " + dbName
+            + " TO ROLE " + roleName);
+        fieldValueMap.put(Constants.LOG_FIELD_DATABASE_NAME, dbName);
+        fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE);
+        fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+        assertAuditLog(fieldValueMap);
+
+        client.grantTablePrivilege(requestorUserName, roleName, serverName, dbName, tableName,
+            "SELECT", true);
+        fieldValueMap.clear();
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT SELECT ON TABLE " + tableName
+            + " TO ROLE " + roleName + " WITH GRANT OPTION");
+        fieldValueMap.put(Constants.LOG_FIELD_TABLE_NAME, tableName);
+        fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE);
+        fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+        assertAuditLog(fieldValueMap);
+
+        // for error audit log
+        try {
+          client.createRole(requestorUserName, roleName);
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_CREATE_ROLE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "CREATE ROLE " + roleName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+        try {
+          client.grantRoleToGroup(requestorUserName, groupName, errorRoleName);
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_ADD_ROLE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ROLE " + errorRoleName
+              + " TO GROUP " + groupName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+        try {
+          client
+              .grantDatabasePrivilege(requestorUserName, errorRoleName, serverName, dbName, "ALL");
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ALL ON DATABASE " + dbName
+              + " TO ROLE " + errorRoleName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+        try {
+          client.grantDatabasePrivilege(requestorUserName, errorRoleName, serverName, dbName,
+              "INSERT");
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT INSERT ON DATABASE "
+              + dbName + " TO ROLE " + errorRoleName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+        try {
+          client.grantDatabasePrivilege(requestorUserName, errorRoleName, serverName, dbName,
+              "SELECT");
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT SELECT ON DATABASE "
+              + dbName + " TO ROLE " + errorRoleName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+        try {
+          client.grantTablePrivilege(requestorUserName, errorRoleName, serverName, dbName,
+              tableName, "SELECT");
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT SELECT ON TABLE "
+              + tableName + " TO ROLE " + errorRoleName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+
+        client.revokeTablePrivilege(requestorUserName, roleName, serverName, dbName, tableName,
+            "SELECT");
+        fieldValueMap.clear();
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_REVOKE_PRIVILEGE);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE SELECT ON TABLE " + tableName
+            + " FROM ROLE " + roleName);
+        fieldValueMap.put(Constants.LOG_FIELD_TABLE_NAME, tableName);
+        fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE);
+        fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+        assertAuditLog(fieldValueMap);
+
+        client.revokeDatabasePrivilege(requestorUserName, roleName, serverName, dbName, "ALL");
+        fieldValueMap.clear();
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_REVOKE_PRIVILEGE);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE ALL ON DATABASE " + dbName
+            + " FROM ROLE " + roleName);
+        fieldValueMap.put(Constants.LOG_FIELD_DATABASE_NAME, dbName);
+        fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE);
+        fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+        assertAuditLog(fieldValueMap);
+
+        client.revokeRoleFromGroup(requestorUserName, groupName, roleName);
+        fieldValueMap.clear();
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DELETE_ROLE);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE ROLE " + roleName
+            + " FROM GROUP " + groupName);
+        fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE);
+        fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+        assertAuditLog(fieldValueMap);
+
+        client.dropRole(requestorUserName, roleName);
+        fieldValueMap.clear();
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DROP_ROLE);
+        fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "DROP ROLE " + roleName);
+        fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE);
+        fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+        assertAuditLog(fieldValueMap);
+
+        // for error audit log
+        try {
+          client.revokeTablePrivilege(requestorUserName, errorRoleName, serverName, dbName,
+              tableName, "SELECT");
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_REVOKE_PRIVILEGE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE SELECT ON TABLE "
+              + tableName + " FROM ROLE " + errorRoleName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+
+        try {
+          client.revokeDatabasePrivilege(requestorUserName, errorRoleName, serverName, dbName,
+              "ALL");
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_REVOKE_PRIVILEGE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE ALL ON DATABASE " + dbName
+              + " FROM ROLE " + errorRoleName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+
+        try {
+          client.revokeRoleFromGroup(requestorUserName, groupName, errorRoleName);
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DELETE_ROLE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE ROLE " + errorRoleName
+              + " FROM GROUP " + groupName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+
+        try {
+          client.dropRole(requestorUserName, errorRoleName);
+          fail("Exception should have been thrown");
+        } catch (Exception e) {
+          fieldValueMap.clear();
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DROP_ROLE);
+          fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "DROP ROLE " + errorRoleName);
+          fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE);
+          fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null);
+          assertAuditLog(fieldValueMap);
+        }
+      }
+    });
+  }
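+
+  // assertAuditLog checks the most recent event captured by
+  // AuditLoggerTestAppender, so each client call above is asserted immediately
+  // after it runs; the IP address field is validated via
+  // CommandUtil.assertIPInAuditLog rather than by exact string comparison.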
+
+  private void assertAuditLog(Map<String, String> fieldValueMap) throws Exception {
+    assertThat(AuditLoggerTestAppender.getLastLogLevel(), is(Level.INFO));
+    JSONObject jsonObject = new JSONObject(AuditLoggerTestAppender.getLastLogEvent());
+    if (fieldValueMap != null) {
+      for (Map.Entry<String, String> entry : fieldValueMap.entrySet()) {
+        String entryKey = entry.getKey();
+        if (Constants.LOG_FIELD_IP_ADDRESS.equals(entryKey)) {
+          assertTrue(CommandUtil.assertIPInAuditLog(jsonObject.get(entryKey).toString()));
+        } else {
+          assertTrue(entry.getValue().equalsIgnoreCase(jsonObject.get(entryKey).toString()));
+        }
+      }
+    }
+  }
+}
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java
index ea4e9678b..28c2971bb 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java
@@ -17,11 +17,12 @@
  */
 package org.apache.sentry.provider.db.service.thrift;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.provider.db.SentryThriftAPIMismatchException;
 import org.apache.sentry.provider.db.service.thrift.PolicyStoreConstants.PolicyStoreServerConfig;
-import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.apache.sentry.service.thrift.ServiceConstants;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -68,4 +69,12 @@ public NoopNotificationHandler(Configuration config) throws Exception {
       super(config);
     }
   }
+  @Test(expected=SentryThriftAPIMismatchException.class)
+  public void testSentryThriftAPIMismatch() throws Exception {
+    SentryPolicyStoreProcessor.validateClientVersion(ServiceConstants.ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT -1);
+  }
+  @Test
+  public void testSentryThriftAPIMatchVersion() throws Exception {
+    SentryPolicyStoreProcessor.validateClientVersion(ServiceConstants.ThriftConstants.TSENTRY_SERVICE_VERSION_CURRENT);
+  }
 }
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForHaWithoutKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForHaWithoutKerberos.java
index e02e0bad7..ac4df7718 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForHaWithoutKerberos.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForHaWithoutKerberos.java
@@ -17,13 +17,11 @@
  */
 package org.apache.sentry.provider.db.service.thrift;
 
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
 
 import java.util.HashSet;
 import java.util.Set;
 
-import junit.framework.Assert;
-
 import org.apache.sentry.core.common.ActiveRoleSet;
 import org.apache.sentry.core.model.db.AccessConstants;
 import org.apache.sentry.core.model.db.Database;
@@ -107,18 +105,18 @@ public void testQueryPushDown() throws Exception {
     assertEquals("Privilege not assigned to role2 !!", 1, listPrivilegesByRoleName.size());
 
     Set<String> listPrivilegesForProvider = client.listPrivilegesForProvider(Sets.newHashSet(group1, group2), ActiveRoleSet.ALL, new Server("server"), new Database("db2"));
-    Assert.assertEquals("Privilege not correctly assigned to roles !!",
+    assertEquals("Privilege not correctly assigned to roles !!",
         Sets.newHashSet("server=server->db=db2->table=table4->action=all",
             "server=server->db=db2->table=table3->action=all"), listPrivilegesForProvider);
 
     listPrivilegesForProvider = client.listPrivilegesForProvider(Sets.newHashSet(group1, group2), ActiveRoleSet.ALL, new Server("server"), new Database("db3"));
-    Assert.assertEquals("Privilege not correctly assigned to roles !!", Sets.newHashSet("server=server->db=db3->table=table5->action=all"), listPrivilegesForProvider);
+    assertEquals("Privilege not correctly assigned to roles !!", Sets.newHashSet("server=server->db=db3->table=table5->action=all"), listPrivilegesForProvider);
 
     listPrivilegesForProvider = client.listPrivilegesForProvider(Sets.newHashSet(group1, group2), new ActiveRoleSet(Sets.newHashSet(roleName1)), new Server("server"), new Database("db3"));
-    Assert.assertEquals("Privilege not correctly assigned to roles !!", Sets.newHashSet("server=+"), listPrivilegesForProvider);
+    assertEquals("Privilege not correctly assigned to roles !!", Sets.newHashSet("server=+"), listPrivilegesForProvider);
 
     listPrivilegesForProvider = client.listPrivilegesForProvider(Sets.newHashSet(group1, group2), new ActiveRoleSet(Sets.newHashSet(roleName1)), new Server("server1"));
-    Assert.assertEquals("Privilege not correctly assigned to roles !!", new HashSet<String>(), listPrivilegesForProvider);
+    assertEquals("Privilege not correctly assigned to roles !!", new HashSet<String>(), listPrivilegesForProvider);
   }
 
@@ -178,4 +176,4 @@ public void testHostSubstitution() throws Exception {
   // We just need to ensure that we are able to correct connect to the server
   connectToSentryService();
 }
-}
\ No newline at end of file
+}
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForPoolHAWithoutKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForPoolHAWithoutKerberos.java
new file mode 100644
index 000000000..9ba7d23ce
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForPoolHAWithoutKerberos.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db.service.thrift;
+
+import org.junit.BeforeClass;
+
+public class TestSentryServerForPoolHAWithoutKerberos extends TestSentryServerForHaWithoutKerberos {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    kerberos = false;
+    haEnabled = true;
+    pooled = true;
+    beforeSetup();
+    setupConf();
+    startSentryService();
+    afterSetup();
+  }
+
+}
\ No newline at end of file
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForPoolWithoutKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForPoolWithoutKerberos.java
new file mode 100644
index 000000000..62fbb2f47
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerForPoolWithoutKerberos.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db.service.thrift;
+
+import org.junit.BeforeClass;
+
+public class TestSentryServerForPoolWithoutKerberos extends TestSentryServerWithoutKerberos {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    kerberos = false;
+    haEnabled = false;
+    pooled = true;
+    beforeSetup();
+    setupConf();
+    startSentryService();
+    afterSetup();
+  }
+
+}
\ No newline at end of file
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java
index 5afc5b69c..0792eb6bf 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java
@@ -17,14 +17,12 @@
  */
 package org.apache.sentry.provider.db.service.thrift;
 
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
 
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Set;
 
-import junit.framework.Assert;
-
 import org.apache.sentry.core.common.ActiveRoleSet;
 import org.apache.sentry.core.common.Authorizable;
 import org.apache.sentry.core.model.db.AccessConstants;
@@ -113,18 +111,18 @@ public void testQueryPushDown() throws Exception {
     assertEquals("Privilege not assigned to role2 !!", 1, listPrivilegesByRoleName.size());
 
     Set<String> listPrivilegesForProvider = client.listPrivilegesForProvider(Sets.newHashSet(group1, group2), ActiveRoleSet.ALL, new Server("server"), new Database("db2"));
-    Assert.assertEquals("Privilege not correctly assigned to roles !!",
+    assertEquals("Privilege not correctly assigned to roles !!",
        Sets.newHashSet("server=server->db=db2->table=table4->action=all",
            "server=server->db=db2->table=table3->action=all"), listPrivilegesForProvider);
 
     listPrivilegesForProvider = client.listPrivilegesForProvider(Sets.newHashSet(group1, group2), ActiveRoleSet.ALL, new Server("server"), new Database("db3"));
-    Assert.assertEquals("Privilege not correctly assigned to roles !!", Sets.newHashSet("server=server->db=db3->table=table5->action=all"), listPrivilegesForProvider);
+    assertEquals("Privilege not correctly assigned to roles !!", Sets.newHashSet("server=server->db=db3->table=table5->action=all"), listPrivilegesForProvider);
 
     listPrivilegesForProvider = client.listPrivilegesForProvider(Sets.newHashSet(group1, group2), new ActiveRoleSet(Sets.newHashSet(roleName1)), new Server("server"), new Database("db3"));
-    Assert.assertEquals("Privilege not correctly assigned to roles !!", Sets.newHashSet("server=+"), listPrivilegesForProvider);
+    assertEquals("Privilege not correctly assigned to roles !!", Sets.newHashSet("server=+"), listPrivilegesForProvider);
 
     listPrivilegesForProvider = client.listPrivilegesForProvider(Sets.newHashSet(group1, group2), new ActiveRoleSet(Sets.newHashSet(roleName1)), new Server("server1"));
-    Assert.assertEquals("Privilege not correctly assigned to roles !!", new HashSet<String>(), listPrivilegesForProvider);
+    assertEquals("Privilege not correctly assigned to roles !!", new HashSet<String>(), listPrivilegesForProvider);
   }
 
@@ -174,4 +172,4 @@ public void testDropRole() throws Exception {
         ActiveRoleSet.ALL).size());
   }
 
-}
\ No newline at end of file
+}
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceClientPool.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceClientPool.java
new file mode 100644
index 000000000..e5285bd0a
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceClientPool.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db.service.thrift;
+
+import static org.junit.Assert.assertTrue;
+
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+
+import javax.security.auth.Subject;
+
+import org.apache.sentry.SentryUserException;
+import org.apache.sentry.service.thrift.SentryServiceFactory;
+import org.apache.sentry.service.thrift.SentryServiceIntegrationBase;
+import org.junit.Test;
+
+import com.google.common.collect.Sets;
+
+public class TestSentryServiceClientPool extends SentryServiceIntegrationBase {
+
+  @Test
+  public void testConnectionWhenReconnect() throws Exception {
+    runTestAsSubject(new TestOperation() {
+      @Override
+      public void runTestAsSubject() throws Exception {
+        String requestorUserName = ADMIN_USER;
+        Set<String> requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP);
+        String roleName = "admin_r";
+        setLocalGroupMapping(requestorUserName, requestorUserGroupNames);
+        writePolicyFile();
+
+        client.dropRoleIfExists(requestorUserName, roleName);
+        client.createRole(requestorUserName, roleName);
+        client.listRoles(requestorUserName);
+        stopSentryService();
+        server = new SentryServiceFactory().create(conf);
+        startSentryService();
+        client.listRoles(requestorUserName);
+        client.dropRole(requestorUserName, roleName);
+      }
+    });
+  }
+
+  @Test
+  public void testConnectionWithMultipleRetries() throws Exception {
+    runTestAsSubject(new TestOperation() {
+      @Override
+      public void runTestAsSubject() throws Exception {
+        List<Future<Boolean>> tasks = new ArrayList<Future<Boolean>>();
+        String requestorUserName = ADMIN_USER;
+        Set<String> requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP);
+        String roleName = "admin_r";
+        setLocalGroupMapping(requestorUserName, requestorUserGroupNames);
+        writePolicyFile();
+
+        client.dropRoleIfExists(requestorUserName, roleName);
+        client.createRole(requestorUserName, roleName);
+
+        ExecutorService executorService = Executors.newFixedThreadPool(20);
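+
+        // 30 tasks against a 20-thread pool: the concurrent listRoles calls
+        // below deliberately contend for pooled client connections.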
+
+        Callable<Boolean> func = new Callable<Boolean>() {
+          public Boolean call() throws Exception {
+            return Subject.doAs(clientSubject, new PrivilegedExceptionAction<Boolean>() {
+              @Override
+              public Boolean run() throws Exception {
+                try {
+                  client.listRoles(ADMIN_USER);
+                  return true;
+                } catch (SentryUserException sue) {
+                  return false;
+                }
+              }
+            });
+          }
+        };
+
+        for (int i = 0; i < 30; i++) {
+          FutureTask<Boolean> task = new FutureTask<Boolean>(func);
+          tasks.add(task);
+          executorService.submit(task);
+        }
+
+        for (Future<Boolean> task : tasks) {
+          Boolean result = task.get();
+          assertTrue("Some tasks failed.", result);
+        }
+      }
+    });
+  }
+}
\ No newline at end of file
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceFailureCase.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceFailureCase.java
index 2fd34bdaa..a453ff323 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceFailureCase.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceFailureCase.java
@@ -20,7 +20,6 @@
 
 import java.security.PrivilegedActionException;
 
-import org.apache.sentry.SentryUserException;
 import org.apache.sentry.service.thrift.SentryServiceIntegrationBase;
 import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
 import org.junit.After;
@@ -54,7 +53,7 @@ public void before() throws Exception {
 
   @Override
   @After
-  public void after() throws SentryUserException {
+  public void after() {
   }
 
   @Test
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForHAWithKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForHAWithKerberos.java
index cfe09b5b7..813b30b52 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForHAWithKerberos.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForHAWithKerberos.java
@@ -18,13 +18,18 @@
 
 package org.apache.sentry.provider.db.service.thrift;
 
-import org.apache.sentry.SentryUserException;
+import java.io.File;
+import java.util.Set;
+
+import org.apache.sentry.provider.file.PolicyFile;
 import org.apache.sentry.service.thrift.SentryServiceIntegrationBase;
-import org.junit.After;
+import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import com.google.common.collect.Sets;
+
 /**
  * Test various kerberos related stuff on the SentryService side
 */
@@ -44,21 +49,27 @@ public static void setup() throws Exception {
   @Override
   @Before
   public void before() throws Exception {
+    policyFilePath = new File(dbDir, "local_policy_file.ini");
+    conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE,
+        policyFilePath.getPath());
+    policyFile = new PolicyFile();
+
     connectToSentryService();
   }
 
-  @Override
-  @After
-  public void after() throws SentryUserException {
-  }
-
-  /**
-   * Test that we are correctly substituting "_HOST" if/when needed.
-   *
-   * @throws Exception
-   */
   @Test
-  public void testHostSubstitution() throws Exception {
-    // We just need to ensure that we are able to correct connect to the server
-    connectToSentryService();
+  public void testCreateRole() throws Exception {
+    runTestAsSubject(new TestOperation(){
+      @Override
+      public void runTestAsSubject() throws Exception {
+        String requestorUserName = ADMIN_USER;
+        Set<String> requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP);
+        setLocalGroupMapping(requestorUserName, requestorUserGroupNames);
+        writePolicyFile();
+        String roleName = "admin_r";
+        client.dropRoleIfExists(requestorUserName, roleName);
+        client.createRole(requestorUserName, roleName);
+        client.dropRole(requestorUserName, roleName);
+      }
+    });
   }
 }
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForPoolHAWithKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForPoolHAWithKerberos.java
new file mode 100644
index 000000000..acb906fc8
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForPoolHAWithKerberos.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db.service.thrift;
+
+import org.junit.BeforeClass;
+
+public class TestSentryServiceForPoolHAWithKerberos extends TestSentryServiceWithKerberos {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    kerberos = true;
+    haEnabled = true;
+    pooled = true;
+    beforeSetup();
+    setupConf();
+    startSentryService();
+    afterSetup();
+  }
+
+}
\ No newline at end of file
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForPoolWithKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForPoolWithKerberos.java
new file mode 100644
index 000000000..bd3c1ccba
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceForPoolWithKerberos.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.provider.db.service.thrift; + +import org.junit.BeforeClass; + +public class TestSentryServiceForPoolWithKerberos extends TestSentryServiceWithKerberos { + + @BeforeClass + public static void setup() throws Exception { + kerberos = true; + haEnabled = false; + pooled = true; + beforeSetup(); + setupConf(); + startSentryService(); + afterSetup(); + } + +} \ No newline at end of file diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceImportExport.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceImportExport.java new file mode 100644 index 000000000..dbe4a27a0 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceImportExport.java @@ -0,0 +1,538 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.sentry.provider.db.service.thrift; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Map; +import java.util.Set; + +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +public class TestSentryServiceImportExport extends SentryServiceIntegrationBase { + + // define the privileges + public static String PRIVILEGE1 = "server=server1"; + public static String PRIVILEGE2 = "server=server1->action=select->grantoption=false"; + public static String PRIVILEGE3 = "server=server1->db=db2->action=insert->grantoption=true"; + public static String PRIVILEGE4 = "server=server1->db=db1->table=tbl1->action=insert"; + public static String PRIVILEGE5 = "server=server1->db=db1->table=tbl2->column=col1->action=insert"; + public static String PRIVILEGE6 = "server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true"; + public static String PRIVILEGE7 = "server=server1->db=db1->table=tbl4->column=col1->action=all->grantoption=true"; + public static String PRIVILEGE8 = "server=server1->uri=hdfs://testserver:9999/path2->action=insert"; + + @BeforeClass + public static void setup() throws Exception { + kerberos = false; + setupConf(); + startSentryService(); + } + + @Before + public void preparePolicyFile() throws Exception { + super.before(); + String requestorUserName = ADMIN_USER; + Set<String> requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP); + setLocalGroupMapping(requestorUserName, requestorUserGroupNames); + writePolicyFile(); + } + + // Before the import, the database is empty. + // The following information is imported: + // group1=role1,role2,role3 + // group2=role1,role2,role3 + // group3=role1,role2,role3 + // role1=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8 + // role2=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8 + // role3=privilege1,privilege2,privilege3,privilege4,privilege5,privilege6,privilege7,privilege8 + // Both the import API importPolicy and the export API exportPolicy are tested.
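+ + // Editor's sketch (illustrative, not part of the original patch): importPolicy consumes, and + // exportPolicy returns, a two-level map keyed by the policy-file section names. This unused + // helper shows that shape for a single group/role pair; the tests below build the same + // structure inline for the scenarios described in their leading comments. + @SuppressWarnings("unused") + private static Map<String, Map<String, Set<String>>> exampleMappingData() { + Map<String, Set<String>> groupRolesMap = Maps.newHashMap(); + groupRolesMap.put("group1", Sets.newHashSet("role1")); + Map<String, Set<String>> rolePrivilegesMap = Maps.newHashMap(); + rolePrivilegesMap.put("role1", Sets.newHashSet(PRIVILEGE1)); + Map<String, Map<String, Set<String>>> data = Maps.newHashMap(); + data.put(PolicyFileConstants.GROUPS, groupRolesMap); + data.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + return data; + }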
+ @Test + public void testImportExportPolicy1() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Map<String, Map<String, Set<String>>> policyFileMappingData = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap = Maps.newHashMap(); + Set<String> roles = Sets.newHashSet("role1", "role2", "role3"); + groupRolesMap.put("group1", roles); + groupRolesMap.put("group2", roles); + groupRolesMap.put("group3", roles); + Map<String, Set<String>> rolePrivilegesMap = Maps.newHashMap(); + for (String roleName : roles) { + rolePrivilegesMap.put(roleName, Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, + PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + } + policyFileMappingData.put(PolicyFileConstants.GROUPS, groupRolesMap); + policyFileMappingData.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + client.importPolicy(policyFileMappingData, ADMIN_USER, false); + + Map<String, Map<String, Set<String>>> sentryMappingData = client.exportPolicy(ADMIN_USER); + validateSentryMappingData(sentryMappingData, + policyFileMappingData); + } + }); + } + + // Call import twice with no overlapping data: + // The data for the 1st import: + // group1=role1 + // role1=privilege1,privilege2,privilege3,privilege4 + // The data for the 2nd import: + // group2=role2,role3 + // group3=role2,role3 + // role2=privilege5,privilege6,privilege7,privilege8 + // role3=privilege5,privilege6,privilege7,privilege8 + // Both the import API importPolicy and the export API exportPolicy are tested. + @Test + public void testImportExportPolicy2() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Map<String, Map<String, Set<String>>> policyFileMappingData1 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap1 = Maps.newHashMap(); + groupRolesMap1.put("group1", Sets.newHashSet("role1")); + Map<String, Set<String>> rolePrivilegesMap1 = Maps.newHashMap(); + rolePrivilegesMap1.put("role1", + Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, PRIVILEGE4)); + policyFileMappingData1.put(PolicyFileConstants.GROUPS, groupRolesMap1); + policyFileMappingData1.put(PolicyFileConstants.ROLES, rolePrivilegesMap1); + client.importPolicy(policyFileMappingData1, ADMIN_USER, false); + + Map<String, Map<String, Set<String>>> policyFileMappingData2 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap2 = Maps.newHashMap(); + groupRolesMap2.put("group2", Sets.newHashSet("role2", "role3")); + groupRolesMap2.put("group3", Sets.newHashSet("role2", "role3")); + Map<String, Set<String>> rolePrivilegesMap2 = Maps.newHashMap(); + rolePrivilegesMap2.put("role2", + Sets.newHashSet(PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + rolePrivilegesMap2.put("role3", + Sets.newHashSet(PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + policyFileMappingData2.put(PolicyFileConstants.GROUPS, groupRolesMap2); + policyFileMappingData2.put(PolicyFileConstants.ROLES, rolePrivilegesMap2); + client.importPolicy(policyFileMappingData2, ADMIN_USER, false); + + Map<String, Map<String, Set<String>>> expectedMappingData = Maps.newHashMap(); + // for expectedMappingData, combine policyFileMappingData1 and policyFileMappingData2 + expectedMappingData.put(PolicyFileConstants.GROUPS, + policyFileMappingData1.get(PolicyFileConstants.GROUPS)); + expectedMappingData.get(PolicyFileConstants.GROUPS).putAll( + policyFileMappingData2.get(PolicyFileConstants.GROUPS)); + expectedMappingData.put(PolicyFileConstants.ROLES, + policyFileMappingData1.get(PolicyFileConstants.ROLES)); + expectedMappingData.get(PolicyFileConstants.ROLES).putAll( + policyFileMappingData2.get(PolicyFileConstants.ROLES)); + + Map<String, Map<String, Set<String>>> sentryMappingData = client.exportPolicy(ADMIN_USER); + 
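+ // Editor's note (illustrative): exportPolicy returns the same two-level structure that + // importPolicy consumes, so the expectations merged above can be compared directly against + // the exported data in the validation below.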
validateSentryMappingData(sentryMappingData, expectedMappingData); + } + }); + } + + // Call import twice with overlapping groups: + // The data for the 1st import: + // group1=role1, role2 + // group2=role1, role2 + // group3=role1, role2 + // role1=privilege1,privilege2,privilege3,privilege4,privilege5 + // role2=privilege1,privilege2,privilege3,privilege4,privilege5 + // The data for the 2nd import: + // group1=role2,role3 + // group2=role2,role3 + // group3=role2,role3 + // role2=privilege4,privilege5,privilege6,privilege7,privilege8 + // role3=privilege4,privilege5,privilege6,privilege7,privilege8 + // Both the import API importPolicy and the export API exportPolicy are tested. + @Test + public void testImportExportPolicy3() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Map<String, Map<String, Set<String>>> policyFileMappingData1 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap1 = Maps.newHashMap(); + groupRolesMap1.put("group1", Sets.newHashSet("role1", "role2")); + groupRolesMap1.put("group2", Sets.newHashSet("role1", "role2")); + groupRolesMap1.put("group3", Sets.newHashSet("role1", "role2")); + Map<String, Set<String>> rolePrivilegesMap1 = Maps.newHashMap(); + rolePrivilegesMap1.put("role1", + Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, PRIVILEGE4, PRIVILEGE5)); + rolePrivilegesMap1.put("role2", + Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, PRIVILEGE4, PRIVILEGE5)); + policyFileMappingData1.put(PolicyFileConstants.GROUPS, groupRolesMap1); + policyFileMappingData1.put(PolicyFileConstants.ROLES, rolePrivilegesMap1); + client.importPolicy(policyFileMappingData1, ADMIN_USER, false); + + Map<String, Map<String, Set<String>>> policyFileMappingData2 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap2 = Maps.newHashMap(); + groupRolesMap2.put("group1", Sets.newHashSet("role2", "role3")); + groupRolesMap2.put("group2", Sets.newHashSet("role2", "role3")); + groupRolesMap2.put("group3", Sets.newHashSet("role2", "role3")); + Map<String, Set<String>> rolePrivilegesMap2 = Maps.newHashMap(); + rolePrivilegesMap2.put("role2", + Sets.newHashSet(PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + rolePrivilegesMap2.put("role3", + Sets.newHashSet(PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + policyFileMappingData2.put(PolicyFileConstants.GROUPS, groupRolesMap2); + policyFileMappingData2.put(PolicyFileConstants.ROLES, rolePrivilegesMap2); + client.importPolicy(policyFileMappingData2, ADMIN_USER, false); + + Map<String, Map<String, Set<String>>> expectedMappingData = Maps.newHashMap(); + Map<String, Set<String>> expectedRolesMap = Maps.newHashMap(); + expectedRolesMap.put("group1", Sets.newHashSet("role1", "role2", "role3")); + expectedRolesMap.put("group2", Sets.newHashSet("role1", "role2", "role3")); + expectedRolesMap.put("group3", Sets.newHashSet("role1", "role2", "role3")); + Map<String, Set<String>> expectedPrivilegesMap = Maps.newHashMap(); + expectedPrivilegesMap.put("role1", + Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, PRIVILEGE4, PRIVILEGE5)); + expectedPrivilegesMap.put("role2", Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, + PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + expectedPrivilegesMap.put("role3", + Sets.newHashSet(PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + expectedMappingData.put(PolicyFileConstants.GROUPS, expectedRolesMap); + expectedMappingData.put(PolicyFileConstants.ROLES, expectedPrivilegesMap); + + Map<String, Map<String, Set<String>>> sentryMappingData = client.exportPolicy(ADMIN_USER); + validateSentryMappingData(sentryMappingData, expectedMappingData); + } + }); + } + + // Only mapping
data for [group,role] is imported: + // group1=role1,role2 + @Test + public void testImportExportPolicy4() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Map<String, Map<String, Set<String>>> policyFileMappingData = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap = Maps.newHashMap(); + Set<String> roles = Sets.newHashSet("role1", "role2"); + groupRolesMap.put("group1", roles); + Map<String, Set<String>> rolePrivilegesMap = Maps.newHashMap(); + policyFileMappingData.put(PolicyFileConstants.GROUPS, groupRolesMap); + policyFileMappingData.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + client.importPolicy(policyFileMappingData, ADMIN_USER, false); + + Map<String, Map<String, Set<String>>> sentryMappingData = client.exportPolicy(ADMIN_USER); + validateSentryMappingData(sentryMappingData, + policyFileMappingData); + } + }); + } + + // Call import twice with no overlapping data; the import runs in overwrite mode: + // The data for the 1st import: + // group1=role1 + // role1=privilege1 + // The data for the 2nd import: + // group2=role2,role3 + // group3=role2,role3 + // role2=privilege2 + // role3=privilege2 + // Both the import API importPolicy and the export API exportPolicy are tested. + @Test + public void testImportExportPolicy5() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Map<String, Map<String, Set<String>>> policyFileMappingData1 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap1 = Maps.newHashMap(); + groupRolesMap1.put("group1", Sets.newHashSet("role1")); + Map<String, Set<String>> rolePrivilegesMap1 = Maps.newHashMap(); + rolePrivilegesMap1.put("role1", Sets.newHashSet(PRIVILEGE1)); + policyFileMappingData1.put(PolicyFileConstants.GROUPS, groupRolesMap1); + policyFileMappingData1.put(PolicyFileConstants.ROLES, rolePrivilegesMap1); + client.importPolicy(policyFileMappingData1, ADMIN_USER, true); + + Map<String, Map<String, Set<String>>> policyFileMappingData2 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap2 = Maps.newHashMap(); + groupRolesMap2.put("group2", Sets.newHashSet("role2", "role3")); + groupRolesMap2.put("group3", Sets.newHashSet("role2", "role3")); + Map<String, Set<String>> rolePrivilegesMap2 = Maps.newHashMap(); + rolePrivilegesMap2.put("role2", Sets.newHashSet(PRIVILEGE2)); + rolePrivilegesMap2.put("role3", Sets.newHashSet(PRIVILEGE2)); + policyFileMappingData2.put(PolicyFileConstants.GROUPS, groupRolesMap2); + policyFileMappingData2.put(PolicyFileConstants.ROLES, rolePrivilegesMap2); + client.importPolicy(policyFileMappingData2, ADMIN_USER, true); + + Map<String, Map<String, Set<String>>> expectedMappingData = Maps.newHashMap(); + Map<String, Set<String>> expectedRolesMap = Maps.newHashMap(); + expectedRolesMap.put("group1", Sets.newHashSet("role1")); + expectedRolesMap.put("group2", Sets.newHashSet("role2", "role3")); + expectedRolesMap.put("group3", Sets.newHashSet("role2", "role3")); + Map<String, Set<String>> expectedPrivilegesMap = Maps.newHashMap(); + expectedPrivilegesMap.put("role1", Sets.newHashSet(PRIVILEGE1)); + expectedPrivilegesMap.put("role2", Sets.newHashSet(PRIVILEGE2)); + expectedPrivilegesMap.put("role3", Sets.newHashSet(PRIVILEGE2)); + expectedMappingData.put(PolicyFileConstants.GROUPS, expectedRolesMap); + expectedMappingData.put(PolicyFileConstants.ROLES, expectedPrivilegesMap); + + Map<String, Map<String, Set<String>>> sentryMappingData = client.exportPolicy(ADMIN_USER); + validateSentryMappingData(sentryMappingData, expectedMappingData); + } + }); + } + + // Call import twice with overlapping data; the import runs in overwrite mode: + // The data for the 1st import: + // group1=role1, role2 + // group2=role1, role2 + // group3=role1,
role2 + // role1=privilege1,privilege2,privilege3,privilege4,privilege5 + // role2=privilege1,privilege2,privilege3,privilege4,privilege5 + // The data for the 2nd import: + // group1=role2,role3 + // group2=role2,role3 + // group3=role2,role3 + // role2=privilege4,privilege5,privilege6,privilege7,privilege8 + // role3=privilege4,privilege5,privilege6,privilege7,privilege8 + // Both the import API importPolicy and the export API exportPolicy are tested. + @Test + public void testImportExportPolicy6() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Map<String, Map<String, Set<String>>> policyFileMappingData1 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap1 = Maps.newHashMap(); + groupRolesMap1.put("group1", Sets.newHashSet("role1", "role2")); + groupRolesMap1.put("group2", Sets.newHashSet("role1", "role2")); + groupRolesMap1.put("group3", Sets.newHashSet("role1", "role2")); + Map<String, Set<String>> rolePrivilegesMap1 = Maps.newHashMap(); + rolePrivilegesMap1.put("role1", + Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, PRIVILEGE4, PRIVILEGE5)); + rolePrivilegesMap1.put("role2", + Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, PRIVILEGE4, PRIVILEGE5)); + policyFileMappingData1.put(PolicyFileConstants.GROUPS, groupRolesMap1); + policyFileMappingData1.put(PolicyFileConstants.ROLES, rolePrivilegesMap1); + client.importPolicy(policyFileMappingData1, ADMIN_USER, true); + + Map<String, Map<String, Set<String>>> policyFileMappingData2 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap2 = Maps.newHashMap(); + groupRolesMap2.put("group1", Sets.newHashSet("role2", "role3")); + groupRolesMap2.put("group2", Sets.newHashSet("role2", "role3")); + groupRolesMap2.put("group3", Sets.newHashSet("role2", "role3")); + Map<String, Set<String>> rolePrivilegesMap2 = Maps.newHashMap(); + rolePrivilegesMap2.put("role2", + Sets.newHashSet(PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + rolePrivilegesMap2.put("role3", + Sets.newHashSet(PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + policyFileMappingData2.put(PolicyFileConstants.GROUPS, groupRolesMap2); + policyFileMappingData2.put(PolicyFileConstants.ROLES, rolePrivilegesMap2); + client.importPolicy(policyFileMappingData2, ADMIN_USER, true); + + Map<String, Map<String, Set<String>>> expectedMappingData = Maps.newHashMap(); + Map<String, Set<String>> expectedRolesMap = Maps.newHashMap(); + expectedRolesMap.put("group1", Sets.newHashSet("role1", "role2", "role3")); + expectedRolesMap.put("group2", Sets.newHashSet("role1", "role2", "role3")); + expectedRolesMap.put("group3", Sets.newHashSet("role1", "role2", "role3")); + Map<String, Set<String>> expectedPrivilegesMap = Maps.newHashMap(); + expectedPrivilegesMap.put("role1", + Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, PRIVILEGE4, PRIVILEGE5)); + expectedPrivilegesMap.put("role2", + Sets.newHashSet(PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + expectedPrivilegesMap.put("role3", + Sets.newHashSet(PRIVILEGE4, PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + expectedMappingData.put(PolicyFileConstants.GROUPS, expectedRolesMap); + expectedMappingData.put(PolicyFileConstants.ROLES, expectedPrivilegesMap); + + Map<String, Map<String, Set<String>>> sentryMappingData = client.exportPolicy(ADMIN_USER); + validateSentryMappingData(sentryMappingData, expectedMappingData); + } + }); + } + + // test the import privileges with the actions: all, *, select, insert + // all and * should replace the select and insert + // The data for import: + // group1=role1, role2 + // role1=testPrivilege1,testPrivilege2,testPrivilege3,testPrivilege4 + // 
role2=testPrivilege5,testPrivilege6,testPrivilege7,testPrivilege8 + @Test + public void testImportExportPolicy7() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + String testPrivilege1 = "server=server1->db=db1->table=tbl1->action=select->grantoption=true"; + String testPrivilege2 = "server=server1->db=db1->table=tbl1->action=insert->grantoption=false"; + String testPrivilege3 = "server=server1->db=db1->table=tbl1->action=all->grantoption=true"; + String testPrivilege4 = "server=server1->db=db1->table=tbl1->action=insert->grantoption=true"; + String testPrivilege5 = "server=server1->db=db1->table=tbl2->action=select->grantoption=true"; + String testPrivilege6 = "server=server1->db=db1->table=tbl2->action=insert->grantoption=false"; + String testPrivilege7 = "server=server1->db=db1->table=tbl2->action=*->grantoption=true"; + String testPrivilege8 = "server=server1->db=db1->table=tbl2->action=insert->grantoption=true"; + + Map<String, Map<String, Set<String>>> policyFileMappingData1 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap1 = Maps.newHashMap(); + groupRolesMap1.put("group1", Sets.newHashSet("role1", "role2")); + Map<String, Set<String>> rolePrivilegesMap1 = Maps.newHashMap(); + rolePrivilegesMap1.put("role1", + Sets.newHashSet(testPrivilege1, testPrivilege2, testPrivilege3, testPrivilege4)); + rolePrivilegesMap1.put("role2", + Sets.newHashSet(testPrivilege5, testPrivilege6, testPrivilege7, testPrivilege8)); + policyFileMappingData1.put(PolicyFileConstants.GROUPS, groupRolesMap1); + policyFileMappingData1.put(PolicyFileConstants.ROLES, rolePrivilegesMap1); + client.importPolicy(policyFileMappingData1, ADMIN_USER, true); + + Map<String, Map<String, Set<String>>> expectedMappingData = Maps.newHashMap(); + Map<String, Set<String>> expectedRolesMap = Maps.newHashMap(); + expectedRolesMap.put("group1", Sets.newHashSet("role1", "role2")); + Map<String, Set<String>> expectedPrivilegesMap = Maps.newHashMap(); + expectedPrivilegesMap.put("role1", Sets.newHashSet(testPrivilege2, testPrivilege3)); + expectedPrivilegesMap.put("role2", Sets.newHashSet(testPrivilege6, testPrivilege7)); + expectedMappingData.put(PolicyFileConstants.GROUPS, expectedRolesMap); + expectedMappingData.put(PolicyFileConstants.ROLES, expectedPrivilegesMap); + + Map<String, Map<String, Set<String>>> sentryMappingData = client.exportPolicy(ADMIN_USER); + validateSentryMappingData(sentryMappingData, expectedMappingData); + } + }); + } + + // Call import twice with overlapping actions; all and * should replace the select and + // insert privileges. + // The data for the 1st import: + // group1=role1, role2 + // role1=privilege1(with select action),privilege2(with insert action) + // role2=privilege4(with select action),privilege5(with insert action) + // The data for the 2nd import: + // group1=role1, role2 + // role1=privilege3(with all action) + // role2=privilege6(with * action) + @Test + public void testImportExportPolicy8() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + String testPrivilege1 = "server=server1->db=db1->table=tbl1->action=select->grantoption=true"; + String testPrivilege2 = "server=server1->db=db1->table=tbl1->action=insert->grantoption=true"; + String testPrivilege3 = "server=server1->db=db1->table=tbl1->action=all->grantoption=true"; + String testPrivilege4 = "server=server1->db=db1->table=tbl2->action=select->grantoption=true"; + String testPrivilege5 = "server=server1->db=db1->table=tbl2->action=insert->grantoption=true"; + String testPrivilege6 = "server=server1->db=db1->table=tbl2->action=*->grantoption=true"; + + 
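+ // Editor's note (illustrative): with merge-mode import (overwrite=false), a broader action + // subsumes narrower ones on the same object; e.g. importing action=select and then action=all + // on tbl1 is expected to leave only the action=all privilege in the export.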
Map<String, Map<String, Set<String>>> policyFileMappingData1 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap1 = Maps.newHashMap(); + groupRolesMap1.put("group1", Sets.newHashSet("role1", "role2")); + Map<String, Set<String>> rolePrivilegesMap1 = Maps.newHashMap(); + rolePrivilegesMap1.put("role1", Sets.newHashSet(testPrivilege1, testPrivilege2)); + rolePrivilegesMap1.put("role2", Sets.newHashSet(testPrivilege4, testPrivilege5)); + policyFileMappingData1.put(PolicyFileConstants.GROUPS, groupRolesMap1); + policyFileMappingData1.put(PolicyFileConstants.ROLES, rolePrivilegesMap1); + client.importPolicy(policyFileMappingData1, ADMIN_USER, false); + + Map<String, Map<String, Set<String>>> policyFileMappingData2 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap2 = Maps.newHashMap(); + groupRolesMap2.put("group1", Sets.newHashSet("role1", "role2")); + Map<String, Set<String>> rolePrivilegesMap2 = Maps.newHashMap(); + rolePrivilegesMap2.put("role1", Sets.newHashSet(testPrivilege3)); + rolePrivilegesMap2.put("role2", Sets.newHashSet(testPrivilege6)); + policyFileMappingData2.put(PolicyFileConstants.GROUPS, groupRolesMap2); + policyFileMappingData2.put(PolicyFileConstants.ROLES, rolePrivilegesMap2); + client.importPolicy(policyFileMappingData2, ADMIN_USER, false); + + Map<String, Map<String, Set<String>>> expectedMappingData = policyFileMappingData2; + Map<String, Map<String, Set<String>>> sentryMappingData = client.exportPolicy(ADMIN_USER); + // all and * should replace the select and insert + validateSentryMappingData(sentryMappingData, expectedMappingData); + } + }); + } + + // Test that a user who is not in the admin group can't do the import/export. + @Test + public void testImportExportPolicy9() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Map<String, Map<String, Set<String>>> policyFileMappingData1 = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap1 = Maps.newHashMap(); + Map<String, Set<String>> rolePrivilegesMap1 = Maps.newHashMap(); + policyFileMappingData1.put(PolicyFileConstants.GROUPS, groupRolesMap1); + policyFileMappingData1.put(PolicyFileConstants.ROLES, rolePrivilegesMap1); + try { + client.importPolicy(policyFileMappingData1, "no-admin-user", false); + fail("Import should fail for a non-admin user."); + } catch (Exception e) { + // expected exception + } + + try { + client.exportPolicy("no-admin-user"); + fail("Export should fail for a non-admin user."); + } catch (Exception e) { + // expected exception + } + } + }); + } + + // verify the mapping data + public void validateSentryMappingData( + Map<String, Map<String, Set<String>>> actualMappingData, + Map<String, Map<String, Set<String>>> expectedMappingData) { + validateGroupRolesMap(actualMappingData.get(PolicyFileConstants.GROUPS), + expectedMappingData.get(PolicyFileConstants.GROUPS)); + validateRolePrivilegesMap(actualMappingData.get(PolicyFileConstants.ROLES), + expectedMappingData.get(PolicyFileConstants.ROLES)); + } + + // verify the mapping data for [group,role] + private void validateGroupRolesMap(Map<String, Set<String>> actualMap, + Map<String, Set<String>> expectedMap) { + assertEquals(expectedMap.keySet().size(), actualMap.keySet().size()); + for (String groupName : actualMap.keySet()) { + Set<String> actualRoles = actualMap.get(groupName); + Set<String> expectedRoles = expectedMap.get(groupName); + assertEquals(actualRoles.size(), expectedRoles.size()); + assertTrue(actualRoles.equals(expectedRoles)); + } + } + + // verify the mapping data for [role,privilege] + private void validateRolePrivilegesMap(Map<String, Set<String>> actualMap, + Map<String, Set<String>> expectedMap) { + assertEquals(expectedMap.keySet().size(), actualMap.keySet().size()); + for (String roleName : actualMap.keySet()) { + Set<String> actualPrivileges = actualMap.get(roleName); + Set<String> expectedPrivileges = expectedMap.get(roleName); + assertEquals(expectedPrivileges.size(), actualPrivileges.size()); + 
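+ // An exported privilege may carry an explicit grantoption=false that the expected string + // omits, so the loop below retries the containment check with that suffix appended via + // the PolicyConstants joiners (yielding "...->grantoption=false").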
for (String actualPrivilege : actualPrivileges) { + boolean isFound = expectedPrivileges.contains(actualPrivilege); + if (!isFound) { + String withOptionPrivilege = PolicyConstants.AUTHORIZABLE_JOINER.join(actualPrivilege, + PolicyConstants.KV_JOINER.join(PolicyFileConstants.PRIVILEGE_GRANT_OPTION_NAME, + "false")); + isFound = expectedPrivileges.contains(withOptionPrivilege); + } + assertTrue(isFound); + } + } + } +} diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceIntegration.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceIntegration.java index 02c753513..07c7f7aa3 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceIntegration.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceIntegration.java @@ -18,7 +18,7 @@ package org.apache.sentry.provider.db.service.thrift; -import static junit.framework.Assert.assertEquals; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -820,4 +820,32 @@ public void runTestAsSubject() throws Exception { }}); } + + /* SENTRY-841 */ + @Test + public void testGrantRevokePrivilegeOnServerForRole() throws Exception { + runTestAsSubject(new TestOperation(){ + @Override + public void runTestAsSubject() throws Exception { + String requestorUserName = ADMIN_USER; + Set<String> requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP); + setLocalGroupMapping(requestorUserName, requestorUserGroupNames); + writePolicyFile(); + + String roleName1 = "admin_r1"; + + client.dropRoleIfExists(requestorUserName, roleName1); + client.createRole(requestorUserName, roleName1); + + client.grantServerPrivilege(requestorUserName, roleName1, "server", false); + + Set<TSentryPrivilege> listPrivs = client.listAllPrivilegesByRoleName(requestorUserName, roleName1); + assertTrue("Privilege should be all:", listPrivs.iterator().next().getAction().equals("*")); + + client.revokeServerPrivilege(requestorUserName, roleName1, "server", false); + listPrivs = client.listAllPrivilegesByRoleName(requestorUserName, roleName1); + assertTrue("Privilege not correctly revoked!", listPrivs.size() == 0); + + }}); + } } diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceWithInvalidMsgSize.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceWithInvalidMsgSize.java new file mode 100644 index 000000000..09f3d8ed8 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceWithInvalidMsgSize.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.provider.db.service.thrift; + +import com.google.common.collect.Sets; +import org.apache.hadoop.conf.Configuration; +import org.apache.sentry.SentryUserException; +import org.apache.sentry.service.thrift.SentryServiceClientFactory; +import org.apache.sentry.service.thrift.SentryServiceFactory; +import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; +import org.apache.sentry.service.thrift.ServiceConstants; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Set; + +/** + * Test sentry service with a larger message size than the server's or client's thrift max message size. + */ +public class TestSentryServiceWithInvalidMsgSize extends SentryServiceIntegrationBase { + private final Set<String> REQUESTER_USER_GROUP_NAMES = Sets.newHashSet(ADMIN_GROUP); + private final String ROLE_NAME = "admin_r"; + + /** + * Test the case when the message size is larger than the client's thrift max message size. + */ + @Test + public void testClientWithSmallMaxMsgSize() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Configuration confWithSmallMaxMsgSize = new Configuration(conf); + confWithSmallMaxMsgSize.setLong(ServiceConstants.ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE, 20); + // create a client with a small thrift max message size + SentryPolicyServiceClient clientWithSmallMaxMsgSize = SentryServiceClientFactory.create(confWithSmallMaxMsgSize); + + setLocalGroupMapping(ADMIN_USER, REQUESTER_USER_GROUP_NAMES); + writePolicyFile(); + + boolean exceptionThrown = false; + try { + // client throws exception when message size is larger than the client's thrift max message size. + clientWithSmallMaxMsgSize.listRoles(ADMIN_USER); + } catch (SentryUserException e) { + exceptionThrown = true; + Assert.assertTrue(e.getMessage().contains("Thrift exception occurred")); + Assert.assertTrue(e.getCause().getMessage().contains("Length exceeded max allowed")); + } finally { + Assert.assertEquals(true, exceptionThrown); + clientWithSmallMaxMsgSize.close(); + } + + // client can still talk with sentry server when message size is smaller. + client.dropRoleIfExists(ADMIN_USER, ROLE_NAME); + client.listRoles(ADMIN_USER); + client.createRole(ADMIN_USER, ROLE_NAME); + client.listRoles(ADMIN_USER); + } + }); + } + + /** + * Test the case when the message size is larger than the server's thrift max message size.
+ */ + @Test + public void testServerWithSmallMaxMsgSize() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + Configuration confWithSmallMaxMsgSize = new Configuration(conf); + confWithSmallMaxMsgSize.setLong(ServiceConstants.ServerConfig.SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE, + 50); + stopSentryService(); + + // create a server with a small max thrift message size + server = new SentryServiceFactory().create(confWithSmallMaxMsgSize); + startSentryService(); + + setLocalGroupMapping(ADMIN_USER, REQUESTER_USER_GROUP_NAMES); + writePolicyFile(); + + // client can talk with server when message size is smaller. + client.listRoles(ADMIN_USER); + client.createRole(ADMIN_USER, ROLE_NAME); + + boolean exceptionThrown = false; + try { + // client throws exception when message size is larger than the server's thrift max message size. + client.grantServerPrivilege(ADMIN_USER, ROLE_NAME, "server", false); + } catch (SentryUserException e) { + exceptionThrown = true; + Assert.assertTrue(e.getMessage().contains("org.apache.thrift.transport.TTransportException")); + } finally { + Assert.assertEquals(true, exceptionThrown); + } + + // client can still talk with sentry server when message size is smaller. + Set<TSentryRole> roles = client.listRoles(ADMIN_USER); + Assert.assertTrue(roles.size() == 1); + Assert.assertEquals(ROLE_NAME, roles.iterator().next().getRoleName()); + } + }); + } +} diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceWithKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceWithKerberos.java index 7b1eab19c..ff7338266 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceWithKerberos.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServiceWithKerberos.java @@ -17,7 +17,6 @@ */ package org.apache.sentry.provider.db.service.thrift; -import org.apache.sentry.SentryUserException; import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; import org.junit.After; import org.junit.Before; @@ -42,7 +41,7 @@ public void before() throws Exception { @Override @After
- public void after() throws SentryUserException { + public void after() { } @Test diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryWebServerWithoutSecurity.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryWebServerWithoutSecurity.java index 27e518ba0..4a913e518 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryWebServerWithoutSecurity.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryWebServerWithoutSecurity.java @@ -21,7 +21,6 @@ import java.net.URL; import org.apache.commons.io.IOUtils; -import org.apache.sentry.SentryUserException; import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; import org.junit.After; import org.junit.Assert; @@ -45,7 +44,7 @@ public void before() throws Exception { @Override @After - public void after() throws SentryUserException { + public void after() { } @Test @@ -56,4 +55,33 @@ public void testPing() throws Exception { String response = IOUtils.toString(conn.getInputStream()); Assert.assertEquals("pong\n", response); } + + @Test + public void testConf() throws Exception { + // test bad format + final URL url = new URL("http://" + SERVER_HOST + ":" + webServerPort + "/conf?" + + ConfServlet.FORMAT_PARAM + "=badformat"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode()); + + // test json format + final URL url1 = new URL("http://" + SERVER_HOST + ":" + webServerPort + "/conf?" + + ConfServlet.FORMAT_PARAM +"=" + ConfServlet.FORMAT_JSON); + conn = (HttpURLConnection) url1.openConnection(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + + // test xml format + final URL url2 = new URL("http://" + SERVER_HOST + ":" + webServerPort + "/conf?" 
+ + ConfServlet.FORMAT_PARAM +"=" + ConfServlet.FORMAT_XML); + conn = (HttpURLConnection) url2.openConnection(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + String xmlResponse = IOUtils.toString(conn.getInputStream()); + + // test default is xml format + final URL url3 = new URL("http://" + SERVER_HOST + ":" + webServerPort + "/conf"); + conn = (HttpURLConnection) url3.openConnection(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + String defaultResponse = IOUtils.toString(conn.getInputStream()); + Assert.assertEquals(xmlResponse, defaultResponse); + } } diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/tools/TestSentrySchemaTool.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/tools/TestSentrySchemaTool.java index 9a2dff811..cb62c136a 100644 --- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/tools/TestSentrySchemaTool.java +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/tools/TestSentrySchemaTool.java @@ -41,6 +41,7 @@ public void defaultSetup() throws Exception { File dbDir = new File(Files.createTempDir(), "sentry_policy_db"); sentryConf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true"); + sentryConf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); schemaTool = new SentrySchemaTool("./src/main/resources", sentryConf, "derby"); } @@ -50,6 +51,7 @@ private void nonDefaultsetup() throws Exception { File dbDir = new File(Files.createTempDir(), "sentry_policy_db"); sentryConf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true"); + sentryConf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); schemaTool = new SentrySchemaTool("./src/main/resources", sentryConf, "derby"); } diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/tools/TestSentryShellHive.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/tools/TestSentryShellHive.java new file mode 100644 index 000000000..21dfa0f14 --- /dev/null +++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/tools/TestSentryShellHive.java @@ -0,0 +1,608 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.sentry.provider.db.tools; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.PrintStream; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; + +import org.apache.commons.io.FileUtils; +import org.apache.sentry.SentryUserException; +import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; +import org.apache.sentry.provider.db.service.thrift.TSentryRole; +import org.apache.sentry.service.thrift.SentryServiceIntegrationBase; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.Sets; +import com.google.common.io.Files; + +public class TestSentryShellHive extends SentryServiceIntegrationBase { + + private File confDir; + private File confPath; + private static String TEST_ROLE_NAME_1 = "testRole1"; + private static String TEST_ROLE_NAME_2 = "testRole2"; + private String requestorName = ""; + + @Before + public void prepareForTest() throws Exception { + confDir = Files.createTempDir(); + confPath = new File(confDir, "sentry-site.xml"); + if (confPath.createNewFile()) { + FileOutputStream to = new FileOutputStream(confPath); + conf.writeXml(to); + to.close(); + } + requestorName = System.getProperty("user.name", ""); + Set<String> requestorUserGroupNames = Sets.newHashSet(ADMIN_GROUP); + setLocalGroupMapping(requestorName, requestorUserGroupNames); + // add ADMIN_USER for the after() in SentryServiceIntegrationBase + setLocalGroupMapping(ADMIN_USER, requestorUserGroupNames); + writePolicyFile(); + } + + @After + public void clearTestData() throws Exception { + FileUtils.deleteQuietly(confDir); + } + + @Test + public void testCreateDropRole() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + // test: create role with -cr + String[] args = { "-cr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + // test: create role with --create_role + args = new String[] { "--create_role", "-r", TEST_ROLE_NAME_2, "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + + // validate the result, list roles with -lr + args = new String[] { "-lr", "-conf", confPath.getAbsolutePath() }; + SentryShellHive sentryShell = new SentryShellHive(); + Set<String> roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1, TEST_ROLE_NAME_2); + + // validate the result, list roles with --list_role + args = new String[] { "--list_role", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1, TEST_ROLE_NAME_2); + + // test: drop role with -dr + args = new String[] { "-dr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + // test: drop role with --drop_role + args = new String[] { "--drop_role", "-r", TEST_ROLE_NAME_2, "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + + // validate the result + Set<TSentryRole> roles = client.listRoles(requestorName); + assertEquals("Incorrect number of roles", 0, roles.size()); + } + }); + } + + @Test + public void testAddDeleteRoleForGroup() throws Exception { + runTestAsSubject(new TestOperation() { + @Override
public void runTestAsSubject() throws Exception { + // create the role for test + client.createRole(requestorName, TEST_ROLE_NAME_1); + client.createRole(requestorName, TEST_ROLE_NAME_2); + // test: add role to group with -arg + String[] args = { "-arg", "-r", TEST_ROLE_NAME_1, "-g", "testGroup1", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + // test: add role to multiple groups + args = new String[] { "-arg", "-r", TEST_ROLE_NAME_1, "-g", "testGroup2,testGroup3", + "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + // test: add role to group with --add_role_group + args = new String[] { "--add_role_group", "-r", TEST_ROLE_NAME_2, "-g", "testGroup1", + "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + + // validate the result, list roles with -lr and -g + args = new String[] { "-lr", "-g", "testGroup1", "-conf", confPath.getAbsolutePath() }; + SentryShellHive sentryShell = new SentryShellHive(); + Set<String> roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1, TEST_ROLE_NAME_2); + + + // list roles with --list_role and -g + args = new String[] { "--list_role", "-g", "testGroup2", "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1); + + args = new String[] { "--list_role", "-g", "testGroup3", "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + roleNames = getShellResultWithOSRedirect(sentryShell, args, true); + validateRoleNames(roleNames, TEST_ROLE_NAME_1); + + // test: delete role from group with -drg + args = new String[] { "-drg", "-r", TEST_ROLE_NAME_1, "-g", "testGroup1", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + // test: delete role from multiple groups + args = new String[] { "-drg", "-r", TEST_ROLE_NAME_1, "-g", "testGroup2,testGroup3", + "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + // test: delete role from group with --delete_role_group + args = new String[] { "--delete_role_group", "-r", TEST_ROLE_NAME_2, "-g", "testGroup1", + "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + + // validate the result + Set<TSentryRole> roles = client.listRolesByGroupName(requestorName, "testGroup1"); + assertEquals("Incorrect number of roles", 0, roles.size()); + roles = client.listRolesByGroupName(requestorName, "testGroup2"); + assertEquals("Incorrect number of roles", 0, roles.size()); + roles = client.listRolesByGroupName(requestorName, "testGroup3"); + assertEquals("Incorrect number of roles", 0, roles.size()); + // clear the test data + client.dropRole(requestorName, TEST_ROLE_NAME_1); + client.dropRole(requestorName, TEST_ROLE_NAME_2); + } + }); + } + + @Test + public void testGrantRevokePrivilegeWithShortOption() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + // create the role for test + client.createRole(requestorName, TEST_ROLE_NAME_1); + client.createRole(requestorName, TEST_ROLE_NAME_2); + + // test: grant privilege to role with -gpr + String[] args = { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->action=*", + "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->action=select", "-conf", confPath.getAbsolutePath() }; + 
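+ // Editor's note (illustrative): the -p argument uses the Sentry policy-file privilege grammar, + // server=<s>[->db=<d>->table=<t>[->column=<c>]][->uri=<u>]->action=<a>[->grantoption=<true|false>], + // with the components joined by "->" as in the examples above and below.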
SentryShellHive.main(args); + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->action=insert", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->column=col1->action=insert", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->column=col2->action=insert->grantoption=true", + "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + // for the uri privilege, the action will always be * + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->uri=hdfs://path/testuri", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + + // test the list privilege with -lp + args = new String[] { "-lp", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + SentryShellHive sentryShell = new SentryShellHive(); + Set<String> privilegeStrs = getShellResultWithOSRedirect(sentryShell, args, true); + // validate the result for -lp + assertEquals("Incorrect number of privileges", 6, privilegeStrs.size()); + assertTrue(privilegeStrs.contains("server=server1->action=*")); + assertTrue(privilegeStrs.contains("server=server1->db=db1->action=select")); + assertTrue(privilegeStrs.contains("server=server1->db=db1->table=tbl1->action=insert")); + assertTrue(privilegeStrs + .contains("server=server1->db=db1->table=tbl1->column=col1->action=insert")); + assertTrue(privilegeStrs + .contains("server=server1->db=db1->table=tbl1->column=col2->action=insert->grantoption=true")); + // for the uri privilege, the action will always be * + assertTrue(privilegeStrs.contains("server=server1->uri=hdfs://path/testuri->action=*")); + + // test: revoke privilege from role with -rpr + args = new String[] { "-rpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->column=col1->action=insert", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + Set<TSentryPrivilege> privileges = client.listAllPrivilegesByRoleName(requestorName, + TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 5, privileges.size()); + + args = new String[] { "-rpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->column=col2->action=insert->grantoption=true", + "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 4, privileges.size()); + + args = new String[] { "-rpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->uri=hdfs://path/testuri", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 3, privileges.size()); + + args = new String[] { "-rpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->action=insert", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 2, privileges.size()); + + args = new String[] { "-rpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->action=select", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = 
client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 1, privileges.size()); + + args = new String[] { "-rpr", "-r", TEST_ROLE_NAME_1, "-p", "server=server1->action=*", + "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 0, privileges.size()); + + // clear the test data + client.dropRole(requestorName, TEST_ROLE_NAME_1); + client.dropRole(requestorName, TEST_ROLE_NAME_2); + } + }); + } + + @Test + public void testGrantRevokePrivilegeWithLongOption() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + // create the role for test + client.createRole(requestorName, TEST_ROLE_NAME_1); + client.createRole(requestorName, TEST_ROLE_NAME_2); + + // test: grant privilege to role with --grant_privilege_role + String[] args = { "--grant_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->action=*", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + args = new String[] { "--grant_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->action=select", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + args = new String[] { "--grant_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->action=insert", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + args = new String[] { "--grant_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->column=col1->action=insert", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + args = new String[] { "--grant_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->column=col2->action=insert->grantoption=true", + "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + // for the uri privilege, the action will always be * + args = new String[] { "--grant_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->uri=hdfs://path/testuri", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + + // test the list privilege with --list_privilege + args = new String[] { "--list_privilege", "-r", TEST_ROLE_NAME_1, "-conf", + confPath.getAbsolutePath() }; + SentryShellHive sentryShell = new SentryShellHive(); + Set<String> privilegeStrs = getShellResultWithOSRedirect(sentryShell, args, true); + // validate the result for --list_privilege + assertEquals("Incorrect number of privileges", 6, privilegeStrs.size()); + assertTrue(privilegeStrs.contains("server=server1->action=*")); + assertTrue(privilegeStrs.contains("server=server1->db=db1->action=select")); + assertTrue(privilegeStrs.contains("server=server1->db=db1->table=tbl1->action=insert")); + assertTrue(privilegeStrs + .contains("server=server1->db=db1->table=tbl1->column=col1->action=insert")); + assertTrue(privilegeStrs + .contains("server=server1->db=db1->table=tbl1->column=col2->action=insert->grantoption=true")); + // for the uri privilege, the action will always be * + assertTrue(privilegeStrs.contains("server=server1->uri=hdfs://path/testuri->action=*")); + + // test: revoke privilege from role with --revoke_privilege_role + args = new String[] { "--revoke_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->column=col1->action=insert", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + Set<TSentryPrivilege> privileges = 
client.listAllPrivilegesByRoleName(requestorName, + TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 5, privileges.size()); + + args = new String[] { "--revoke_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->column=col2->action=insert->grantoption=true", + "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 4, privileges.size()); + + args = new String[] { "--revoke_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->uri=hdfs://path/testuri", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 3, privileges.size()); + + args = new String[] { "--revoke_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->table=tbl1->action=insert", "-conf", + confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 2, privileges.size()); + + args = new String[] { "--revoke_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->db=db1->action=select", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 1, privileges.size()); + + args = new String[] { "--revoke_privilege_role", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->action=*", "-conf", confPath.getAbsolutePath() }; + SentryShellHive.main(args); + privileges = client.listAllPrivilegesByRoleName(requestorName, TEST_ROLE_NAME_1); + assertEquals("Incorrect number of privileges", 0, privileges.size()); + + // clear the test data + client.dropRole(requestorName, TEST_ROLE_NAME_1); + client.dropRole(requestorName, TEST_ROLE_NAME_2); + } + }); + } + + @Test + public void testNegativeCaseWithInvalidArgument() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + client.createRole(requestorName, TEST_ROLE_NAME_1); + // test: create duplicate role with -cr + String[] args = { "-cr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() }; + SentryShellHive sentryShell = new SentryShellHive(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for creating a duplicate role"); + } catch (SentryUserException e) { + // expected exception + } + + // test: drop a non-existent role with -dr + args = new String[] { "-dr", "-r", TEST_ROLE_NAME_2, "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for dropping a non-existent role"); + } catch (SentryUserException e) { + // expected exception + } + + // test: add a non-existent role to a group with -arg + args = new String[] { "-arg", "-r", TEST_ROLE_NAME_2, "-g", "testGroup1", "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for granting a non-existent role to a group"); + } catch (SentryUserException e) { + // expected exception + } + + // test: delete a group from a non-existent role with -drg + args = new String[] { "-drg", "-r", TEST_ROLE_NAME_2, "-g", "testGroup1", 
"-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for drop group from non-exist role"); + } catch (SentryUserException e) { + // excepted exception + } + + // test: grant privilege to role with the error privilege format + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", "serverserver1->action=*", + "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for the error privilege format, invalid key value."); + } catch (IllegalArgumentException e) { + // excepted exception + } + + // test: grant privilege to role with the error privilege hierarchy + args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-p", + "server=server1->table=tbl1->column=col2->action=insert", "-conf", + confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + try { + sentryShell.executeShell(args); + fail("Exception should be thrown for the error privilege format, invalid key value."); + } catch (IllegalArgumentException e) { + // excepted exception + } + + // clear the test data + client.dropRole(requestorName, TEST_ROLE_NAME_1); + } + }); + } + + @Test + public void testNegativeCaseWithoutRequiredArgument() throws Exception { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + String strOptionConf = "conf"; + client.createRole(requestorName, TEST_ROLE_NAME_1); + // test: the conf is required argument + String[] args = { "-cr", "-r", TEST_ROLE_NAME_1 }; + SentryShellHive sentryShell = new SentryShellHive(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + strOptionConf); + + // test: -r is required when create role + args = new String[] { "-cr", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -r is required when drop role + args = new String[] { "-dr", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -r is required when add role to group + args = new String[] { "-arg", "-g", "testGroup1", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -g is required when add role to group + args = new String[] { "-arg", "-r", TEST_ROLE_NAME_2, "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_GROUP_NAME); + + // test: -r is required when delete role from group + args = new String[] { "-drg", "-g", "testGroup1", "-conf", confPath.getAbsolutePath() }; + sentryShell = new SentryShellHive(); + validateMissingParameterMsg(sentryShell, args, + SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME); + + // test: -g is required when delete role from group + args = new String[] { "-drg", "-r", TEST_ROLE_NAME_2, "-conf", confPath.getAbsolutePath() }; + sentryShell = new 
+
+ @Test
+ public void testNegativeCaseWithoutRequiredArgument() throws Exception {
+ runTestAsSubject(new TestOperation() {
+ @Override
+ public void runTestAsSubject() throws Exception {
+ String strOptionConf = "conf";
+ client.createRole(requestorName, TEST_ROLE_NAME_1);
+ // test: -conf is a required argument
+ String[] args = { "-cr", "-r", TEST_ROLE_NAME_1 };
+ SentryShellHive sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + strOptionConf);
+
+ // test: -r is required when creating a role
+ args = new String[] { "-cr", "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME);
+
+ // test: -r is required when dropping a role
+ args = new String[] { "-dr", "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME);
+
+ // test: -r is required when adding a role to a group
+ args = new String[] { "-arg", "-g", "testGroup1", "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME);
+
+ // test: -g is required when adding a role to a group
+ args = new String[] { "-arg", "-r", TEST_ROLE_NAME_2, "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_GROUP_NAME);
+
+ // test: -r is required when deleting a role from a group
+ args = new String[] { "-drg", "-g", "testGroup1", "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME);
+
+ // test: -g is required when deleting a role from a group
+ args = new String[] { "-drg", "-r", TEST_ROLE_NAME_2, "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_GROUP_NAME);
+
+ // test: -r is required when granting a privilege to a role
+ args = new String[] { "-gpr", "-p", "server=server1", "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME);
+
+ // test: -p is required when granting a privilege to a role
+ args = new String[] { "-gpr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_PRIVILEGE);
+
+ // test: -r is required when revoking a privilege from a role
+ args = new String[] { "-rpr", "-p", "server=server1", "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_ROLE_NAME);
+
+ // test: -p is required when revoking a privilege from a role
+ args = new String[] { "-rpr", "-r", TEST_ROLE_NAME_1, "-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsg(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + SentryShellCommon.OPTION_DESC_PRIVILEGE);
+
+ // test: a command option is required for the shell
+ args = new String[] {"-conf", confPath.getAbsolutePath() };
+ sentryShell = new SentryShellHive();
+ validateMissingParameterMsgsContains(sentryShell, args,
+ SentryShellCommon.PREFIX_MESSAGE_MISSING_OPTION + "[",
+ "-arg Add role to group",
+ "-cr Create role",
+ "-rpr Revoke privilege from role",
+ "-drg Delete role from group",
+ "-lr List role",
+ "-lp List privilege",
+ "-gpr Grant privilege to role",
+ "-dr Drop role");
+
+ // clear the test data
+ client.dropRole(requestorName, TEST_ROLE_NAME_1);
+ }
+ });
+ }
+
+ // Redirect System.out to a ByteArrayOutputStream, execute the command, and parse the result.
+ private Set<String> getShellResultWithOSRedirect(SentryShellHive sentryShell,
+ String[] args, boolean expectedExecuteResult) throws Exception {
+ PrintStream oldOut = System.out;
+ ByteArrayOutputStream outContent = new ByteArrayOutputStream();
+ System.setOut(new PrintStream(outContent));
+ assertEquals(expectedExecuteResult, sentryShell.executeShell(args));
+ Set<String> resultSet = Sets.newHashSet(outContent.toString().split("\n"));
+ System.setOut(oldOut);
+ return resultSet;
+ }
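[Editor's note] getShellResultWithOSRedirect swaps System.out to capture the shell's output; if executeShell throws, the original stream is never restored. A try/finally variant (a sketch, not part of this patch) avoids that leak:

    private Set<String> captureStdout(SentryShellHive shell, String[] args,
        boolean expectedResult) throws Exception {
      PrintStream oldOut = System.out;
      ByteArrayOutputStream buf = new ByteArrayOutputStream();
      System.setOut(new PrintStream(buf));
      try {
        assertEquals(expectedResult, shell.executeShell(args));
      } finally {
        System.setOut(oldOut); // restore even if the shell throws
      }
      return Sets.newHashSet(buf.toString().split("\n"));
    }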
+
+ private void validateRoleNames(Set<String> roleNames, String ... expectedRoleNames) {
+ if (expectedRoleNames != null && expectedRoleNames.length > 0) {
+ assertEquals("Found: " + roleNames.size() + " roles, expected: " + expectedRoleNames.length,
+ expectedRoleNames.length, roleNames.size());
+ Set<String> lowerCaseRoles = new HashSet<String>();
+ for (String role : roleNames) {
+ lowerCaseRoles.add(role.toLowerCase());
+ }
+
+ for (String expectedRole : expectedRoleNames) {
+ assertTrue("Expected role: " + expectedRole,
+ lowerCaseRoles.contains(expectedRole.toLowerCase()));
+ }
+ }
+ }
+
+ private void validateMissingParameterMsg(SentryShellHive sentryShell, String[] args,
+ String expectedErrorMsg) throws Exception {
+ Set<String> errorMsgs = getShellResultWithOSRedirect(sentryShell, args, false);
+ assertTrue(errorMsgs.contains(expectedErrorMsg));
+ }
+
+ private void validateMissingParameterMsgsContains(SentryShellHive sentryShell, String[] args,
+ String ... expectedErrorMsgsContains) throws Exception {
+ Set<String> errorMsgs = getShellResultWithOSRedirect(sentryShell, args, false);
+ boolean foundAllMessages = false;
+ Iterator<String> it = errorMsgs.iterator();
+ while (it.hasNext()) {
+ String errorMessage = it.next();
+ boolean missingExpected = false;
+ for (String expectedContains : expectedErrorMsgsContains) {
+ if (!errorMessage.contains(expectedContains)) {
+ missingExpected = true;
+ break;
+ }
+ }
+ if (!missingExpected) {
+ foundAllMessages = true;
+ break;
+ }
+ }
+ assertTrue(foundAllMessages);
+ }
+}
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java
index 9a6f8c44b..e02bd8a9f 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.sentry.SentryUserException;
+import org.apache.sentry.provider.db.service.persistent.HAContext;
 import org.apache.sentry.provider.db.service.thrift.SentryMiniKdcTestcase;
 import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
 import org.apache.sentry.provider.db.service.thrift.TSentryRole;
@@ -56,12 +56,6 @@ public abstract class SentryServiceIntegrationBase extends SentryMiniKdcTestcase {
 private static final Logger LOGGER = LoggerFactory.getLogger(SentryServiceIntegrationBase.class);
- static {
- if (System.getProperty("sun.security.krb5.debug", "").trim().isEmpty()) {
- System.setProperty("sun.security.krb5.debug", String.valueOf("true"));
- }
- }
-
 protected static final String SERVER_HOST = NetUtils.createSocketAddr("localhost:80").getAddress().getCanonicalHostName();
 protected static final String REALM = "EXAMPLE.COM";
 protected static final String SERVER_PRINCIPAL = "sentry/" + SERVER_HOST;
@@ -100,9 +94,12 @@ public abstract class SentryServiceIntegrationBase extends SentryMiniKdcTestcase
 protected static int webServerPort = ServerConfig.SENTRY_WEB_PORT_DEFAULT;
 protected static boolean webSecurity = false;
+ protected static boolean pooled = false;
+
 @BeforeClass
 public static void setup() throws Exception {
 kerberos = true;
+ pooled = true;
 beforeSetup();
 setupConf();
 startSentryService();
@@ -124,6 +121,11 @@ public static void startSentryService() throws Exception
{ } } + public void stopSentryService() throws Exception { + server.stop(); + Thread.sleep(30000); + } + public static void setupConf() throws Exception { if (kerberos) { setupKdc(); @@ -179,7 +181,9 @@ public static void setupConf() throws Exception { } else { conf.set(ServerConfig.SENTRY_WEB_ENABLE, "false"); } - + if (pooled) { + conf.set(ClientConfig.SENTRY_POOL_ENABLED, "true"); + } conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); conf.set(ServerConfig.ADMIN_GROUPS, ADMIN_GROUP); conf.set(ServerConfig.RPC_ADDRESS, SERVER_HOST); @@ -187,6 +191,7 @@ public static void setupConf() throws Exception { dbDir = new File(Files.createTempDir(), "sentry_policy_db"); conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true"); + conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); server = new SentryServiceFactory().create(conf); conf.set(ClientConfig.SERVER_RPC_ADDRESS, server.getAddress().getHostName()); conf.set(ClientConfig.SERVER_RPC_PORT, String.valueOf(server.getAddress().getPort())); @@ -204,17 +209,27 @@ public void before() throws Exception { } @After - public void after() throws SentryUserException { - if (client != null) { - Set tRoles = client.listRoles(ADMIN_USER); - if (tRoles != null) { - for (TSentryRole tRole : tRoles) { - client.dropRole(ADMIN_USER, tRole.getRoleName()); + public void after() { + try { + runTestAsSubject(new TestOperation() { + @Override + public void runTestAsSubject() throws Exception { + if (client != null) { + Set tRoles = client.listRoles(ADMIN_USER); + if (tRoles != null) { + for (TSentryRole tRole : tRoles) { + client.dropRole(ADMIN_USER, tRole.getRoleName()); + } + } + client.close(); + } } - } - client.close(); + }); + } catch (Exception e) { + LOGGER.error(e.getMessage(), e); + } finally { + policyFilePath.delete(); } - policyFilePath.delete(); } public void connectToSentryService() throws Exception { @@ -313,9 +328,10 @@ protected static TestingServer getZKServer() throws Exception { System.setProperty("zookeeper.kerberos.removeHostFromPrincipal", "true"); System.setProperty("zookeeper.kerberos.removeRealmFromPrincipal", "true"); - JaasConfiguration.addEntry("Server", ZK_SERVER_PRINCIPAL, ZKKeytabFile.getAbsolutePath()); + JaasConfiguration.addEntryForKeytab("Server", ZK_SERVER_PRINCIPAL, ZKKeytabFile.getAbsolutePath()); // Here's where we add the "Client" to the jaas configuration, even though we'd like not to - JaasConfiguration.addEntry("Client", SERVER_KERBEROS_NAME, serverKeytab.getAbsolutePath()); + JaasConfiguration.addEntryForKeytab(HAContext.SENTRY_ZK_JAAS_NAME, + SERVER_KERBEROS_NAME, serverKeytab.getAbsolutePath()); javax.security.auth.login.Configuration.setConfiguration(JaasConfiguration.getInstance()); System.setProperty(ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, "Server"); @@ -339,7 +355,7 @@ public Void run() throws Exception { } protected interface TestOperation { - public void runTestAsSubject() throws Exception; + void runTestAsSubject() throws Exception; } } diff --git a/sentry-provider/sentry-provider-file/pom.xml b/sentry-provider/sentry-provider-file/pom.xml index 84cdf3f82..1f3f7e67e 100644 --- a/sentry-provider/sentry-provider-file/pom.xml +++ b/sentry-provider/sentry-provider-file/pom.xml @@ -21,7 +21,7 @@ limitations under the License. 
 <groupId>org.apache.sentry</groupId>
 <artifactId>sentry-provider</artifactId>
- <version>1.5.0-incubating-SNAPSHOT</version>
+ <version>1.7.0-incubating-SNAPSHOT</version>
 <artifactId>sentry-provider-file</artifactId>
diff --git a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/LocalGroupMappingService.java b/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/LocalGroupMappingService.java
index 9b146d9d1..fed1195ef 100644
--- a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/LocalGroupMappingService.java
+++ b/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/LocalGroupMappingService.java
@@ -18,7 +18,6 @@ package org.apache.sentry.provider.file;
 import java.io.IOException;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -27,7 +26,10 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.sentry.policy.common.PolicyConstants;
 import org.apache.sentry.provider.common.GroupMappingService;
+import org.apache.sentry.provider.common.PolicyFileConstants;
+import org.apache.sentry.provider.common.SentryGroupNotFoundException;
 import org.apache.shiro.config.Ini;
 import org.apache.shiro.config.Ini.Section;
 import org.slf4j.Logger;
@@ -83,11 +85,11 @@ public LocalGroupMappingService(Configuration configuration, String resource)
 @Override
 public Set<String> getGroups(String user) {
- if (groupMap.containsKey(user)) {
- return groupMap.get(user);
- } else {
- return Collections.emptySet();
+ Set<String> groups = groupMap.get(user);
+ if (groups == null || groups.isEmpty()) {
+ throw new SentryGroupNotFoundException("Unable to obtain groups for " + user);
 }
+ return groups;
 }
 private void parseGroups(FileSystem fileSystem, Path resourcePath) throws IOException {
@@ -109,8 +111,8 @@ private void parseGroups(FileSystem fileSystem, Path resourcePath) throws IOExce
 " in the " + resourcePath);
 continue;
 }
- Set<String> groupList = Sets.newHashSet(
- PolicyFileConstants.ROLE_SPLITTER.trimResults().split(groupNames));
+ Set<String> groupList = Sets.newHashSet(PolicyConstants.ROLE_SPLITTER.trimResults().split(
+ groupNames));
 LOGGER.debug("Got user mapping: " + userName + ", Groups: " + groupNames);
 groupMap.put(userName, groupList);
 }
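[Editor's note] With this change, getGroups no longer returns an empty set for an unmapped user. Callers must now handle the new failure mode; a minimal sketch, assuming a configured LocalGroupMappingService instance named groupMapping and a hypothetical user name:

    Set<String> groups;
    try {
      groups = groupMapping.getGroups("alice"); // "alice" is hypothetical
    } catch (SentryGroupNotFoundException e) {
      // an unmapped user now surfaces as an exception, not an empty set
      groups = Collections.emptySet();
    }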
diff --git a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFile.java b/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFile.java
index 32b2d722a..991a95f12 100644
--- a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFile.java
+++ b/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFile.java
@@ -17,15 +17,16 @@ package org.apache.sentry.provider.file;
-import static org.apache.sentry.provider.file.PolicyFileConstants.DATABASES;
-import static org.apache.sentry.provider.file.PolicyFileConstants.GROUPS;
-import static org.apache.sentry.provider.file.PolicyFileConstants.ROLES;
-import static org.apache.sentry.provider.file.PolicyFileConstants.USERS;
+import static org.apache.sentry.provider.common.PolicyFileConstants.DATABASES;
+import static org.apache.sentry.provider.common.PolicyFileConstants.GROUPS;
+import static org.apache.sentry.provider.common.PolicyFileConstants.ROLES;
+import static org.apache.sentry.provider.common.PolicyFileConstants.USERS;
 import java.io.File;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -88,9 +89,9 @@ public PolicyFile addGroupsToUser(String userName, boolean allowDuplicates, Stri
 LOGGER.warn("Static user:group mapping is not being used");
 return add(usersToGroups.get(userName), allowDuplicates, groupNames);
 }
- public PolicyFile setUserGroupMapping(Map<String, String> mapping){
- for(String key: mapping.keySet()){
- usersToGroups.put(key, mapping.get(key));
+ public PolicyFile setUserGroupMapping(Map<String, String> mapping) {
+ for (Entry<String, String> entry : mapping.entrySet()) {
+ usersToGroups.put(entry.getKey(), entry.getValue());
 }
 return this;
 }
@@ -155,8 +156,8 @@ private String getSection(String name, Map<String, String> mapping) {
 Joiner kvJoiner = Joiner.on(" = ");
 List<String> lines = Lists.newArrayList();
 lines.add("[" + name + "]");
- for(String key : mapping.keySet()) {
- lines.add(kvJoiner.join(key, mapping.get(key)));
+ for (Entry<String, String> entry : mapping.entrySet()) {
+ lines.add(kvJoiner.join(entry.getKey(), entry.getValue()));
 }
 return Joiner.on(NL).join(lines);
 }
diff --git a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFiles.java b/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFiles.java
index f30329478..378f63c42 100644
--- a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFiles.java
+++ b/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/PolicyFiles.java
@@ -16,8 +16,13 @@
 */
 package org.apache.sentry.provider.file;
-import com.google.common.io.ByteStreams;
-import com.google.common.io.Resources;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -25,12 +30,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
+import com.google.common.io.ByteStreams;
+import com.google.common.io.Resources;
 public class PolicyFiles {
@@ -40,7 +41,7 @@ public class PolicyFiles {
 public static void copyToDir(File dest, String... resources)
 throws FileNotFoundException, IOException {
 for(String resource : resources) {
- LOGGER.info("Copying " + resource + " to " + dest);
+ LOGGER.debug("Copying " + resource + " to " + dest);
 Resources.copy(Resources.getResource(resource), new FileOutputStream(new File(dest, resource)));
 }
 }
@@ -54,26 +55,26 @@ public static void copyToDir(FileSystem fs, Path dest, String...
resources) in.close(); out.hflush(); out.close(); - LOGGER.info("Copying " + resource + " to " + dest + ", bytes " + bytes); + LOGGER.debug("Copying " + resource + " to " + dest + ", bytes " + bytes); } } public static void copyFilesToDir(FileSystem fs, Path dest, File inputFile) throws IOException { - InputStream input = new FileInputStream(inputFile.getPath()); - FSDataOutputStream out = fs.create(new Path(dest, inputFile.getName())); - ByteStreams.copy(input, out); - input.close(); - out.hflush(); - out.close(); + try (InputStream input = new FileInputStream(inputFile.getPath()); + FSDataOutputStream out = fs.create(new Path(dest, inputFile.getName()))) { + ByteStreams.copy(input, out); + input.close(); + out.hflush(); + out.close(); + } } public static Ini loadFromPath(FileSystem fileSystem, Path path) throws IOException { InputStream inputStream = null; try { - LOGGER.info("Opening " + path); - String dfsUri = fileSystem.getDefaultUri(fileSystem.getConf()).toString(); + LOGGER.debug("Opening " + path); inputStream = fileSystem.open(path); Ini ini = new Ini(); ini.load(inputStream); diff --git a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/SimpleFileProviderBackend.java b/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/SimpleFileProviderBackend.java index fa5ab698a..884de16a1 100644 --- a/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/SimpleFileProviderBackend.java +++ b/sentry-provider/sentry-provider-file/src/main/java/org/apache/sentry/provider/file/SimpleFileProviderBackend.java @@ -16,19 +16,16 @@ */ package org.apache.sentry.provider.file; -import com.google.common.base.Splitter; -import com.google.common.base.Strings; -import com.google.common.collect.HashBasedTable; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Interner; -import com.google.common.collect.Interners; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimap; -import com.google.common.collect.Sets; -import com.google.common.collect.Table; -import com.google.common.collect.Table.Cell; +import static org.apache.sentry.policy.common.PolicyConstants.ROLE_SPLITTER; + +import java.io.IOException; +import java.net.URI; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.annotation.Nullable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -39,26 +36,26 @@ import org.apache.sentry.policy.common.PrivilegeUtils; import org.apache.sentry.policy.common.PrivilegeValidator; import org.apache.sentry.policy.common.PrivilegeValidatorContext; +import org.apache.sentry.provider.common.PolicyFileConstants; import org.apache.sentry.provider.common.ProviderBackend; import org.apache.sentry.provider.common.ProviderBackendContext; import org.apache.shiro.config.Ini; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; - -import java.io.IOException; -import java.net.URI; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.apache.sentry.provider.file.PolicyFileConstants.DATABASES; -import static org.apache.sentry.provider.file.PolicyFileConstants.GROUPS; -import static org.apache.sentry.provider.file.PolicyFileConstants.ROLES; -import static 
org.apache.sentry.provider.file.PolicyFileConstants.ROLE_SPLITTER; -import static org.apache.sentry.provider.file.PolicyFileConstants.USERS; +import com.google.common.base.Splitter; +import com.google.common.base.Strings; +import com.google.common.collect.HashBasedTable; +import com.google.common.collect.HashMultimap; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Interner; +import com.google.common.collect.Interners; +import com.google.common.collect.Lists; +import com.google.common.collect.Multimap; +import com.google.common.collect.Sets; +import com.google.common.collect.Table; +import com.google.common.collect.Table.Cell; public class SimpleFileProviderBackend implements ProviderBackend { @@ -183,7 +180,7 @@ public ImmutableSet getRoles(Set groups, ActiveRoleSet roleSet) @Override public void close() { - groupRolePrivilegeTable.clear(); + // SENTRY-847 will use HiveAuthBinding again, so groupRolePrivilegeTable shouldn't clear itself } @Override @@ -193,7 +190,7 @@ public void validatePolicy(boolean strictValidation) throws SentryConfigurationE } List localConfigErrors = Lists.newArrayList(configErrors); List localConfigWarnings = Lists.newArrayList(configWarnings); - if ((strictValidation && !localConfigWarnings.isEmpty()) || !localConfigErrors.isEmpty()) { + if (strictValidation && !localConfigWarnings.isEmpty() || !localConfigErrors.isEmpty()) { localConfigErrors.add("Failed to process global policy file " + resourcePath); SentryConfigurationException e = new SentryConfigurationException(""); e.setConfigErrors(localConfigErrors); @@ -235,9 +232,9 @@ private void parse() { parseIni(null, ini, validators, resourcePath, groupRolePrivilegeTableTemp); mergeResult(groupRolePrivilegeTableTemp); groupRolePrivilegeTableTemp.clear(); - Ini.Section filesSection = ini.getSection(DATABASES); + Ini.Section filesSection = ini.getSection(PolicyFileConstants.DATABASES); if(filesSection == null) { - LOGGER.info("Section " + DATABASES + " needs no further processing"); + LOGGER.info("Section " + PolicyFileConstants.DATABASES + " needs no further processing"); } else if (!allowPerDatabaseSection) { String msg = "Per-db policy file is not expected in this configuration."; throw new SentryConfigurationException(msg); @@ -251,14 +248,14 @@ private void parse() { try { LOGGER.debug("Parsing " + perDbPolicy); Ini perDbIni = PolicyFiles.loadFromPath(perDbPolicy.getFileSystem(conf), perDbPolicy); - if(perDbIni.containsKey(USERS)) { - configErrors.add("Per-db policy file cannot contain " + USERS + " section in " + perDbPolicy); - throw new SentryConfigurationException("Per-db policy files cannot contain " + USERS + " section"); + if(perDbIni.containsKey(PolicyFileConstants.USERS)) { + configErrors.add("Per-db policy file cannot contain " + PolicyFileConstants.USERS + " section in " + perDbPolicy); + throw new SentryConfigurationException("Per-db policy files cannot contain " + PolicyFileConstants.USERS + " section"); } - if(perDbIni.containsKey(DATABASES)) { - configErrors.add("Per-db policy files cannot contain " + DATABASES + if(perDbIni.containsKey(PolicyFileConstants.DATABASES)) { + configErrors.add("Per-db policy files cannot contain " + PolicyFileConstants.DATABASES + " section in " + perDbPolicy); - throw new SentryConfigurationException("Per-db policy files cannot contain " + DATABASES + " section"); + throw new SentryConfigurationException("Per-db policy files cannot contain " + PolicyFileConstants.DATABASES + " 
section"); } parseIni(database, perDbIni, validators, perDbPolicy, groupRolePrivilegeTableTemp); } catch (Exception e) { @@ -301,17 +298,17 @@ private void mergeResult(Table> groupRolePrivilegeTa private void parseIni(String database, Ini ini, List validators, Path policyPath, Table> groupRolePrivilegeTable) { - Ini.Section privilegesSection = ini.getSection(ROLES); + Ini.Section privilegesSection = ini.getSection(PolicyFileConstants.ROLES); boolean invalidConfiguration = false; if (privilegesSection == null) { - String errMsg = String.format("Section %s empty for %s", ROLES, policyPath); + String errMsg = String.format("Section %s empty for %s", PolicyFileConstants.ROLES, policyPath); LOGGER.warn(errMsg); configErrors.add(errMsg); invalidConfiguration = true; } - Ini.Section groupsSection = ini.getSection(GROUPS); + Ini.Section groupsSection = ini.getSection(PolicyFileConstants.GROUPS); if (groupsSection == null) { - String warnMsg = String.format("Section %s empty for %s", GROUPS, policyPath); + String warnMsg = String.format("Section %s empty for %s", PolicyFileConstants.GROUPS, policyPath); LOGGER.warn(warnMsg); configErrors.add(warnMsg); invalidConfiguration = true; diff --git a/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestLocalGroupMapping.java b/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestLocalGroupMapping.java index c4360099f..c5345bcb5 100644 --- a/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestLocalGroupMapping.java +++ b/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestLocalGroupMapping.java @@ -23,6 +23,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.Path; +import org.apache.sentry.provider.common.SentryGroupNotFoundException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -63,7 +64,10 @@ public void testGroupMapping() { Set barGroupsFromResource = localGroupMapping.getGroups("bar"); Assert.assertEquals(barGroupsFromResource, barGroups); - Set unknownGroupsFromResource = localGroupMapping.getGroups("unknown"); - Assert.assertTrue("List not empty " + unknownGroupsFromResource, unknownGroupsFromResource.isEmpty()); + try { + localGroupMapping.getGroups("unknown"); + Assert.fail("SentryGroupNotFoundException should be thrown."); + } catch (SentryGroupNotFoundException sgnfe) { + } } } diff --git a/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestSimpleFileProvderBackend.java b/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestSimpleFileProvderBackend.java index cd203cd68..20d5664c9 100644 --- a/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestSimpleFileProvderBackend.java +++ b/sentry-provider/sentry-provider-file/src/test/java/org/apache/sentry/provider/file/TestSimpleFileProvderBackend.java @@ -17,8 +17,8 @@ * under the License. */ package org.apache.sentry.provider.file; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; diff --git a/sentry-solr/pom.xml b/sentry-solr/pom.xml index c2438029a..43798c974 100644 --- a/sentry-solr/pom.xml +++ b/sentry-solr/pom.xml @@ -22,7 +22,7 @@ limitations under the License. 
 <groupId>org.apache.sentry</groupId>
 <artifactId>sentry</artifactId>
- <version>1.5.0-incubating-SNAPSHOT</version>
+ <version>1.7.0-incubating-SNAPSHOT</version>
 <artifactId>sentry-solr</artifactId>
@@ -31,6 +31,7 @@ limitations under the License.
 <module>solr-sentry-handlers</module>
+ <module>solr-sentry-core</module>
diff --git a/sentry-solr/solr-sentry-core/pom.xml b/sentry-solr/solr-sentry-core/pom.xml
new file mode 100644
index 000000000..44fbb864a
--- /dev/null
+++ b/sentry-solr/solr-sentry-core/pom.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.sentry</groupId>
+ <artifactId>sentry-solr</artifactId>
+ <version>1.7.0-incubating-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>solr-sentry-core</artifactId>
+ <name>Solr Sentry Core</name>
+
+ <dependencies>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.sentry</groupId>
+ <artifactId>sentry-core-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.sentry</groupId>
+ <artifactId>sentry-core-model-search</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.sentry</groupId>
+ <artifactId>sentry-binding-solr</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.solr</groupId>
+ <artifactId>solr-solrj</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.solr</groupId>
+ <artifactId>solr-core</artifactId>
+ </dependency>
+ </dependencies>
+</project>
diff --git a/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/AuditLogger.java b/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/AuditLogger.java
new file mode 100644
index 000000000..7f3e391e1
--- /dev/null
+++ b/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/AuditLogger.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.sentry;
+
+
+import org.apache.lucene.util.Version;
+import org.noggit.CharArr;
+import org.noggit.JSONWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Writes audit events to the audit log. This helps answer questions such as:
+ * Who did what action when from where, and what values were changed from what
+ * to what as a result?
+ */
+final class AuditLogger {
+
+ public static final int ALLOWED = 1;
+ public static final int UNAUTHORIZED = 0;
+
+ private final Logger logger;
+
+ private static final boolean IS_ENABLED =
+ Boolean.valueOf(
+ System.getProperty(AuditLogger.class.getName() + ".isEnabled", "true"));
+
+ private static final String SOLR_VERSION = Version.LATEST.toString();
+
+
+ public AuditLogger() {
+ this.logger = LoggerFactory.getLogger(getClass());
+ }
+
+ public boolean isLogEnabled() {
+ return IS_ENABLED && logger.isInfoEnabled();
+ }
+
+ public void log(
+ String userName,
+ String impersonator,
+ String ipAddress,
+ String operation,
+ String operationParams,
+ long eventTime,
+ int allowed,
+ String collectionName) {
+
+ if (!isLogEnabled()) {
+ return;
+ }
+ CharArr chars = new CharArr(512);
+ JSONWriter writer = new JSONWriter(chars, -1);
+ writer.startObject();
+ writeField("solrVersion", SOLR_VERSION, writer);
+ writer.writeValueSeparator();
+ writeField("eventTime", eventTime, writer);
+ writer.writeValueSeparator();
+ writeField("allowed", allowed, writer);
+ writer.writeValueSeparator();
+ writeField("collectionName", collectionName, writer);
+ writer.writeValueSeparator();
+ writeField("operation", operation, writer);
+ writer.writeValueSeparator();
+ writeField("operationParams", operationParams, writer);
+ writer.writeValueSeparator();
+ writeField("ipAddress", ipAddress, writer);
+ writer.writeValueSeparator();
+ writeField("username", userName, writer);
+ writer.writeValueSeparator();
+ writeField("impersonator", impersonator, writer);
+ writer.endObject();
+ logger.info("{}", chars);
+ }
+
+ private void writeField(String key, Object value, JSONWriter writer) {
+ writer.writeString(key);
+ writer.writeNameSeparator();
+ writer.write(value);
+ }
+
+}
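[Editor's note] One call to log() above produces a single JSON line; a minimal sketch with hypothetical values (the field order follows the writeField calls in the class):

    AuditLogger audit = new AuditLogger();
    audit.log("alice", null, "10.0.0.5", "QUERY", "q=*:*",
        System.currentTimeMillis(), AuditLogger.ALLOWED, "collection1");
    // Emits one line shaped like:
    // {"solrVersion":"...","eventTime":...,"allowed":1,"collectionName":"collection1",
    //  "operation":"QUERY","operationParams":"q=*:*","ipAddress":"10.0.0.5",
    //  "username":"alice","impersonator":null}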
diff --git a/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/RollingFileWithoutDeleteAppender.java b/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/RollingFileWithoutDeleteAppender.java
new file mode 100644
index 000000000..f749740a3
--- /dev/null
+++ b/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/RollingFileWithoutDeleteAppender.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.sentry;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.io.Writer;
+
+import org.apache.log4j.FileAppender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.helpers.CountingQuietWriter;
+import org.apache.log4j.helpers.LogLog;
+import org.apache.log4j.helpers.OptionConverter;
+import org.apache.log4j.spi.LoggingEvent;
+
+public class RollingFileWithoutDeleteAppender extends FileAppender {
+ /**
+ * The default maximum file size is 10MB.
+ */
+ protected long maxFileSize = 10 * 1024 * 1024;
+
+ private long nextRollover = 0;
+
+ /**
+ * The default constructor simply calls its {@link FileAppender#FileAppender
+ * parent's constructor}.
+ */
+ public RollingFileWithoutDeleteAppender() {
+ super();
+ }
+
+ /**
+ * Instantiate a RollingFileWithoutDeleteAppender and open the file designated by
+ * filename. The opened filename will become the output
+ * destination for this appender.
+ * <p>

+ * If the append parameter is true, the file will be appended to.
+ * Otherwise, the file designated by filename will be truncated
+ * before being opened.
+ */
+ public RollingFileWithoutDeleteAppender(Layout layout, String filename,
+ boolean append) throws IOException {
+ super(layout, getLogFileName(filename), append);
+ }
+
+ /**
+ * Instantiate a FileAppender and open the file designated by
+ * filename. The opened filename will become the output
+ * destination for this appender.
+ * <p>

+ * The file will be appended to. + */ + public RollingFileWithoutDeleteAppender(Layout layout, String filename) + throws IOException { + super(layout, getLogFileName(filename)); + } + + /** + * Get the maximum size that the output file is allowed to reach before being + * rolled over to backup files. + */ + public long getMaximumFileSize() { + return maxFileSize; + } + + /** + * Implements the usual roll over behaviour. + *

+ * The file is renamed by appending the current timestamp in milliseconds
+ * (see getLogFileName) and closed. A new file is created to receive
+ * further log output.
+ */
+ // synchronization not necessary since doAppend is already synchronized
+ public void rollOver() {
+ if (qw != null) {
+ long size = ((CountingQuietWriter) qw).getCount();
+ LogLog.debug("rolling over count=" + size);
+ // if operation fails, do not roll again until
+ // maxFileSize more bytes are written
+ nextRollover = size + maxFileSize;
+ }
+
+ this.closeFile(); // keep windows happy.
+
+ String newFileName = getLogFileName(fileName);
+ try {
+ // This will also close the file. This is OK since multiple
+ // close operations are safe.
+ this.setFile(newFileName, false, bufferedIO, bufferSize);
+ nextRollover = 0;
+ } catch (IOException e) {
+ if (e instanceof InterruptedIOException) {
+ Thread.currentThread().interrupt();
+ }
+ LogLog.error("setFile(" + newFileName + ", false) call failed.", e);
+ }
+ }
+
+ public synchronized void setFile(String fileName, boolean append,
+ boolean bufferedIO, int bufferSize) throws IOException {
+ super.setFile(fileName, append, this.bufferedIO, this.bufferSize);
+ if (append) {
+ File f = new File(fileName);
+ ((CountingQuietWriter) qw).setCount(f.length());
+ }
+ }
+
+ /**
+ * Set the maximum size that the output file is allowed to reach before being
+ * rolled over to backup files.
+ * <p>

+ * This method is equivalent to {@link #setMaxFileSize} except that it is
+ * required for differentiating the setter taking a long argument
+ * from the setter taking a String argument by the JavaBeans
+ * {@link java.beans.Introspector Introspector}.
+ *
+ * @see #setMaxFileSize(String)
+ */
+ public void setMaximumFileSize(long maxFileSize) {
+ this.maxFileSize = maxFileSize;
+ }
+
+ /**
+ * Set the maximum size that the output file is allowed to reach before being
+ * rolled over to backup files.
+ * <p>

+ * In configuration files, the MaxFileSize option takes a long integer
+ * in the range 0 - 2^63. You can specify the value with the suffixes "KB",
+ * "MB" or "GB" so that the integer is interpreted as being expressed
+ * respectively in kilobytes, megabytes or gigabytes. For example, the value
+ * "10KB" will be interpreted as 10240.
+ */
+ public void setMaxFileSize(String value) {
+ maxFileSize = OptionConverter.toFileSize(value, maxFileSize + 1);
+ }
+
+ protected void setQWForFiles(Writer writer) {
+ this.qw = new CountingQuietWriter(writer, errorHandler);
+ }
+
+ /**
+ * This method differentiates RollingFileWithoutDeleteAppender from its super class.
+ */
+ protected void subAppend(LoggingEvent event) {
+ super.subAppend(event);
+
+ if (fileName != null && qw != null) {
+ long size = ((CountingQuietWriter) qw).getCount();
+ if (size >= maxFileSize && size >= nextRollover) {
+ rollOver();
+ }
+ }
+ }
+
+ // Mangle the file name by appending the current timestamp
+ private static String getLogFileName(String oldFileName) {
+ return oldFileName + "." + Long.toString(System.currentTimeMillis());
+ }
+}
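[Editor's note] A minimal sketch of wiring the appender above to the audit logger programmatically; it assumes log4j 1.x is the logging backend, and the file path and layout pattern are illustrative:

    import java.io.IOException;
    import org.apache.log4j.Logger;
    import org.apache.log4j.PatternLayout;

    public class AuditLogSetup {
      public static void main(String[] args) throws IOException {
        // The file is created as audit.log.<timestamp> and rolled, never deleted.
        RollingFileWithoutDeleteAppender appender = new RollingFileWithoutDeleteAppender(
            new PatternLayout("%m%n"), "/var/log/solr/audit.log");
        appender.setMaxFileSize("10MB"); // parsed via OptionConverter.toFileSize
        Logger.getLogger("org.apache.solr.sentry.AuditLogger").addAppender(appender);
      }
    }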
diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureRequestHandlerUtil.java b/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/SecureRequestHandlerUtil.java
similarity index 86%
rename from sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureRequestHandlerUtil.java
rename to sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/SecureRequestHandlerUtil.java
index 7ae5391a7..be9642bca 100644
--- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureRequestHandlerUtil.java
+++ b/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/SecureRequestHandlerUtil.java
@@ -14,14 +14,12 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-package org.apache.solr.handler;
+package org.apache.solr.sentry;
 import java.util.EnumSet;
 import java.util.Set;
 import org.apache.sentry.core.model.search.SearchModelAction;
 import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.sentry.SentryIndexAuthorizationSingleton;
 /**
 * Utility functions for Secure (sentry-aware) versions of RequestHandlers
@@ -43,17 +41,18 @@ public class SecureRequestHandlerUtil {
 * @param collection only relevant if checkCollection==true,
 * use collection (if non-null) instead pulling collection name from req (if null)
 */
- public static void checkSentryAdmin(SolrQueryRequest req, Set<SearchModelAction> andActions, boolean checkCollection, String collection) {
- checkSentry(req, andActions, true, checkCollection, collection);
+ public static void checkSentryAdmin(SolrQueryRequest req, Set<SearchModelAction> andActions,
+ String operation, boolean checkCollection, String collection) {
+ checkSentry(req, andActions, operation, true, checkCollection, collection);
 }
 /**
 * Attempt to authorize a collection action. The collection
 * name will be pulled from the request.
 */
- public static void checkSentryCollection(SolrQueryRequest req, Set<SearchModelAction> andActions) {
- checkSentry(req, andActions, false, false, null);
- }
+ public static void checkSentryCollection(SolrQueryRequest req, Set<SearchModelAction> andActions, String operation) {
+ checkSentry(req, andActions, operation, false, false, null);
+ }
 /**
 * Attempt to sync collection privileges with Sentry when the metadata has changed.
@@ -68,16 +67,16 @@ public static void syncDeleteCollection(String collection) {
 }
 }
 private static void checkSentry(SolrQueryRequest req, Set<SearchModelAction> andActions,
- boolean admin, boolean checkCollection, String collection) {
+ String operation, boolean admin, boolean checkCollection, String collection) {
 // Sentry currently does not have AND support for actions; need to check
 // actions one at a time
 final SentryIndexAuthorizationSingleton sentryInstance =
 (testOverride == null)?SentryIndexAuthorizationSingleton.getInstance():testOverride;
 for (SearchModelAction action : andActions) {
 if (admin) {
- sentryInstance.authorizeAdminAction(req, EnumSet.of(action), checkCollection, collection);
+ sentryInstance.authorizeAdminAction(req, EnumSet.of(action), operation, checkCollection, collection);
 } else {
- sentryInstance.authorizeCollectionAction(req, EnumSet.of(action));
+ sentryInstance.authorizeCollectionAction(req, EnumSet.of(action), operation);
 }
 }
 }
diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/sentry/SentryIndexAuthorizationSingleton.java b/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/SentryIndexAuthorizationSingleton.java
similarity index 74%
rename from sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/sentry/SentryIndexAuthorizationSingleton.java
rename to sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/SentryIndexAuthorizationSingleton.java
index 53c894635..245fe78d6 100644
--- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/sentry/SentryIndexAuthorizationSingleton.java
+++ b/sentry-solr/solr-sentry-core/src/main/java/org/apache/solr/sentry/SentryIndexAuthorizationSingleton.java
@@ -39,13 +39,26 @@ public class SentryIndexAuthorizationSingleton {
 private static Logger log = LoggerFactory.getLogger(SentryIndexAuthorizationSingleton.class);
+ /**
+ * Java system property for specifying the location of sentry-site.xml
+ */
 public static final String propertyName = "solr.authorization.sentry.site";
- private static final String USER_NAME = "solr.user.name";
+
+ /**
+ * {@link HttpServletRequest} attribute holding the requesting user name
+ */
+ public static final String USER_NAME = "solr.user.name";
+
+ /**
+ * {@link HttpServletRequest} attribute holding the doAs (impersonated) user name.
+ */
+ public static final String DO_AS_USER_NAME = "solr.do.as.user.name";
 private static final SentryIndexAuthorizationSingleton INSTANCE =
 new SentryIndexAuthorizationSingleton(System.getProperty(propertyName));
 private final SolrAuthzBinding binding;
+ private final AuditLogger auditLogger = new AuditLogger();
 private SentryIndexAuthorizationSingleton(String sentrySiteLocation) {
 SolrAuthzBinding tmpBinding = null;
@@ -85,15 +98,15 @@ public boolean isEnabled() {
 * use collection (if non-null) instead pulling collection name from req (if null)
 */
 public void authorizeAdminAction(SolrQueryRequest req,
- Set<SearchModelAction> actions, boolean checkCollection, String collection)
+ Set<SearchModelAction> actions, String operation, boolean checkCollection, String collection)
 throws SolrException {
- authorizeCollectionAction(req, actions, "admin", true);
+ authorizeCollectionAction(req, actions, operation, "admin", true);
 if (checkCollection) {
 // Let's not error out if we can't find the collection associated with an
 // admin action, it's pretty complicated to get all the possible administrative
 // actions correct. Instead, let's warn in the log and address any issues we
 // find.
- authorizeCollectionAction(req, actions, collection, false); + authorizeCollectionAction(req, actions, operation, collection, false); } } @@ -102,8 +115,8 @@ public void authorizeAdminAction(SolrQueryRequest req, * name will be pulled from the request. */ public void authorizeCollectionAction(SolrQueryRequest req, - Set actions) throws SolrException { - authorizeCollectionAction(req, actions, null, true); + Set actions, String operation) throws SolrException { + authorizeCollectionAction(req, actions, operation, null, true); } /** @@ -117,34 +130,61 @@ public void authorizeCollectionAction(SolrQueryRequest req, * cannot be located */ public void authorizeCollectionAction(SolrQueryRequest req, - Set actions, String collectionName, boolean errorIfNoCollection) + Set actions, String operation, String collectionName, + boolean errorIfNoCollection) throws SolrException { Subject superUser = new Subject(System.getProperty("solr.authorization.superuser", "solr")); Subject userName = new Subject(getUserName(req)); + long eventTime = req.getStartTime(); + String paramString = req.getParamString(); + String impersonator = getImpersonatorName(req); + + String ipAddress = null; + HttpServletRequest sreq = (HttpServletRequest) req.getContext().get("httpRequest"); + if (sreq != null) { + try { + ipAddress = sreq.getRemoteAddr(); + } catch (AssertionError e) { + // ignore + // This is a work-around for "Unexpected method call getRemoteAddr()" + // exception during unit test mocking at + // com.sun.proxy.$Proxy28.getRemoteAddr(Unknown Source) + } + } + if (collectionName == null) { SolrCore solrCore = req.getCore(); if (solrCore == null) { String msg = "Unable to locate collection for sentry to authorize because " + "no SolrCore attached to request"; if (errorIfNoCollection) { + auditLogger.log(userName.getName(), impersonator, ipAddress, + operation, paramString, eventTime, AuditLogger.UNAUTHORIZED, ""); throw new SolrException(SolrException.ErrorCode.UNAUTHORIZED, msg); } else { // just warn log.warn(msg); + auditLogger.log(userName.getName(), impersonator, ipAddress, + operation, paramString, eventTime, AuditLogger.ALLOWED, ""); return; } } collectionName = solrCore.getCoreDescriptor().getCloudDescriptor().getCollectionName(); } + Collection collection = new Collection(collectionName); try { if (!superUser.getName().equals(userName.getName())) { binding.authorizeCollection(userName, collection, actions); } } catch (SentrySolrAuthorizationException ex) { + auditLogger.log(userName.getName(), impersonator, ipAddress, + operation, paramString, eventTime, AuditLogger.UNAUTHORIZED, collectionName); throw new SolrException(SolrException.ErrorCode.UNAUTHORIZED, ex); } + auditLogger.log(userName.getName(), impersonator, ipAddress, + operation, paramString, eventTime, AuditLogger.ALLOWED, collectionName); } /** @@ -184,13 +224,21 @@ public String getUserName(SolrQueryRequest req) throws SolrException { throw new SolrException(SolrException.ErrorCode.UNAUTHORIZED, builder.toString()); } - String superUser = (System.getProperty("solr.authorization.superuser", "solr")); + String superUser = System.getProperty("solr.authorization.superuser", "solr"); // If a local request, treat it like a super user request; i.e. it is equivalent to an // http request from the same process. return req instanceof LocalSolrQueryRequest? 
superUser:(String)httpServletRequest.getAttribute(USER_NAME); } + private String getImpersonatorName(SolrQueryRequest req) { + HttpServletRequest httpServletRequest = (HttpServletRequest)req.getContext().get("httpRequest"); + if (httpServletRequest != null) { + return (String)httpServletRequest.getAttribute(DO_AS_USER_NAME); + } + return null; + } + /** * Attempt to notify the Sentry service when deleting collection happened * @param collection diff --git a/sentry-solr/solr-sentry-handlers/pom.xml b/sentry-solr/solr-sentry-handlers/pom.xml index 8ca1cb3bd..07d95faf1 100644 --- a/sentry-solr/solr-sentry-handlers/pom.xml +++ b/sentry-solr/solr-sentry-handlers/pom.xml @@ -22,7 +22,7 @@ limitations under the License. org.apache.sentry sentry-solr - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT solr-sentry-handlers @@ -44,20 +44,11 @@ limitations under the License. commons-lang test - - log4j - log4j - test - commons-logging commons-logging test - - org.apache.sentry - sentry-core-common - org.apache.sentry sentry-core-model-search @@ -80,7 +71,7 @@ limitations under the License. org.apache.sentry - sentry-binding-solr + solr-sentry-core org.apache.solr diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureDocumentAnalysisRequestHandler.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureDocumentAnalysisRequestHandler.java index 23886feb9..1c1f6f8a3 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureDocumentAnalysisRequestHandler.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureDocumentAnalysisRequestHandler.java @@ -19,6 +19,7 @@ import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; +import org.apache.solr.sentry.SecureRequestHandlerUtil; /** * Secure (sentry-aware) version of DocumentAnalysisRequestHandler @@ -26,7 +27,7 @@ public class SecureDocumentAnalysisRequestHandler extends DocumentAnalysisRequestHandler { @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { - SecureRequestHandlerUtil.checkSentryCollection(req, SecureRequestHandlerUtil.QUERY_ONLY); + SecureRequestHandlerUtil.checkSentryCollection(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName()); super.handleRequestBody(req, rsp); } } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureFieldAnalysisRequestHandler.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureFieldAnalysisRequestHandler.java index 4a8809afe..62f9a1969 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureFieldAnalysisRequestHandler.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureFieldAnalysisRequestHandler.java @@ -19,6 +19,7 @@ import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; +import org.apache.solr.sentry.SecureRequestHandlerUtil; /** * Secure (sentry-aware) version of FieldAnalysisRequestHandler @@ -26,7 +27,7 @@ public class SecureFieldAnalysisRequestHandler extends FieldAnalysisRequestHandler { @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { - SecureRequestHandlerUtil.checkSentryCollection(req, SecureRequestHandlerUtil.QUERY_ONLY); + SecureRequestHandlerUtil.checkSentryCollection(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName()); 
super.handleRequestBody(req, rsp); } } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureRealTimeGetHandler.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureRealTimeGetHandler.java new file mode 100644 index 000000000..db182ef8e --- /dev/null +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureRealTimeGetHandler.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.handler; + + +import org.apache.solr.handler.component.RealTimeGetComponent; +import org.apache.solr.handler.component.SecureRealTimeGetComponent; + +import java.util.ArrayList; +import java.util.List; + +public class SecureRealTimeGetHandler extends RealTimeGetHandler { + @Override + protected List getDefaultComponents() + { + List names = new ArrayList<>(1); + names.add(RealTimeGetComponent.COMPONENT_NAME); + names.add(SecureRealTimeGetComponent.COMPONENT_NAME); + return names; + } +} diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureReplicationHandler.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureReplicationHandler.java index 70e5c83cd..bdcd830de 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureReplicationHandler.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/SecureReplicationHandler.java @@ -18,6 +18,7 @@ import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; +import org.apache.solr.sentry.SecureRequestHandlerUtil; /** * Secure (sentry-aware) version of ReplicationHandler @@ -31,7 +32,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw // request handler collection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName(); } - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_AND_UPDATE, true, collection); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_AND_UPDATE, getClass().getName(), true, collection); super.handleRequestBody(req, rsp); } } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureAdminHandlers.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureAdminHandlers.java index 546375495..98354e514 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureAdminHandlers.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureAdminHandlers.java @@ -17,18 +17,15 @@ package org.apache.solr.handler.admin; import java.io.IOException; -import java.util.EnumSet; import 
java.util.Map; import org.apache.solr.common.SolrException; import org.apache.solr.core.CoreContainer; import org.apache.solr.core.SolrCore; -import org.apache.sentry.core.model.search.SearchModelAction; -import org.apache.solr.handler.RequestHandlerBase; -import org.apache.solr.handler.SecureRequestHandlerUtil; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.response.SolrQueryResponse; +import org.apache.solr.sentry.SecureRequestHandlerUtil; import org.apache.solr.util.plugin.SolrCoreAware; import org.apache.zookeeper.KeeperException; @@ -112,7 +109,7 @@ public SecureLoggingHandler() { @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { // logging handler can be used both to read and change logs - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_AND_UPDATE, false, null); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_AND_UPDATE, getClass().getName(), false, null); super.handleRequestBody(req, rsp); } } @@ -120,7 +117,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw public static class SecureLukeRequestHandler extends LukeRequestHandler { @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, true, null); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName(), true, null); super.handleRequestBody(req, rsp); } } @@ -128,7 +125,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw public static class SecurePluginInfoHandler extends PluginInfoHandler { @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, true, null); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName(), true, null); super.handleRequestBody(req, rsp); } } @@ -136,7 +133,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw public static class SecurePropertiesRequestHandler extends PropertiesRequestHandler { @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException { - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, false, null); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName(), false, null); super.handleRequestBody(req, rsp); } } @@ -145,7 +142,7 @@ public static class SecureShowFileRequestHandler extends ShowFileRequestHandler @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException, KeeperException, InterruptedException { - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, true, null); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName(), true, null); super.handleRequestBody(req, rsp); } } @@ -153,7 +150,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) public static class SecureSolrInfoMBeanHandler extends SolrInfoMBeanHandler { @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { - 
SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, true, null); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName(), true, null); super.handleRequestBody(req, rsp); } } @@ -171,7 +168,7 @@ public SecureSystemInfoHandler(CoreContainer cc) { public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { // this may or may not have the core SolrCore core = req.getCore(); - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, core != null, null); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName(), core != null, null); super.handleRequestBody(req, rsp); } } @@ -179,7 +176,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw public static class SecureThreadDumpHandler extends ThreadDumpHandler { @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException { - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, false, null); + SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, getClass().getName(), false, null); super.handleRequestBody(req, rsp); } } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureCollectionsHandler.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureCollectionsHandler.java index 0a471a4d2..b5edf2093 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureCollectionsHandler.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureCollectionsHandler.java @@ -17,14 +17,12 @@ * limitations under the License. */ -import java.util.EnumSet; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.params.CollectionParams.CollectionAction; import org.apache.solr.common.params.CoreAdminParams; -import org.apache.sentry.core.model.search.SearchModelAction; -import org.apache.solr.handler.SecureRequestHandlerUtil; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; +import org.apache.solr.sentry.SecureRequestHandlerUtil; import org.apache.solr.core.CoreContainer; /** @@ -75,7 +73,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw } // all actions require UPDATE privileges SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.UPDATE_ONLY, - true, collection); + (action != null ? "CollectionAction." + action.toString() : getClass().getName() + "/" + a), true, collection); super.handleRequestBody(req, rsp); /** @@ -83,7 +81,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw * e.g. when the collection has been deleted, the privileges related to the collection * also need to be dropped.
*/ - if (action.equals(CollectionAction.DELETE)) { + if (CollectionAction.DELETE.equals(action)) { SecureRequestHandlerUtil.syncDeleteCollection(collection); } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureCoreAdminHandler.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureCoreAdminHandler.java index c1bde310e..ff6e28182 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureCoreAdminHandler.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureCoreAdminHandler.java @@ -17,16 +17,15 @@ * limitations under the License. */ -import java.util.EnumSet; -import org.apache.solr.core.SolrCore; -import org.apache.sentry.core.model.search.SearchModelAction; import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction; import org.apache.solr.common.params.SolrParams; -import org.apache.solr.handler.SecureRequestHandlerUtil; +import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.SolrCore; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; -import org.apache.solr.core.CoreContainer; +import org.apache.solr.sentry.SecureRequestHandlerUtil; /** * Secure (sentry-aware) version of CoreAdminHandler @@ -67,7 +66,12 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw action = CoreAdminAction.get(a); if (action == null) { // some custom action -- let's require QUERY and UPDATE - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_AND_UPDATE, true, null); + SecureRequestHandlerUtil.checkSentryAdmin( + req, + SecureRequestHandlerUtil.QUERY_AND_UPDATE, + "CoreAdminAction." + a, + true, + null); } } String collection = null; @@ -87,13 +91,17 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw collection = getCollectionFromCoreName(cname); break; } - case REQUESTAPPLYUPDATES: { - String cname = params.get(CoreAdminParams.NAME, ""); - collection = getCollectionFromCoreName(cname); + case CREATE: { + CoreDescriptor coreDescriptor = buildCoreDescriptor(params, coreContainer); + if (coreDescriptor != null) { + collection = coreDescriptor.getCloudDescriptor().getCollectionName(); + } break; } - case CREATE: { - collection = params.get(CoreAdminParams.COLLECTION); + case REQUESTAPPLYUPDATES: + case REQUESTBUFFERUPDATES: { + String cname = params.get(CoreAdminParams.NAME, ""); + collection = getCollectionFromCoreName(cname); break; } case STATUS: @@ -103,8 +111,11 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw case PERSIST: case CREATEALIAS: case DELETEALIAS: + case LOAD: case LOAD_ON_STARTUP: case TRANSIENT: + case REQUESTSTATUS: + case OVERSEEROP: default: { // these are actions that are not core related or not actually // handled by the CoreAdminHandler @@ -114,8 +125,14 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw } switch (action) { - case STATUS: { - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_ONLY, checkCollection, collection); + case STATUS: + case REQUESTSTATUS: { + SecureRequestHandlerUtil.checkSentryAdmin( + req, + SecureRequestHandlerUtil.QUERY_ONLY, + "CoreAdminAction."
+ action.toString(), + checkCollection, + collection); break; } case LOAD: @@ -136,16 +153,25 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw case CREATEALIAS: case DELETEALIAS: case LOAD_ON_STARTUP: + case TRANSIENT: case REQUESTBUFFERUPDATES: - case OVERSEEROP: - case REQUESTSTATUS: - case TRANSIENT: { - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.UPDATE_ONLY, checkCollection, collection); + case OVERSEEROP: { + SecureRequestHandlerUtil.checkSentryAdmin( + req, + SecureRequestHandlerUtil.UPDATE_ONLY, + "CoreAdminAction." + action.toString(), + checkCollection, + collection); break; } default: { // some custom action -- let's require QUERY and UPDATE - SecureRequestHandlerUtil.checkSentryAdmin(req, SecureRequestHandlerUtil.QUERY_AND_UPDATE, checkCollection, collection); + SecureRequestHandlerUtil.checkSentryAdmin( + req, + SecureRequestHandlerUtil.QUERY_AND_UPDATE, + "CoreAdminAction." + action.toString(), + checkCollection, + collection); break; } } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureInfoHandler.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureInfoHandler.java index 90b898b10..628d1d7ef 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureInfoHandler.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/admin/SecureInfoHandler.java @@ -17,9 +17,6 @@ * limitations under the License. */ -import java.util.EnumSet; -import org.apache.sentry.core.model.search.SearchModelAction; -import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.core.CoreContainer; /** diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/QueryDocAuthorizationComponent.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/QueryDocAuthorizationComponent.java index 371787df6..be46a85c6 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/QueryDocAuthorizationComponent.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/QueryDocAuthorizationComponent.java @@ -17,6 +17,12 @@ package org.apache.solr.handler.component; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.solr.common.SolrException; @@ -24,13 +30,9 @@ import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.NamedList; import org.apache.solr.sentry.SentryIndexAuthorizationSingleton; -import org.apache.solr.request.LocalSolrQueryRequest; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; -import java.util.EnumSet; -import java.util.Iterator; import java.util.Set; -import java.net.URLEncoder; public class QueryDocAuthorizationComponent extends SearchComponent { @@ -73,27 +75,56 @@ private void addRawClause(StringBuilder builder, String authField, String value) .append(value).append("}"); } + public String getFilterQueryStr(Set<String> roles) { + if (roles != null && roles.size() > 0) { + StringBuilder builder = new StringBuilder(); + for (String role : roles) { + addRawClause(builder, authField, role); + } + if (allRolesToken != null && !allRolesToken.isEmpty()) { +
addRawClause(builder, authField, allRolesToken); + } + return builder.toString(); + } + return null; + } + + private BooleanClause getBooleanClause(String authField, String value) { + Term t = new Term(authField, value); + return new BooleanClause(new TermQuery(t), BooleanClause.Occur.SHOULD); + } + + public Query getFilterQuery(Set<String> roles) { + if (roles != null && roles.size() > 0) { + BooleanQuery query = new BooleanQuery(); + for (String role : roles) { + query.add(getBooleanClause(authField, role)); + } + if (allRolesToken != null && !allRolesToken.isEmpty()) { + query.add(getBooleanClause(authField, allRolesToken)); + } + return query; + } + + return null; + } + @Override public void prepare(ResponseBuilder rb) throws IOException { - if (!enabled) return; + if (!enabled) { + return; + } String userName = sentryInstance.getUserName(rb.req); - String superUser = (System.getProperty("solr.authorization.superuser", "solr")); + String superUser = System.getProperty("solr.authorization.superuser", "solr"); if (superUser.equals(userName)) { return; } Set<String> roles = sentryInstance.getRoles(userName); if (roles != null && roles.size() > 0) { - StringBuilder builder = new StringBuilder(); - for (String role : roles) { - addRawClause(builder, authField, role); - } - if (allRolesToken != null && !allRolesToken.isEmpty()) { - addRawClause(builder, authField, allRolesToken); - } + String filterQuery = getFilterQueryStr(roles); ModifiableSolrParams newParams = new ModifiableSolrParams(rb.req.getParams()); - String result = builder.toString(); - newParams.add("fq", result); + newParams.add("fq", filterQuery); rb.req.setParams(newParams); } else { throw new SolrException(SolrException.ErrorCode.UNAUTHORIZED, @@ -115,4 +146,8 @@ public String getDescription() { public String getSource() { return "$URL$"; } + + public boolean getEnabled() { + return enabled; + } } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/QueryIndexAuthorizationComponent.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/QueryIndexAuthorizationComponent.java index e4b5741b0..5fbb7436e 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/QueryIndexAuthorizationComponent.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/QueryIndexAuthorizationComponent.java @@ -20,8 +20,6 @@ import org.apache.solr.common.util.StrUtils; import org.apache.solr.sentry.SentryIndexAuthorizationSingleton; import org.apache.sentry.core.model.search.SearchModelAction; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.EnumSet; @@ -29,8 +27,7 @@ public class QueryIndexAuthorizationComponent extends SearchComponent { - private static Logger log = - LoggerFactory.getLogger(QueryIndexAuthorizationComponent.class); + private static final String OPERATION_NAME = "query"; private SentryIndexAuthorizationSingleton sentryInstance; public QueryIndexAuthorizationComponent() { @@ -46,7 +43,7 @@ public QueryIndexAuthorizationComponent(SentryIndexAuthorizationSingleton sentry @Override public void prepare(ResponseBuilder rb) throws IOException { sentryInstance.authorizeCollectionAction( - rb.req, EnumSet.of(SearchModelAction.QUERY)); + rb.req, EnumSet.of(SearchModelAction.QUERY), OPERATION_NAME); String collections = rb.req.getParams().get("collection"); if (collections != null) { List<String> collectionList =
StrUtils.splitSmart(collections, ",", true); @@ -61,7 +58,7 @@ public void prepare(ResponseBuilder rb) throws IOException { // correct sentry check for (String coll : collectionList) { sentryInstance.authorizeCollectionAction(rb.req, EnumSet.of(SearchModelAction.QUERY), - coll, true); + OPERATION_NAME, coll, true); } } } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/SecureRealTimeGetComponent.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/SecureRealTimeGetComponent.java new file mode 100644 index 000000000..ff86b56eb --- /dev/null +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/handler/component/SecureRealTimeGetComponent.java @@ -0,0 +1,356 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.handler.component; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.AtomicReaderContext; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.document.Field; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BytesRef; + +import org.apache.solr.client.solrj.SolrQuery; +import org.apache.solr.common.SolrDocument; +import org.apache.solr.common.SolrDocumentList; +import org.apache.solr.common.SolrException; +import org.apache.solr.common.params.CommonParams; +import org.apache.solr.common.params.ModifiableSolrParams; +import org.apache.solr.common.util.NamedList; +import org.apache.solr.core.SolrCore; +import org.apache.solr.schema.FieldType; +import org.apache.solr.schema.SchemaField; +import org.apache.solr.request.SolrQueryRequest; +import org.apache.solr.response.SolrQueryResponse; +import org.apache.solr.response.transform.DocTransformer; +import org.apache.solr.response.transform.DocTransformers; +import org.apache.solr.response.transform.TransformContext; +import org.apache.solr.schema.IndexSchema; +import org.apache.solr.search.SolrIndexSearcher; +import org.apache.solr.search.SolrReturnFields; +import org.apache.solr.search.ReturnFields; +import org.apache.solr.sentry.SentryIndexAuthorizationSingleton; +import org.apache.solr.update.AddUpdateCommand; +import org.apache.solr.update.UpdateCommand; +import org.apache.solr.update.UpdateLog; +import org.apache.solr.util.RefCounted; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Set; + +public class SecureRealTimeGetComponent extends SearchComponent +{ + private static Logger log = + 
LoggerFactory.getLogger(SecureRealTimeGetComponent.class); + public static String ID_FIELD_NAME = "_reserved_sentry_id"; + public static final String COMPONENT_NAME = "secureGet"; + + private SentryIndexAuthorizationSingleton sentryInstance; + + public SecureRealTimeGetComponent() { + this(SentryIndexAuthorizationSingleton.getInstance()); + } + + @VisibleForTesting + public SecureRealTimeGetComponent(SentryIndexAuthorizationSingleton sentryInstance) { + super(); + this.sentryInstance = sentryInstance; + } + + @Override + public void prepare(ResponseBuilder rb) throws IOException { + QueryDocAuthorizationComponent docComponent = + (QueryDocAuthorizationComponent)rb.req.getCore().getSearchComponent("queryDocAuthorization"); + if (docComponent != null) { + String userName = sentryInstance.getUserName(rb.req); + String superUser = System.getProperty("solr.authorization.superuser", "solr"); + // security is never applied to the super user; for example, if solr internally is using + // real time get for replica synchronization, we need to return all the documents. + if (docComponent.getEnabled() && !superUser.equals(userName)) { + Set<String> roles = sentryInstance.getRoles(userName); + if (roles != null && roles.size() > 0) { + SolrReturnFields savedReturnFields = (SolrReturnFields)rb.rsp.getReturnFields(); + if (savedReturnFields == null) { + throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, + "Not able to authorize request because ReturnFields is invalid: " + savedReturnFields); + } + DocTransformer savedTransformer = savedReturnFields.getTransformer(); + Query filterQuery = docComponent.getFilterQuery(roles); + if (filterQuery != null) { + SolrReturnFields solrReturnFields = new AddDocIdReturnFields(rb.req, savedTransformer, filterQuery); + rb.rsp.setReturnFields(solrReturnFields); + } else { + throw new SolrException(SolrException.ErrorCode.UNAUTHORIZED, + "Request from user: " + userName + + " rejected because filter query was unable to be generated"); + } + } else { + throw new SolrException(SolrException.ErrorCode.UNAUTHORIZED, + "Request from user: " + userName + + " rejected because user is not associated with any roles"); + } + } + } else { + throw new SolrException(SolrException.ErrorCode.UNAUTHORIZED, + "RealTimeGetRequest request" + + " rejected because \"queryDocAuthorization\" component not defined"); + } + } + + @Override + public void process(ResponseBuilder rb) throws IOException { + if (!(rb.rsp.getReturnFields() instanceof AddDocIdReturnFields)) { + log.info("Skipping application of SecureRealTimeGetComponent because " + + "return field wasn't applied in prepare phase"); + return; + } + + final SolrQueryResponse rsp = rb.rsp; + ResponseFormatDocs responseFormatDocs = getResponseFormatDocs(rsp); + if (responseFormatDocs == null) { + return; // no documents to check + } + final SolrDocumentList docList = responseFormatDocs.getDocList(); + final AddDocIdReturnFields addDocIdRf = (AddDocIdReturnFields)rb.rsp.getReturnFields(); + final Query filterQuery = addDocIdRf.getFilterQuery(); + final DocTransformer transformer = addDocIdRf.getOriginalTransformer(); + + // we replaced the original transformer in order to add the document id; reapply it here + // so we return documents in the correct format.
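+ // (the DocIdAugmenter installed via AddDocIdReturnFields in prepare() stored each document's + // internal Lucene docid under ID_FIELD_NAME; a docid of -1 marks documents read from the update log)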
+ if (transformer != null) { + TransformContext context = new TransformContext(); + context.req = rb.req; + transformer.setContext(context); + } + + SolrCore core = rb.req.getCore(); + UpdateLog ulog = core.getUpdateHandler().getUpdateLog(); + SchemaField idField = core.getLatestSchema().getUniqueKeyField(); + FieldType fieldType = idField.getType(); + boolean openedRealTimeSearcher = false; + RefCounted<SolrIndexSearcher> searcherHolder = core.getRealtimeSearcher(); + + SolrDocumentList docListToReturn = new SolrDocumentList(); + try { + SolrIndexSearcher searcher = searcherHolder.get(); + for (SolrDocument doc : docList) { + // -1 doc id indicates this value was read from log; we need to open + // a new real time searcher to run the filter query against + if (Integer.valueOf(-1).equals(doc.get(ID_FIELD_NAME)) && !openedRealTimeSearcher) { + searcherHolder.decref(); + // hack to clear ulog maps since we don't have + // openRealtimeSearcher API from SOLR-8436 + AddUpdateCommand cmd = new AddUpdateCommand(rb.req); + cmd.setFlags(UpdateCommand.REPLAY); + ulog.add(cmd, true); + + searcherHolder = core.getRealtimeSearcher(); + searcher = searcherHolder.get(); + openedRealTimeSearcher = true; + } + + int docid = getFilteredInternalDocId(doc, idField, fieldType, filterQuery, searcher); + if (docid < 0) continue; + Document luceneDocument = searcher.doc(docid); + SolrDocument newDoc = toSolrDoc(luceneDocument, core.getLatestSchema()); + if( transformer != null ) { + transformer.transform(newDoc, docid); + } + docListToReturn.add(newDoc); + } + } finally { + searcherHolder.decref(); + searcherHolder = null; + } + if (responseFormatDocs.getUseResponseField()) { + rsp.getValues().remove("response"); + docListToReturn.setNumFound(docListToReturn.size()); + rsp.add("response", docListToReturn); + } else { + rsp.getValues().remove("doc"); + rsp.add("doc", docListToReturn.size() > 0 ?
docListToReturn.get(0) : null); + } + + private static SolrDocument toSolrDoc(Document doc, IndexSchema schema) { + SolrDocument out = new SolrDocument(); + for ( IndexableField f : doc.getFields() ) { + // Make sure multivalued fields are represented as lists + Object existing = out.get(f.name()); + if (existing == null) { + SchemaField sf = schema.getFieldOrNull(f.name()); + + // don't return copyField targets + if (sf != null && schema.isCopyFieldTarget(sf)) continue; + + if (sf != null && sf.multiValued()) { + List<Object> vals = new ArrayList<>(); + vals.add( f ); + out.setField( f.name(), vals ); + } + else{ + out.setField( f.name(), f ); + } + } + else { + out.addField( f.name(), f ); + } + } + return out; + } + + // get the response format to use and the documents to check + private static ResponseFormatDocs getResponseFormatDocs(final SolrQueryResponse rsp) { + SolrDocumentList docList = (SolrDocumentList)rsp.getValues().get("response"); + SolrDocument singleDoc = (SolrDocument)rsp.getValues().get("doc"); + if (docList == null && singleDoc == null) { + return null; // no documents to filter + } + if (docList != null && singleDoc != null) { + throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, + "Not able to filter secure response, RealTimeGet returned both a doc list and " + + "an individual document"); + } + final boolean useResponseField = docList != null; + if (docList == null) { + docList = new SolrDocumentList(); + docList.add(singleDoc); + } + return new ResponseFormatDocs(useResponseField, docList); + } + + /** + * @param doc SolrDocument to check + * @param idField field where the id is stored + * @param fieldType type of id field + * @param filterQuery Query to filter by + * @param searcher SolrIndexSearcher on which to apply the filter query + * @return the internal docid, or -1 if doc is not found or doesn't match filter + */ + private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType, + Query filterQuery, SolrIndexSearcher searcher) throws IOException { + int docid = -1; + Field f = (Field)doc.getFieldValue(idField.getName()); + String idStr = f.stringValue(); + BytesRef idBytes = new BytesRef(); + fieldType.readableToIndexed(idStr, idBytes); + // get the internal document id + long segAndId = searcher.lookupId(idBytes); + + // if docid is valid, run it through the filter + if (segAndId >= 0) { + int segid = (int) segAndId; + AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32)); + docid = segid + ctx.docBase; + Weight weight = filterQuery.createWeight(searcher); + Scorer scorer = weight.scorer(ctx, null); + if (scorer == null || segid != scorer.advance(segid)) { + // filter doesn't match.
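+ // (either the filter produced no scorer for this segment at all, or advance() moved past + // this document's position, i.e. the filter query does not match this document)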
+ docid = -1; + } + } + return docid; + } + + @Override + public String getDescription() { + return "Handle Query Document Authorization for RealTimeGet"; + } + + @Override + public String getSource() { + return "$URL$"; + } + + private static class ResponseFormatDocs { + private boolean useResponseField; + private SolrDocumentList docList; + + public ResponseFormatDocs(boolean useResponseField, SolrDocumentList docList) { + this.useResponseField = useResponseField; + this.docList = docList; + } + + public boolean getUseResponseField() { return useResponseField; } + public SolrDocumentList getDocList() { return docList; } + } + + // ReturnField that adds a transformer to store the document id + private static class AddDocIdReturnFields extends SolrReturnFields { + private DocTransformer transformer; + private DocTransformer originalTransformer; + private Query filterQuery; + + public AddDocIdReturnFields(SolrQueryRequest req, DocTransformer docTransformer, + Query filterQuery) { + super(req); + this.originalTransformer = docTransformer; + this.filterQuery = filterQuery; + final DocTransformers docTransformers = new DocTransformers(); + if (originalTransformer != null) docTransformers.addTransformer(originalTransformer); + docTransformers.addTransformer(new DocIdAugmenter(ID_FIELD_NAME)); + this.transformer = docTransformers; + } + + @Override + public DocTransformer getTransformer() { + return transformer; + } + + public DocTransformer getOriginalTransformer() { + return originalTransformer; + } + + public Query getFilterQuery() { + return filterQuery; + } + } + + // the Solr DocIdAugmenterFactory does not store negative doc ids; + // we do here. + private static class DocIdAugmenter extends DocTransformer + { + final String name; + + public DocIdAugmenter( String display ) + { + this.name = display; + } + + @Override + public String getName() + { + return name; + } + + @Override + public void transform(SolrDocument doc, int docid) { + doc.setField( name, docid ); + } + } + +} diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessor.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessor.java index 8cd53d33a..d995a7d3c 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessor.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessor.java @@ -27,9 +27,8 @@ import org.apache.solr.update.MergeIndexesCommand; import org.apache.solr.update.RollbackUpdateCommand; import org.apache.sentry.core.model.search.SearchModelAction; + import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.EnumSet; @@ -40,58 +39,65 @@ public class UpdateIndexAuthorizationProcessor extends UpdateRequestProcessor { private SentryIndexAuthorizationSingleton sentryInstance; public UpdateIndexAuthorizationProcessor(SolrQueryRequest req, - SolrQueryResponse rsp, UpdateRequestProcessor next) { - this(SentryIndexAuthorizationSingleton.getInstance(), req, rsp, next); + SolrQueryResponse rsp, UpdateRequestProcessor next) { //NOPMD + this(SentryIndexAuthorizationSingleton.getInstance(), req, next); } @VisibleForTesting public UpdateIndexAuthorizationProcessor(SentryIndexAuthorizationSingleton sentryInstance, - SolrQueryRequest req, SolrQueryResponse rsp, 
UpdateRequestProcessor next) { + SolrQueryRequest req, UpdateRequestProcessor next) { super(next); this.sentryInstance = sentryInstance; this.req = req; } - public void authorizeCollectionAction() throws SolrException { + private void authorizeCollectionAction(String operation) throws SolrException { sentryInstance.authorizeCollectionAction( - req, EnumSet.of(SearchModelAction.UPDATE)); + req, EnumSet.of(SearchModelAction.UPDATE), operation); } @Override public void processAdd(AddUpdateCommand cmd) throws IOException { - authorizeCollectionAction(); + authorizeCollectionAction(cmd.name()); super.processAdd(cmd); } @Override public void processDelete(DeleteUpdateCommand cmd) throws IOException { - authorizeCollectionAction(); + String operation = cmd.name(); + if (cmd.isDeleteById()) { + operation += "ById"; + } else { + operation += "ByQuery"; + } + authorizeCollectionAction(operation); super.processDelete(cmd); } @Override public void processMergeIndexes(MergeIndexesCommand cmd) throws IOException { - authorizeCollectionAction(); + authorizeCollectionAction(cmd.name()); super.processMergeIndexes(cmd); } @Override public void processCommit(CommitUpdateCommand cmd) throws IOException { - authorizeCollectionAction(); + authorizeCollectionAction(cmd.name()); super.processCommit(cmd); } @Override public void processRollback(RollbackUpdateCommand cmd) throws IOException { - authorizeCollectionAction(); + authorizeCollectionAction(cmd.name()); super.processRollback(cmd); } @Override public void finish() throws IOException { - authorizeCollectionAction(); + authorizeCollectionAction("finish"); super.finish(); } + } diff --git a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessorFactory.java b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessorFactory.java index 945dbc419..07f7f2839 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessorFactory.java +++ b/sentry-solr/solr-sentry-handlers/src/main/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessorFactory.java @@ -20,7 +20,6 @@ import org.apache.solr.common.util.NamedList; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; -import org.apache.solr.update.processor.UpdateRequestProcessorFactory; /** * Factory for Sentry's index-level update authorization. 
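For context, the per-command operation names introduced above are what authorizeCollectionAction now records: the command's name() for add, commit, rollback, and mergeIndexes, a ById/ByQuery suffix for deletes, and the literal "finish". A minimal sketch of how the two delete variants diverge, mirroring the test changes further below (the processor and req variables are hypothetical stand-ins for a configured UpdateIndexAuthorizationProcessor and an active SolrQueryRequest):

    // Delete by id: no query is set on the command, so cmd.isDeleteById() reports
    // true and the call is authorized and audited under the name "deleteById".
    DeleteUpdateCommand byId = new DeleteUpdateCommand(req);
    processor.processDelete(byId);

    // Delete by query: once a query is set, isDeleteById() reports false
    // and the same call is audited under "deleteByQuery".
    DeleteUpdateCommand byQuery = new DeleteUpdateCommand(req);
    byQuery.setQuery("*:*");
    processor.processDelete(byQuery);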
diff --git a/sentry-solr/solr-sentry-handlers/src/main/resources/log4j.properties b/sentry-solr/solr-sentry-handlers/src/main/resources/log4j.properties index 62fdcd450..0e61f4aea 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/resources/log4j.properties +++ b/sentry-solr/solr-sentry-handlers/src/main/resources/log4j.properties @@ -20,6 +20,19 @@ # Logging level log4j.rootLogger=INFO, CONSOLE +log4j.logger.org.apache.solr.sentry.AuditLogger=INFO, solrAudit +#log4j.logger.org.apache.solr.sentry.AuditLogger=OFF + +# turn off additivity so audit events do not also reach the root logger's CONSOLE appender: +#log4j.additivity.org.apache.solr.sentry.AuditLogger=false + +log4j.appender.solrAudit=org.apache.solr.sentry.RollingFileWithoutDeleteAppender +log4j.appender.solrAudit.layout=org.apache.log4j.PatternLayout +log4j.appender.solrAudit.layout.ConversionPattern=%m%n +log4j.appender.solrAudit.File=target/temp/SOLR-1-SOLR_SERVER-d554cdf32962542b8c887a4f9fcbc079 +#log4j.appender.solrAudit.File=/var/log/solr/audit/SENTRY-1-SENTRY_SERVER-d554cdf32962542b8c887a4f9fcbc079 +log4j.appender.solrAudit.MaxFileSize=100MB + log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender log4j.appender.CONSOLE.Target=System.err log4j.appender.CONSOLE.layout=org.apache.solr.util.SolrLogLayout diff --git a/sentry-solr/solr-sentry-handlers/src/main/resources/sentry-handlers/sentry/test-authz-provider.ini b/sentry-solr/solr-sentry-handlers/src/main/resources/sentry-handlers/sentry/test-authz-provider.ini index 8f48a8cc8..ec029c5eb 100644 --- a/sentry-solr/solr-sentry-handlers/src/main/resources/sentry-handlers/sentry/test-authz-provider.ini +++ b/sentry-solr/solr-sentry-handlers/src/main/resources/sentry-handlers/sentry/test-authz-provider.ini @@ -33,3 +33,4 @@ queryOnlyAdmin=queryOnlyAdmin updateOnlyAdmin=updateOnlyAdmin multiGroupUser=junit, queryOnlyAdmin, updateOnlyAdmin undefinedRoleUser=undefinedRoleGroup +bogusUser=bogusUserGroup diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/TestSecureReplicationHandler.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/TestSecureReplicationHandler.java index 938767744..6367814d5 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/TestSecureReplicationHandler.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/TestSecureReplicationHandler.java @@ -18,7 +18,6 @@ import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.core.SolrCore; -import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.sentry.SentryTestBase; import org.apache.solr.sentry.SentrySingletonTestInstance; diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureAdminHandlersTest.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureAdminHandlersTest.java index 3cb2597de..aea44f7e6 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureAdminHandlersTest.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureAdminHandlersTest.java @@ -31,8 +31,6 @@ import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class SecureAdminHandlersTest extends SentryTestBase { @@ -146,10 +144,6 @@ private void verifyLuke() throws Exception { verifyQueryAccess("/admin/luke", true); } - private void verifySystem() throws Exception { -
verifyQueryAccess("/admin/system", true); - } - private void verifyMBeans() throws Exception { verifyQueryAccess("/admin/mbeans", true); } diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureCoreAdminHandlerTest.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureCoreAdminHandlerTest.java index 1857feb66..f93fb6561 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureCoreAdminHandlerTest.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureCoreAdminHandlerTest.java @@ -16,19 +16,24 @@ */ package org.apache.solr.handler.admin; +import java.lang.reflect.Method; import java.util.Arrays; import java.util.List; +import java.util.Map; + +import net.sf.cglib.proxy.Enhancer; +import net.sf.cglib.proxy.MethodInterceptor; +import net.sf.cglib.proxy.MethodProxy; import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.common.params.CoreAdminParams; -import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction; +import org.apache.solr.core.CoreContainer; import org.apache.solr.core.SolrCore; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.sentry.SentryTestBase; import org.apache.solr.sentry.SentrySingletonTestInstance; -import org.eclipse.jetty.util.log.Log; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -39,8 +44,8 @@ public class SecureCoreAdminHandlerTest extends SentryTestBase { private static CloudDescriptor cloudDescriptor; public final static List QUERY_ACTIONS = Arrays.asList( - CoreAdminAction.STATUS - ); + CoreAdminAction.STATUS, + CoreAdminAction.REQUESTSTATUS); public final static List UPDATE_ACTIONS = Arrays.asList( CoreAdminAction.LOAD, CoreAdminAction.UNLOAD, @@ -60,19 +65,20 @@ public class SecureCoreAdminHandlerTest extends SentryTestBase { CoreAdminAction.LOAD_ON_STARTUP, CoreAdminAction.TRANSIENT, CoreAdminAction.OVERSEEROP, - CoreAdminAction.REQUESTSTATUS, // RELOAD needs to go last, because our bogus calls leaves things in a bad state for later calls. // We could handle this more cleanly at the cost of a lot more creating and deleting cores. CoreAdminAction.RELOAD ); - // only specify the collection on these, no cores + // These actions require that the collection is specified on the request. 
public final static List<CoreAdminAction> REQUIRES_COLLECTION = Arrays.asList( CoreAdminAction.CREATE ); // actions which don't check the actual collection public final static List<CoreAdminAction> NO_CHECK_COLLECTIONS = Arrays.asList( + CoreAdminAction.STATUS, + CoreAdminAction.REQUESTSTATUS, CoreAdminAction.LOAD, CoreAdminAction.PERSIST, CoreAdminAction.CREATEALIAS, @@ -80,7 +86,6 @@ public class SecureCoreAdminHandlerTest extends SentryTestBase { CoreAdminAction.LOAD_ON_STARTUP, CoreAdminAction.REQUESTBUFFERUPDATES, CoreAdminAction.OVERSEEROP, - CoreAdminAction.REQUESTSTATUS, CoreAdminAction.TRANSIENT ); @@ -115,27 +120,28 @@ private SolrQueryRequest getCoreAdminRequest(String collection, String user, modParams.set(CoreAdminParams.COLLECTION, ""); modParams.set(CoreAdminParams.CORE, ""); modParams.set(CoreAdminParams.NAME, ""); - if (!REQUIRES_COLLECTION.contains(action)) { - for (SolrCore core : h.getCoreContainer().getCores()) { - if(core.getCoreDescriptor().getCloudDescriptor().getCollectionName().equals(collection)) { - modParams.set(CoreAdminParams.CORE, core.getName()); - modParams.set(CoreAdminParams.NAME, core.getName()); - break; - } + for (SolrCore core : h.getCoreContainer().getCores()) { + if(core.getCoreDescriptor().getCloudDescriptor().getCollectionName().equals(collection)) { + modParams.set(CoreAdminParams.CORE, core.getName()); + modParams.set(CoreAdminParams.NAME, core.getName()); + break; } - } else { + } + if (REQUIRES_COLLECTION.contains(action)) { modParams.set(CoreAdminParams.COLLECTION, collection); + modParams.set(CoreAdminParams.CORE, core.getName()); + modParams.set(CoreAdminParams.NAME, core.getName()); } req.setParams(modParams); return req; } - private void verifyQueryAccess(CoreAdminAction action) throws Exception { - CoreAdminHandler handler = new SecureCoreAdminHandler(h.getCoreContainer()); + private void verifyQueryAccess(CoreAdminAction action, boolean checkCollection) throws Exception { + CoreContainer cc = getCleanCoreContainer(action); + CoreAdminHandler handler = new SecureCoreAdminHandler(cc); verifyAuthorized(handler, getCoreAdminRequest("collection1", "junit", action)); verifyAuthorized(handler, getCoreAdminRequest("queryCollection", "junit", action)); - if (action.equals(CoreAdminAction.STATUS)) { - // STATUS doesn't check collection permissions + if (!checkCollection) { verifyAuthorized(handler, getCoreAdminRequest("bogusCollection", "junit", action)); verifyAuthorized(handler, getCoreAdminRequest("updateCollection", "junit", action)); } else { @@ -145,7 +151,8 @@ private void verifyQueryAccess(CoreAdminAction action) throws Exception { } private void verifyUpdateAccess(CoreAdminAction action, boolean checkCollection) throws Exception { - CoreAdminHandler handler = new SecureCoreAdminHandler(h.getCoreContainer()); + CoreContainer cc = getCleanCoreContainer(action); + CoreAdminHandler handler = new SecureCoreAdminHandler(cc); verifyAuthorized(handler, getCoreAdminRequest("collection1", "junit", action)); verifyAuthorized(handler, getCoreAdminRequest("updateCollection", "junit", action)); verifyUnauthorized(handler, getCoreAdminRequest("bogusCollection", "bogusUser", action), "bogusCollection", "bogusUser", true); @@ -154,10 +161,39 @@ private void verifyUpdateAccess(CoreAdminAction action, boolean checkCollection) } } + private CoreContainer getZkAwareCoreContainer(final CoreContainer cc) { + Enhancer e = new Enhancer(); + e.setClassLoader(cc.getClass().getClassLoader()); + e.setSuperclass(CoreContainer.class); + e.setCallback(new MethodInterceptor() { + public Object
intercept(Object obj, Method method, Object [] args, MethodProxy proxy) throws Throwable { + if (method.getName().equals("isZooKeeperAware")) { + return Boolean.TRUE; + } + return method.invoke(cc, args); + } + }); + return (CoreContainer)e.create(); + } + + private CoreContainer getCleanCoreContainer(CoreAdminAction action) { + // Ensure CoreContainer is empty + for (String coreName : h.getCoreContainer().getCoreNames()) { + h.getCoreContainer().unload(coreName); + } + for (Map.Entry entry : h.getCoreContainer().getCoreInitFailures().entrySet()) { + String coreName = entry.getKey().toString(); + h.getCoreContainer().unload(coreName); + } + // actions that require the collection attempt to read the collection off the CloudDescriptor, which is only + // present when the CoreContainer is ZkAware. + return REQUIRES_COLLECTION.contains(action) ? getZkAwareCoreContainer(h.getCoreContainer()) : h.getCoreContainer(); + } + @Test public void testSecureAdminHandler() throws Exception { for (CoreAdminAction action : QUERY_ACTIONS) { - verifyQueryAccess(action); + verifyQueryAccess(action, !NO_CHECK_COLLECTIONS.contains(action)); } for (CoreAdminAction action : UPDATE_ACTIONS) { verifyUpdateAccess(action, !NO_CHECK_COLLECTIONS.contains(action)); diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureInfoHandlerTest.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureInfoHandlerTest.java index 7221fa0f0..54784f44a 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureInfoHandlerTest.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/admin/SecureInfoHandlerTest.java @@ -17,15 +17,11 @@ package org.apache.solr.handler.admin; import org.apache.solr.cloud.CloudDescriptor; -import org.apache.solr.common.SolrException; import org.apache.solr.core.SolrCore; -import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.request.SolrQueryRequest; -import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.sentry.SentryTestBase; import org.apache.solr.sentry.SentrySingletonTestInstance; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/component/QueryDocAuthorizationComponentTest.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/component/QueryDocAuthorizationComponentTest.java index c94f6fb0e..1f44628f1 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/component/QueryDocAuthorizationComponentTest.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/component/QueryDocAuthorizationComponentTest.java @@ -25,7 +25,6 @@ import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.NamedList; import org.apache.solr.core.SolrCore; -import org.apache.solr.handler.component.ResponseBuilder; import org.apache.solr.sentry.SentryTestBase; import org.apache.solr.sentry.SentryIndexAuthorizationSingleton; import org.apache.solr.sentry.SentrySingletonTestInstance; diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/component/QueryIndexAuthorizationComponentTest.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/component/QueryIndexAuthorizationComponentTest.java index b9766e0d6..a1f376085 100644 --- 
a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/component/QueryIndexAuthorizationComponentTest.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/handler/component/QueryIndexAuthorizationComponentTest.java @@ -19,7 +19,6 @@ import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.common.SolrException; import org.apache.solr.core.SolrCore; -import org.apache.solr.handler.component.ResponseBuilder; import org.apache.solr.sentry.SentryIndexAuthorizationSingleton; import org.apache.solr.sentry.SentryTestBase; import org.apache.solr.sentry.SentrySingletonTestInstance; diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentryIndexAuthorizationSingletonTest.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentryIndexAuthorizationSingletonTest.java index 4bea2515c..30a849a28 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentryIndexAuthorizationSingletonTest.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentryIndexAuthorizationSingletonTest.java @@ -23,11 +23,11 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.sentry.core.model.search.SearchModelAction; +import org.apache.sentry.provider.common.SentryGroupNotFoundException; +import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.common.SolrException; import org.apache.solr.common.params.ModifiableSolrParams; -import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.core.SolrCore; -// import org.apache.solr.servlet.SolrHadoopAuthenticationFilter; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrQueryRequestBase; @@ -47,6 +47,8 @@ public class SentryIndexAuthorizationSingletonTest extends SentryTestBase { private static CloudDescriptor cloudDescriptor; private static SentryIndexAuthorizationSingleton sentryInstance; + private static final String OPERATION_NAME = "myOperation"; + @BeforeClass public static void beforeClass() throws Exception { core = createCore("solrconfig.xml", "schema-minimal.xml"); @@ -80,7 +82,7 @@ private void doExpectUnauthorized(SolrQueryRequest request, private void doExpectUnauthorized(SentryIndexAuthorizationSingleton singleton, SolrQueryRequest request, Set<SearchModelAction> actions, String msgContains) throws Exception { try { - singleton.authorizeCollectionAction(request, actions); + singleton.authorizeCollectionAction(request, actions, OPERATION_NAME); Assert.fail("Expected SolrException"); } catch (SolrException ex) { assertEquals(ex.code(), SolrException.ErrorCode.UNAUTHORIZED.code); @@ -88,6 +90,17 @@ private void doExpectUnauthorized(SentryIndexAuthorizationSingleton singleton, S } } + private void doExpectExceptionWithoutGroup(SentryIndexAuthorizationSingleton singleton, + SolrQueryRequest request, Set<SearchModelAction> actions) + throws Exception { + try { + singleton.authorizeCollectionAction(request, actions, OPERATION_NAME); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException ex) { + // expected exception, do nothing + } + } + @Test public void testNoBinding() throws Exception { // Use reflection to construct a non-singleton version of SentryIndexAuthorizationSingleton @@ -121,8 +134,7 @@ public void testNoHttpRequest() throws Exception { public void testNullUserName() throws Exception { SolrQueryRequest request = getRequest(); prepareCollAndUser(core, request, "collection1",
null); - doExpectUnauthorized(request, EnumSet.of(SearchModelAction.ALL), - "User null does not have privileges for collection1"); + doExpectExceptionWithoutGroup(sentryInstance, request, EnumSet.of(SearchModelAction.ALL)); } @Test @@ -130,8 +142,7 @@ public void testEmptySuperUser() throws Exception { System.setProperty("solr.authorization.superuser", ""); SolrQueryRequest request = getRequest(); prepareCollAndUser(core, request, "collection1", "solr"); - doExpectUnauthorized(request, EnumSet.of(SearchModelAction.ALL), - "User solr does not have privileges for collection1"); + doExpectExceptionWithoutGroup(sentryInstance, request, EnumSet.of(SearchModelAction.ALL)); } /** @@ -144,7 +155,7 @@ public void testSuperUserAccess() throws Exception { prepareCollAndUser(core, request, "collection1", "junit"); sentryInstance.authorizeCollectionAction( - request, EnumSet.of(SearchModelAction.ALL)); + request, EnumSet.of(SearchModelAction.ALL), OPERATION_NAME); } /** @@ -157,7 +168,7 @@ public void testSuperUserNoAccess() throws Exception { prepareCollAndUser(core, request, "bogusCollection", "junit"); sentryInstance.authorizeCollectionAction( - request, EnumSet.of(SearchModelAction.ALL)); + request, EnumSet.of(SearchModelAction.ALL), OPERATION_NAME); } /** @@ -184,7 +195,9 @@ public void testUserName() throws Exception { String localName = sentryInstance.getUserName(localRequest); assertEquals(superUser, localName); } finally { - if (localRequest != null) localRequest.close(); + if (localRequest != null) { + localRequest.close(); + } } // null userName @@ -199,7 +212,9 @@ public void testUserName() throws Exception { String returnedName = sentryInstance.getUserName(sqr); assertEquals(userName, returnedName); } finally { - if (request != null) request.close(); + if (request != null) { + request.close(); + } } } @@ -211,15 +226,21 @@ public void testGetRoles() throws Exception { Collection<String> emptyCollection = ImmutableSet.of(); // null user - Collection<String> roles = sentryInstance.getRoles(null); - assertTrue(CollectionUtils.isEqualCollection(emptyCollection, roles)); + try { + sentryInstance.getRoles(null); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } // no group - roles = sentryInstance.getRoles("bogusUser"); - assertTrue(CollectionUtils.isEqualCollection(emptyCollection, roles)); + try { + sentryInstance.getRoles("withoutGroupUser"); + Assert.fail("Expected SentryGroupNotFoundException"); + } catch (SentryGroupNotFoundException e) { + } // no role - roles = sentryInstance.getRoles("undefinedRoleUser"); + Collection<String> roles = sentryInstance.getRoles("undefinedRoleUser"); assertTrue(CollectionUtils.isEqualCollection(emptyCollection, roles)); // single member diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentrySingletonTestInstance.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentrySingletonTestInstance.java index ae024667d..579f79111 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentrySingletonTestInstance.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentrySingletonTestInstance.java @@ -21,7 +21,6 @@ import org.apache.commons.io.FileUtils; import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.handler.SecureRequestHandlerUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentryTestBase.java
b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentryTestBase.java index b5548e6e3..e1a1ba8c9 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentryTestBase.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/sentry/SentryTestBase.java @@ -36,13 +36,14 @@ import org.junit.Assert; +import static org.apache.solr.sentry.SentryIndexAuthorizationSingleton.USER_NAME; +import static org.apache.solr.sentry.SentryIndexAuthorizationSingleton.DO_AS_USER_NAME; + /** * Base class for Sentry tests */ public abstract class SentryTestBase extends SolrTestCaseJ4 { - private static final String USER_NAME = "solr.user.name"; - private SolrQueryRequest request; public void setUp(SolrCore core) throws Exception { @@ -95,9 +96,16 @@ protected SolrQueryRequest prepareCollAndUser(SolrCore core, SolrQueryRequest re cloudDescField.set(coreDescriptor, mCloudDescriptor); HttpServletRequest httpServletRequest = EasyMock.createMock(HttpServletRequest.class); - IExpectationSetters getAttributeExpect = + IExpectationSetters getAttributeUserExpect = EasyMock.expect(httpServletRequest.getAttribute(USER_NAME)).andReturn(user); - if(!onlyOnce) getAttributeExpect.anyTimes(); + if (!onlyOnce) { + getAttributeUserExpect.anyTimes(); + } + IExpectationSetters getAttributeDoAsUserExpect = + EasyMock.expect(httpServletRequest.getAttribute(DO_AS_USER_NAME)).andReturn(null); + if (!onlyOnce) { + getAttributeDoAsUserExpect.anyTimes(); + } EasyMock.replay(httpServletRequest); request.getContext().put("httpRequest", httpServletRequest); return request; diff --git a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessorTest.java b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessorTest.java index e2972324a..630ca7caf 100644 --- a/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessorTest.java +++ b/sentry-solr/solr-sentry-handlers/src/test/java/org/apache/solr/update/processor/UpdateIndexAuthorizationProcessorTest.java @@ -19,18 +19,25 @@ import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.TreeSet; import org.apache.commons.lang.mutable.MutableInt; import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.common.SolrException; +import org.apache.solr.common.params.MapSolrParams; import org.apache.solr.core.SolrCore; import org.apache.solr.request.SolrQueryRequest; -import org.apache.solr.sentry.SentryTestBase; +import org.apache.solr.request.SolrQueryRequestBase; import org.apache.solr.sentry.SentrySingletonTestInstance; +import org.apache.solr.sentry.SentryTestBase; +import org.apache.solr.update.AddUpdateCommand; +import org.apache.solr.update.CommitUpdateCommand; +import org.apache.solr.update.DeleteUpdateCommand; +import org.apache.solr.update.MergeIndexesCommand; +import org.apache.solr.update.RollbackUpdateCommand; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -66,11 +73,15 @@ public void setUp() throws Exception { } private void verifyAuthorized(String collection, String user) throws Exception { - getProcessor(collection, user).processAdd(null); - getProcessor(collection, user).processDelete(null); - getProcessor(collection, user).processMergeIndexes(null); - getProcessor(collection, 
user).processCommit(null); - getProcessor(collection, user).processRollback(null); + SolrQueryRequestBase req = new SolrQueryRequestBase(core, new MapSolrParams(new HashMap())) {}; + getProcessor(collection, user).processAdd(new AddUpdateCommand(req)); + getProcessor(collection, user).processDelete(new DeleteUpdateCommand(req)); + DeleteUpdateCommand deleteByQueryCommand = new DeleteUpdateCommand(req); + deleteByQueryCommand.setQuery("*:*"); + getProcessor(collection, user).processDelete(deleteByQueryCommand); + getProcessor(collection, user).processMergeIndexes(new MergeIndexesCommand(null, req)); + getProcessor(collection, user).processCommit(new CommitUpdateCommand(req, false)); + getProcessor(collection, user).processRollback(new RollbackUpdateCommand(req)); getProcessor(collection, user).finish(); } @@ -83,29 +94,30 @@ private void verifyUnauthorizedException(SolrException ex, String exMsgContains, private void verifyUnauthorized(String collection, String user) throws Exception { MutableInt numExceptions = new MutableInt(0); String contains = "User " + user + " does not have privileges for " + collection; + SolrQueryRequestBase req = new SolrQueryRequestBase(core, new MapSolrParams(new HashMap())) {}; try { - getProcessor(collection, user).processAdd(null); + getProcessor(collection, user).processAdd(new AddUpdateCommand(req)); } catch(SolrException ex) { verifyUnauthorizedException(ex, contains, numExceptions); } try { - getProcessor(collection, user).processDelete(null); + getProcessor(collection, user).processDelete(new DeleteUpdateCommand(req)); } catch(SolrException ex) { verifyUnauthorizedException(ex, contains, numExceptions); } try { - getProcessor(collection, user).processMergeIndexes(null); + getProcessor(collection, user).processMergeIndexes(new MergeIndexesCommand(null, req)); } catch(SolrException ex) { verifyUnauthorizedException(ex, contains, numExceptions); } try { - getProcessor(collection, user).processCommit(null); + getProcessor(collection, user).processCommit(new CommitUpdateCommand(req, false)); } catch(SolrException ex) { verifyUnauthorizedException(ex, contains, numExceptions); } try { - getProcessor(collection, user).processRollback(null); + getProcessor(collection, user).processRollback(new RollbackUpdateCommand(req)); } catch(SolrException ex) { verifyUnauthorizedException(ex, contains, numExceptions); } @@ -123,7 +135,7 @@ private UpdateIndexAuthorizationProcessor getProcessor(String collection, String SolrQueryRequest request = getRequest(); prepareCollAndUser(core, request, collection, user); return new UpdateIndexAuthorizationProcessor( - SentrySingletonTestInstance.getInstance().getSentryInstance(), request, null, null); + SentrySingletonTestInstance.getInstance().getSentryInstance(), request, null); } /** diff --git a/sentry-tests/pom.xml b/sentry-tests/pom.xml index 3c6802b9c..88a28bb50 100644 --- a/sentry-tests/pom.xml +++ b/sentry-tests/pom.xml @@ -20,7 +20,7 @@ limitations under the License. org.apache.sentry sentry - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-tests Sentry Tests @@ -30,6 +30,8 @@ limitations under the License. 
sentry-tests-hive sentry-tests-solr + sentry-tests-sqoop + sentry-tests-kafka diff --git a/sentry-tests/sentry-tests-hive/.gitignore b/sentry-tests/sentry-tests-hive/.gitignore deleted file mode 100644 index a3e474e69..000000000 --- a/sentry-tests/sentry-tests-hive/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -derby.log -TempStatsStore/** -thirdparty/* -sentry_policy_db diff --git a/sentry-tests/sentry-tests-hive/pom.xml b/sentry-tests/sentry-tests-hive/pom.xml index 0f693819b..472cce790 100644 --- a/sentry-tests/sentry-tests-hive/pom.xml +++ b/sentry-tests/sentry-tests-hive/pom.xml @@ -21,7 +21,7 @@ limitations under the License. org.apache.sentry sentry-tests - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-tests-hive Sentry Hive Tests @@ -245,7 +245,6 @@ limitations under the License. org.hamcrest hamcrest-all test - 1.3 org.apache.curator @@ -276,9 +275,14 @@ limitations under the License. org.apache.maven.plugins maven-surefire-plugin + false ${project.build.directory} + + **/TestHDFSIntegration.java + **/TestHDFSIntegrationWithHA.java + @@ -321,45 +325,6 @@ limitations under the License. - - org.apache.maven.plugins - maven-antrun-plugin - - true - true - - - - link-hadoop - generate-sources - - run - - - - - set -e - set -x - /bin/pwd - BASE_DIR=./target - TEST_UTIL_DIR=./testutil - setup_hadoop() { - set -e - set -x - /bin/pwd - cp -f $TEST_UTIL_DIR/* $BASE_DIR/. - chmod 777 $BASE_DIR/hadoop - } - setup_hadoop - - - - - - - - - org.apache.maven.plugins maven-surefire-plugin @@ -428,6 +393,7 @@ limitations under the License. **/TestUriPermissions.java **/TestRuntimeMetadataRetrieval.java **/TestOperations.java + **/TestPrivilegesAtColumnScope.java -Dsentry.e2etest.hiveServer2Type=UnmanagedHiveServer2 -Dsentry.e2etest.DFSType=ClusterDFS @@ -493,6 +459,10 @@ limitations under the License. 
**/TestDatabaseProvider.java **/TestDbOperations.java **/TestPrivilegeWithGrantOption.java + **/TestDbPrivilegesAtColumnScope.java + **/TestColumnEndToEnd.java + **/TestDbComplexView.java + **/TestConcurrentClients -Dsentry.e2etest.hiveServer2Type=UnmanagedHiveServer2 -Dsentry.e2etest.DFSType=ClusterDFS -Dsentry.e2etest.external.sentry=true diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/AbstractTestWithDbProvider.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/AbstractTestWithDbProvider.java index 04f50edde..17a2d1e7b 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/AbstractTestWithDbProvider.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/AbstractTestWithDbProvider.java @@ -25,14 +25,10 @@ import java.util.Map; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; -import com.google.common.io.Files; import org.apache.commons.io.FileUtils; import org.apache.curator.test.TestingServer; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl; import org.apache.sentry.provider.db.SimpleDBProviderBackend; import org.apache.sentry.provider.file.PolicyFile; @@ -44,36 +40,34 @@ import org.apache.sentry.tests.e2e.hive.Context; import org.apache.sentry.tests.e2e.hive.StaticUserGroup; import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory; -import org.junit.After; +import org.junit.AfterClass; import org.junit.BeforeClass; +import com.google.common.base.Preconditions; +import com.google.common.collect.Maps; +import com.google.common.io.Files; + public abstract class AbstractTestWithDbProvider extends AbstractTestWithHiveServer { protected static final String SERVER_HOST = "localhost"; - private Map properties = Maps.newHashMap(); - private File dbDir; - private int sentryServerCount = 1; - private List servers = new ArrayList(sentryServerCount); - private Configuration conf; - private PolicyFile policyFile; - private File policyFilePath; - protected Context context; + protected static Map properties = Maps.newHashMap(); + private static File dbDir; + private static int sentryServerCount = 1; + private static List servers = new ArrayList(sentryServerCount); + private static Configuration conf; + private static PolicyFile policyFile; + private static File policyFilePath; + protected static Context context; - protected boolean haEnabled; - private TestingServer zkServer; + protected static boolean haEnabled; + private static TestingServer zkServer; @BeforeClass public static void setupTest() throws Exception { } - @Override - public Context createContext(Map properties) throws Exception { - this.properties = properties; - return createContext(); - } - - public Context createContext() throws Exception { + public static void createContext() throws Exception { conf = new Configuration(false); policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); properties.put(HiveServerFactory.AUTHZ_PROVIDER_BACKEND, SimpleDBProviderBackend.class.getName()); @@ -86,6 +80,7 @@ public Context createContext() throws Exception { dbDir = new File(Files.createTempDir(), "sentry_policy_db"); 
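// The Sentry policy store is backed by an embedded Derby database under a fresh temp directory; the JDBC password added below appears to be only a dummy placeholder for this test setup.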
properties.put(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true"); + properties.put(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); properties.put(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); @@ -111,17 +106,16 @@ public Context createContext() throws Exception { String.valueOf(server.getAddress().getPort())); } - context = super.createContext(properties); + context = AbstractTestWithHiveServer.createContext(properties); policyFile .setUserGroupMapping(StaticUserGroup.getStaticMapping()) .write(context.getPolicyFile(), policyFilePath); startSentryService(); - return context; } - @After - public void tearDown() throws Exception { + @AfterClass + public static void tearDown() throws Exception { for (SentryService server : servers) { if (server != null) { server.stop(); @@ -149,7 +143,7 @@ protected void setupAdmin(Context context) throws Exception { connection.close(); } - private void startSentryService() throws Exception { + private static void startSentryService() throws Exception { for (SentryService server : servers) { server.start(); final long start = System.currentTimeMillis(); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestColumnEndToEnd.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestColumnEndToEnd.java index 742c74fd6..32d0a6138 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestColumnEndToEnd.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestColumnEndToEnd.java @@ -17,15 +17,18 @@ package org.apache.sentry.tests.e2e.dbprovider; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.*; import java.io.File; import java.io.FileOutputStream; import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; import org.apache.sentry.provider.db.SentryAccessDeniedException; -import org.apache.sentry.provider.file.PolicyFile; import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; import org.junit.Before; import org.junit.BeforeClass; @@ -33,13 +36,19 @@ import com.google.common.io.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + public class TestColumnEndToEnd extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory. 
+ getLogger(TestColumnEndToEnd.class); + private final String SINGLE_TYPE_DATA_FILE_NAME = "kv1.dat"; private File dataFile; - private PolicyFile policyFile; @BeforeClass public static void setupTestStaticConfiguration() throws Exception{ + LOGGER.info("TestColumnEndToEnd setupTestStaticConfiguration"); useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); } @@ -49,11 +58,11 @@ public static void setupTestStaticConfiguration() throws Exception{ public void setup() throws Exception { super.setupAdmin(); super.setup(); + super.setupPolicy(); dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); } @Test @@ -78,78 +87,161 @@ public void testBasic() throws Exception { connection.close(); } + @Test + public void testDescribeTbl() throws Exception { + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + statement.execute("CREATE TABLE IF NOT EXISTS t1 (c1 string, c2 string)"); + statement.execute("CREATE TABLE t2 (c1 string, c2 string)"); + statement.execute("CREATE ROLE user_role1"); + statement.execute("GRANT SELECT (c1) ON TABLE t1 TO ROLE user_role1"); + statement.execute("GRANT ROLE user_role1 TO GROUP " + USERGROUP1); + statement.close(); + connection.close(); + + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + + // Expect that DESCRIBE table works with only column-level privileges, but other + // DESCRIBE variants like DESCRIBE FORMATTED fail. Note that if a user has privileges + // on any column they can describe all columns. 
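+ // Plain DESCRIBE returns one row per column: the column name in the first field and the type in the second, which is what the assertions below walk through.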
+ ResultSet rs = statement.executeQuery("DESCRIBE t1"); + assertTrue(rs.next()); + assertEquals("c1", rs.getString(1)); + assertEquals("string", rs.getString(2)); + assertTrue(rs.next()); + assertEquals("c2", rs.getString(1)); + assertEquals("string", rs.getString(2)); + + statement.executeQuery("DESCRIBE t1 c1"); + statement.executeQuery("DESCRIBE t1 c2"); + + try { + statement.executeQuery("DESCRIBE t2"); + fail("Expected DESCRIBE to fail on t2"); + } catch (SQLException e) { + context.verifyAuthzException(e); + } + + try { + statement.executeQuery("DESCRIBE FORMATTED t1"); + fail("Expected DESCRIBE FORMATTED to fail"); + } catch (SQLException e) { + context.verifyAuthzException(e); + } + + try { + statement.executeQuery("DESCRIBE EXTENDED t1"); + fail("Expected DESCRIBE EXTENDED to fail"); + } catch (SQLException e) { + context.verifyAuthzException(e); + } + statement.close(); + connection.close(); + + // Cleanup + connection = context.createConnection(ADMIN1); + statement = context.createStatement(connection); + statement.execute("DROP TABLE t1"); + statement.execute("DROP TABLE t2"); + statement.execute("DROP ROLE user_role1"); + statement.close(); + connection.close(); + } + @Test public void testNegative() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("CREATE TABLE t1 (c1 string, c2 string, c3 string)"); + statement.execute("CREATE TABLE t1 (c1 string, c2 string)"); statement.execute("CREATE ROLE user_role1"); statement.execute("CREATE ROLE user_role2"); statement.execute("GRANT SELECT (c1) ON TABLE t1 TO ROLE user_role1"); statement.execute("GRANT SELECT (c1,c2) ON TABLE t1 TO ROLE user_role2"); + + //Make sure insert/all are not supported try { statement.execute("GRANT INSERT (c2) ON TABLE t1 TO ROLE user_role2"); assertTrue("Sentry should not support privilege: Insert on Column", false); } catch (Exception e) { assertTrue("The error should be 'Sentry does not support privilege: Insert on Column'", - e.getMessage().contains("Sentry does not support privilege: Insert on Column")); + e.getMessage().toUpperCase().contains("SENTRY DOES NOT SUPPORT PRIVILEGE: INSERT ON COLUMN")); } try { statement.execute("GRANT ALL (c2) ON TABLE t1 TO ROLE user_role2"); assertTrue("Sentry should not support privilege: ALL on Column", false); } catch (Exception e) { assertTrue("The error should be 'Sentry does not support privilege: All on Column'", - e.getMessage().contains("Sentry does not support privilege: All on Column")); + e.getMessage().toUpperCase().contains("SENTRY DOES NOT SUPPORT PRIVILEGE: ALL ON COLUMN")); } statement.execute("GRANT ROLE user_role1 TO GROUP " + USERGROUP1); statement.execute("GRANT ROLE user_role2 TO GROUP " + USERGROUP2); statement.close(); connection.close(); + /* + Behavior of select col, select count(col), select *, and select count(*), count(1) + */ // 1.1 user_role1 select c1,c2 from t1, will throw exception connection = context.createConnection(USER1_1); statement = context.createStatement(connection); try { statement.execute("SELECT c1,c2 FROM t1"); - assertTrue("only SELECT allowed on t1.c1!!", false); - } catch (Exception e) { - // Ignore + assertTrue("User with privilege on one column is able to access other column!!", false); + } catch (SQLException e) { + context.verifyAuthzException(e); } - // 1.2 user_role1 select * from t1, will throw exception + // 1.2 user_role1 count(col) works, *, count(*) and count(1) fails + statement.execute("SELECT 
count(c1) FROM t1"); try { statement.execute("SELECT * FROM t1"); - assertTrue("only SELECT allowed on t1.c1!!", false); - } catch (Exception e) { - // Ignore + assertTrue("Select * should fail - only SELECT allowed on t1.c1!!", false); + } catch (SQLException e) { + context.verifyAuthzException(e); } + try { + statement.execute("SELECT count(*) FROM t1"); + assertTrue("Select count(*) should fail - only SELECT allowed on t1.c1!!", false); + } catch (SQLException e) { + context.verifyAuthzException(e); + } + try { + statement.execute("SELECT count(1) FROM t1"); + assertTrue("Select count(1) should fail - only SELECT allowed on t1.c1!!", false); + } catch (SQLException e) { + context.verifyAuthzException(e); + } + + statement.close(); + connection.close(); + - // 2.1 user_role2 select c1,c2,c3 from t1, will throw exception + // 2.1 user_role2 can do *, count(col), but count(*) and count(1) fails connection = context.createConnection(USER2_1); statement = context.createStatement(connection); + statement.execute("SELECT count(c1) FROM t1"); + statement.execute("SELECT * FROM t1"); + + //SENTRY-838 try { - statement.execute("SELECT c1,c2,c3 FROM t1"); - assertTrue("no permission on table t1!!", false); + statement.execute("SELECT count(*) FROM t1"); + assertTrue("Select count(*) works only with table level privileges - User has select on all columns!!", false); } catch (Exception e) { // Ignore } - - // 2.2 user_role2 select * from t1, will throw exception - connection = context.createConnection(USER2_1); - statement = context.createStatement(connection); try { - statement.execute("SELECT * FROM t1"); - assertTrue("no permission on table t1!!", false); + statement.execute("SELECT count(1) FROM t1"); + assertTrue("Select count(1) works only with table level privileges - User has select on all columns!!", false); } catch (Exception e) { // Ignore } - statement.close(); connection.close(); } @Test - public void testPostive() throws Exception { + public void testPositive() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("CREATE database " + DB1); @@ -172,6 +264,7 @@ public void testPostive() throws Exception { statement = context.createStatement(connection); statement.execute("use " + DB1); statement.execute("SELECT c1 FROM t1"); + statement.execute("DESCRIBE t1"); // 2.1 user_role2 select c1,c2 on t1 connection = context.createConnection(USER2_1); @@ -244,4 +337,81 @@ public void testCreateTableAsSelect() throws Exception { statement.close(); connection.close(); } + + @Test + public void testShowColumns() throws Exception { + // grant select on test_tb(s) to USER1_1 + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + statement.execute("CREATE database " + DB1); + statement.execute("use " + DB1); + statement.execute("CREATE TABLE test_tb (s string, i string)"); + statement.execute("CREATE ROLE user_role1"); + statement.execute("GRANT SELECT (s) ON TABLE test_tb TO ROLE user_role1"); + statement.execute("GRANT ROLE user_role1 TO GROUP " + USERGROUP1); + statement.close(); + connection.close(); + + // USER1_1 executes "show columns in test_tb" and gets the s column information + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + statement.execute("use " + DB1); + ResultSet res = statement.executeQuery("show columns in test_tb"); + + List expectedResult = new ArrayList(); + 
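// Compare the columns SHOW COLUMNS actually returns with the single column granted to user_role1 +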
List returnedResult = new ArrayList(); + expectedResult.add("s"); + while (res.next()) { + returnedResult.add(res.getString(1).trim()); + } + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); + res.close(); + + statement.close(); + connection.close(); + + // grant select on test_tb(s, i) to USER2_1 + connection = context.createConnection(ADMIN1); + statement = context.createStatement(connection); + statement.execute("use " + DB1); + statement.execute("CREATE ROLE user_role2"); + statement.execute("GRANT SELECT(s, i) ON TABLE test_tb TO ROLE user_role2"); + statement.execute("GRANT ROLE user_role2 TO GROUP " + USERGROUP2); + statement.close(); + connection.close(); + + // USER2_1 executes "show columns in test_tb" and gets the s,i columns information + connection = context.createConnection(USER2_1); + statement = context.createStatement(connection); + statement.execute("use " + DB1); + res = statement.executeQuery("show columns in test_tb"); + + expectedResult.add("s"); + expectedResult.add("i"); + while (res.next()) { + returnedResult.add(res.getString(1).trim()); + } + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); + res.close(); + + statement.close(); + connection.close(); + + // USER3_1 executes "show columns in test_tb" and the exception will be thrown + connection = context.createConnection(USER3_1); + statement = context.createStatement(connection); + try { + // USER3_1 has no privilege on any column, so "show columns in test_tb" will throw an exception + statement.execute("show columns in db_1.test_tb"); + fail("No valid privileges exception should have been thrown"); + } catch (Exception e) { + } + + statement.close(); + connection.close(); + } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestConcurrentClients.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestConcurrentClients.java new file mode 100644 index 000000000..bf871f01a --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestConcurrentClients.java @@ -0,0 +1,343 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.tests.e2e.dbprovider; + +import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; +import org.apache.sentry.provider.file.PolicyFile; +import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; + +import org.apache.sentry.tests.e2e.hive.StaticUserGroup; +import static org.junit.Assume.assumeTrue; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.lang.RandomStringUtils; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertEquals; + +/** + * Concurrency tests that exercise the Sentry service client and the HS2 JDBC client + * from many threads at once. + */ +public class TestConcurrentClients extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestConcurrentClients.class); + + private PolicyFile policyFile; + + // define scale for tests + private final int NUM_OF_TABLES = Integer.parseInt(System.getProperty( + "sentry.e2e.concurrency.test.tables-per-db", "1")); + private final int NUM_OF_PAR = Integer.parseInt(System.getProperty( + "sentry.e2e.concurrency.test.partitions-per-tb", "3")); + private final int NUM_OF_THREADS = Integer.parseInt(System.getProperty( + "sentry.e2e.concurrency.test.threads", "30")); + private final int NUM_OF_TASKS = Integer.parseInt(System.getProperty( + "sentry.e2e.concurrency.test.tasks", "100")); + private final Long HS2_CLIENT_TEST_DURATION_MS = Long.parseLong(System.getProperty( + "sentry.e2e.concurrency.test.hs2client.test.time.ms", "10000")); //millis + private final Long SENTRY_CLIENT_TEST_DURATION_MS = Long.parseLong(System.getProperty( + "sentry.e2e.concurrency.test.sentryclient.test.time.ms", "10000")); //millis + + private static Map privileges = new HashMap(); + static { + privileges.put("all_db1", "server=server1->db=" + DB1 + "->action=all"); + } + + @Override + @Before + public void setup() throws Exception { + super.setupAdmin(); + policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + } + + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception { + assumeTrue(Boolean.parseBoolean(System.getProperty("sentry.scaletest.oncluster", "false"))); + useSentryService = true; // configure sentry client + clientKerberos = true; // need to get client configuration from testing environments + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + + static String randomString(int len) { + return RandomStringUtils.random(len, true, false); + } + + private void execStmt(Statement stmt, String sql) throws Exception { + LOGGER.info("Running [" + sql + "]"); + stmt.execute(sql); + } + + private void createDbTb(String user, String db, String tb) throws Exception{ + Connection connection = context.createConnection(user); + Statement statement = context.createStatement(connection); + try { + execStmt(statement, "DROP DATABASE IF EXISTS " + db + " CASCADE"); + execStmt(statement, "CREATE DATABASE " + db); + execStmt(statement, "USE " + db); + for (int i = 0; i < NUM_OF_TABLES; i++) { + String tbName = tb + "_" +
Integer.toString(i); + execStmt(statement, "CREATE TABLE " + tbName + " (a string) PARTITIONED BY (b string)"); + } + } catch (Exception ex) { + LOGGER.error("caught exception: " + ex); + } finally { + statement.close(); + connection.close(); + } + } + + private void createPartition(String user, String db, String tb) throws Exception{ + Connection connection = context.createConnection(user); + Statement statement = context.createStatement(connection); + try { + execStmt(statement, "USE " + db); + for (int j = 0; j < NUM_OF_TABLES; j++) { + String tbName = tb + "_" + Integer.toString(j); + for (int i = 0; i < NUM_OF_PAR; i++) { + String randStr = randomString(4); + String sql = "ALTER TABLE " + tbName + " ADD IF NOT EXISTS PARTITION (b = '" + randStr + "') "; + LOGGER.info("[" + i + "] " + sql); + execStmt(statement, sql); + } + } + } catch (Exception ex) { + LOGGER.error("caught exception: " + ex); + } finally { + statement.close(); + connection.close(); + } + } + + private void adminCreateRole(String roleName) throws Exception { + Connection connection = context.createConnection(ADMIN1); + Statement stmt = context.createStatement(connection); + try { + execStmt(stmt, "DROP ROLE " + roleName); + } catch (Exception ex) { + LOGGER.warn("Role does not exist " + roleName); + } finally { + try { + execStmt(stmt, "CREATE ROLE " + roleName); + } catch (Exception ex) { + LOGGER.error("caught exception when create new role: " + ex); + } finally { + stmt.close(); + connection.close(); + } + } + } + + private void adminCleanUp(String db, String roleName) throws Exception { + Connection connection = context.createConnection(ADMIN1); + Statement stmt = context.createStatement(connection); + try { + execStmt(stmt, "DROP DATABASE IF EXISTS " + db + " CASCADE"); + execStmt(stmt, "DROP ROLE " + roleName); + } catch (Exception ex) { + LOGGER.warn("Failed to clean up ", ex); + } finally { + stmt.close(); + connection.close(); + } + } + + private void adminShowRole(String roleName) throws Exception { + Connection connection = context.createConnection(ADMIN1); + Statement stmt = context.createStatement(connection); + boolean found = false; + try { + ResultSet rs = stmt.executeQuery("SHOW ROLES "); + while (rs.next()) { + if (rs.getString("role").equalsIgnoreCase(roleName)) { + LOGGER.info("Found role " + roleName); + found = true; + } + } + } catch (Exception ex) { + LOGGER.error("caught exception when show roles: " + ex); + } finally { + stmt.close(); + connection.close(); + } + assertTrue("failed to detect " + roleName, found); + } + + private void adminGrant(String test_db, String test_tb, + String roleName, String group) throws Exception { + Connection connection = context.createConnection(ADMIN1); + Statement stmt = context.createStatement(connection); + try { + execStmt(stmt, "USE " + test_db); + for (int i = 0; i < NUM_OF_TABLES; i++) { + String tbName = test_tb + "_" + Integer.toString(i); + execStmt(stmt, "GRANT ALL ON TABLE " + tbName + " TO ROLE " + roleName); + } + execStmt(stmt, "GRANT ROLE " + roleName + " TO GROUP " + group); + } catch (Exception ex) { + LOGGER.error("caught exception when grant permission and role: " + ex); + } finally { + stmt.close(); + connection.close(); + } + } + + /** + * A synchronized state class to track concurrency test status from each thread + */ + private final static class TestRuntimeState { + private int numSuccess = 0; + private boolean failed = false; + private Throwable firstException = null; + + public synchronized void setFirstException(Throwable e) { + 
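// Record only the first failure seen across all worker threads; later exceptions are ignored. +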
failed = true; + if (firstException == null) { + firstException = e; + } + } + public synchronized void setNumSuccess() { + numSuccess += 1; + } + public synchronized int getNumSuccess() { + return numSuccess; + } + public synchronized Throwable getFirstException() { + return firstException; + } + } + + /** + * Test that when concurrent HS2 clients talk to the server, + * privileges are correctly created and updated. + * @throws Exception + */ + @Test + public void testConcurrentHS2Client() throws Exception { + ExecutorService executor = Executors.newFixedThreadPool(NUM_OF_THREADS); + final TestRuntimeState state = new TestRuntimeState(); + + for (int i = 0; i < NUM_OF_TASKS; i++) { + executor.execute(new Runnable() { + @Override + public void run() { + LOGGER.info("Starting tests: create role, show role, create db and tbl, and create partitions"); + if (state.failed) { + return; + } + try { + Long startTime = System.currentTimeMillis(); + Long elapsedTime = 0L; + while (Long.compare(elapsedTime, HS2_CLIENT_TEST_DURATION_MS) <= 0) { + String randStr = randomString(5); + String test_role = "test_role_" + randStr; + String test_db = "test_db_" + randStr; + String test_tb = "test_tb_" + randStr; + LOGGER.info("Start to test sentry with hs2 client with role " + test_role); + adminCreateRole(test_role); + adminShowRole(test_role); + createDbTb(ADMIN1, test_db, test_tb); + adminGrant(test_db, test_tb, test_role, USERGROUP1); + createPartition(USER1_1, test_db, test_tb); + adminCleanUp(test_db, test_role); + elapsedTime = System.currentTimeMillis() - startTime; + LOGGER.info("elapsedTime = " + elapsedTime); + } + state.setNumSuccess(); + } catch (Exception e) { + LOGGER.error("Exception: " + e); + state.setFirstException(e); + } + } + }); + } + executor.shutdown(); + while (!executor.isTerminated()) { + Thread.sleep(1000); //millisecond + } + Throwable ex = state.getFirstException(); + assertFalse(ex == null ? "Test failed" : ex.toString(), state.failed); + assertEquals(NUM_OF_TASKS, state.getNumSuccess()); + } + + /** + * Test that concurrent Sentry clients talking to the Sentry server keep + * the shared test state consistent across threads. + * @throws Exception + */ + @Test + public void testConcurrentSentryClient() throws Exception { + final String HIVE_KEYTAB_PATH = + System.getProperty("sentry.e2etest.hive.policyOwnerKeytab"); + final SentryPolicyServiceClient client = getSentryClient("hive", HIVE_KEYTAB_PATH); + ExecutorService executor = Executors.newFixedThreadPool(NUM_OF_THREADS); + + final TestRuntimeState state = new TestRuntimeState(); + for (int i = 0; i < NUM_OF_TASKS; i++) { + LOGGER.info("Start to test sentry client with task id [" + i + "]"); + executor.execute(new Runnable() { + @Override + public void run() { + if (state.failed) { + LOGGER.error("found one failed state, abort test from here."); + return; + } + try { + String randStr = randomString(5); + String test_role = "test_role_" + randStr; + LOGGER.info("Start to test role: " + test_role); + Long startTime = System.currentTimeMillis(); + Long elapsedTime = 0L; + while (Long.compare(elapsedTime, SENTRY_CLIENT_TEST_DURATION_MS) <= 0) { + LOGGER.info("Test role " + test_role + " runs " + elapsedTime + " ms."); + client.createRole(ADMIN1, test_role); + client.listRoles(ADMIN1); + client.grantServerPrivilege(ADMIN1, test_role, "server1", false); + client.listAllPrivilegesByRoleName(ADMIN1, test_role); + client.dropRole(ADMIN1, test_role); + elapsedTime = System.currentTimeMillis() - startTime; + } + state.setNumSuccess(); + } catch (Exception e) { + LOGGER.error("Sentry Client Testing Exception: ", e); + state.setFirstException(e); + } + } + }); + } + executor.shutdown(); + while (!executor.isTerminated()) { + Thread.sleep(1000); //millisecond + } + Throwable ex = state.getFirstException(); + assertFalse(ex == null ?
"Test failed" : ex.toString(), state.failed); + assertEquals(NUM_OF_TASKS, state.getNumSuccess()); + } +} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDatabaseProvider.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDatabaseProvider.java index f9e8f808e..06967bda8 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDatabaseProvider.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDatabaseProvider.java @@ -46,56 +46,24 @@ import org.junit.Test; import com.google.common.io.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestDatabaseProvider extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestDatabaseProvider.class); @BeforeClass public static void setupTestStaticConfiguration() throws Exception{ useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); - } - - /** - * This test is only used for manual testing of beeline with Sentry Service - * @throws Exception - */ - @Override - @After - public void clearDB() throws Exception { - Connection connection; - Statement statement; - connection = context.createConnection(ADMIN1); - statement = context.createStatement(connection); - ResultSet resultSet; - resultSet = statement.executeQuery("SHOW roles"); - List roles = new ArrayList(); - while ( resultSet.next()) { - roles.add(resultSet.getString(1)); - } - for(String role:roles) { - statement.execute("DROP Role " + role); - } - - statement.close(); - connection.close(); - if (context != null) { - context.close(); - } - } - - @Ignore - @Test - public void beelineTest() throws Exception{ - while(true) {} + AbstractTestWithStaticConfiguration.setupAdmin(); } @Test public void testBasic() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("CREATE ROLE admin_role"); - statement.execute("GRANT ALL ON DATABASE default TO ROLE admin_role"); - statement.execute("GRANT ROLE admin_role TO GROUP " + ADMINGROUP); statement.execute("DROP TABLE t1"); statement.execute("CREATE TABLE t1 (c1 string)"); statement.execute("CREATE ROLE user_role"); @@ -245,7 +213,6 @@ public void testGrantDuplicateonDb() throws Exception { } private File doSetupForGrantDbTests() throws Exception { - super.setupAdmin(); //copy data file to test dir File dataDir = context.getDataDir(); @@ -319,7 +286,7 @@ public void testRevokeServerAfterGrantTable() throws Exception { ResultSet resultSet = statement.executeQuery("SHOW GRANT ROLE user_role"); assertResultSize(resultSet, 2); statement.close(); - connection.close();; + connection.close(); // Revoke on Server connection = context.createConnection(ADMIN1); @@ -363,7 +330,6 @@ public void testRevokeServerAfterGrantTable() throws Exception { * @throws Exception */ private void doSetup() throws Exception { - super.setupAdmin(); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); @@ -407,7 +373,6 @@ private void doSetup() throws Exception { @Test public void testRevokeFailAnotherRoleExist() throws Exception { - super.setupAdmin(); //copy data file to test dir File dataDir = context.getDataDir(); @@ -508,7 +473,6 @@ public void testRevokeFailAnotherRoleExist() throws Exception { @Test public void 
testRevokeFailMultipleGrantsExist() throws Exception { - super.setupAdmin(); //copy data file to test dir File dataDir = context.getDataDir(); @@ -590,7 +554,6 @@ public void testRevokeFailMultipleGrantsExist() throws Exception { */ @Test public void testRevokeAllOnServer() throws Exception{ - super.setupAdmin(); //copy data file to test dir File dataDir = context.getDataDir(); @@ -677,7 +640,6 @@ public void testRevokeAllOnServer() throws Exception{ */ @Test public void testRevokeAllOnDb() throws Exception{ - super.setupAdmin(); //copy data file to test dir File dataDir = context.getDataDir(); @@ -759,7 +721,6 @@ public void testRevokeAllOnDb() throws Exception{ */ @Test public void testRevokeAllOnTable() throws Exception{ - super.setupAdmin(); //copy data file to test dir File dataDir = context.getDataDir(); @@ -840,7 +801,6 @@ public void testRevokeAllOnTable() throws Exception{ */ @Test public void testRevokeSELECTOnTable() throws Exception{ - super.setupAdmin(); //copy data file to test dir File dataDir = context.getDataDir(); @@ -920,7 +880,6 @@ public void testRevokeSELECTOnTable() throws Exception{ */ @Test public void testRevokeINSERTOnTable() throws Exception{ - super.setupAdmin(); //copy data file to test dir File dataDir = context.getDataDir(); @@ -1018,6 +977,8 @@ public void testGrantRevokePrivileges() throws Exception { //Grant/Revoke All on server by admin statement.execute("GRANT ALL ON SERVER server1 to role role1"); + statement.execute("GRANT Role role1 to group " + ADMINGROUP); + statement.execute("Create table tab1(col1 int)"); resultSet = statement.executeQuery("SHOW GRANT ROLE role1"); assertResultSize(resultSet, 1); while(resultSet.next()) { @@ -1147,6 +1108,29 @@ public void testGrantRevokePrivileges() throws Exception { resultSet = statement.executeQuery("SHOW GRANT ROLE role1"); assertResultSize(resultSet, 0); + + //Grant/Revoke SELECT on column by admin + statement.execute("GRANT SELECT(col1) ON TABLE tab1 to role role1"); + resultSet = statement.executeQuery("SHOW GRANT ROLE role1"); + assertResultSize(resultSet, 1); + while(resultSet.next()) { + assertThat(resultSet.getString(1), equalToIgnoringCase("default")); + assertThat(resultSet.getString(2), equalToIgnoringCase("tab1")); + assertThat(resultSet.getString(3), equalToIgnoringCase(""));//partition + assertThat(resultSet.getString(4), equalToIgnoringCase("col1"));//column + assertThat(resultSet.getString(5), equalToIgnoringCase("role1"));//principalName + assertThat(resultSet.getString(6), equalToIgnoringCase("role"));//principalType + assertThat(resultSet.getString(7), equalToIgnoringCase("select")); + assertThat(resultSet.getBoolean(8), is(new Boolean("False")));//grantOption + //Create time is not tested + //assertThat(resultSet.getLong(9), is(new Long(0))); + assertThat(resultSet.getString(10), equalToIgnoringCase("--"));//grantor + } + + statement.execute("REVOKE SELECT(col1) ON TABLE tab1 from role role1"); + resultSet = statement.executeQuery("SHOW GRANT ROLE role1"); + assertResultSize(resultSet, 0); + //Revoke Partial privilege on table by admin statement.execute("GRANT ALL ON TABLE tab1 to role role1"); resultSet = statement.executeQuery("SHOW GRANT ROLE role1"); @@ -1189,6 +1173,7 @@ public void testGrantRevokePrivileges() throws Exception { assertThat(resultSet.getString(10), equalToIgnoringCase("--"));//grantor } + statement.close(); connection.close(); } @@ -1201,6 +1186,20 @@ private void assertResultSize(ResultSet resultSet, int expected) throws SQLExcep assertThat(count, is(expected)); } + 
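/** Filters SHOW ROLES output down to roles created by these tests (plus admin_role when isAdmin is set) before comparing against the expected list. */ +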
private void assertTestRoles(ResultSet resultSet, List expected, boolean isAdmin) throws SQLException{ + List returned = new ArrayList<>(); + while(resultSet.next()) { + String role = resultSet.getString(1); + if (role.startsWith("role") || (isAdmin && role.startsWith("admin_role"))) { + LOGGER.info("Found role " + role); + returned.add(role); + } else { + LOGGER.error("Found an incorrect role so ignore it from validation: " + role); + } + } + validateReturnedResult(expected, returned); + } + /** * Create and Drop role by admin * @throws Exception @@ -1211,10 +1210,16 @@ public void testCreateDropRole() throws Exception { Statement statement = context.createStatement(connection); statement.execute("CREATE ROLE role1"); ResultSet resultSet = statement.executeQuery("SHOW roles"); - assertResultSize(resultSet, 1); + List expected = new ArrayList(); + expected.add("role1"); + expected.add("admin_role"); + assertTestRoles(resultSet, expected, true); + statement.execute("DROP ROLE role1"); resultSet = statement.executeQuery("SHOW roles"); - assertResultSize(resultSet, 0); + expected.clear(); + expected.add("admin_role"); + assertTestRoles(resultSet, expected, true); } /** @@ -1242,6 +1247,7 @@ public void testCornerCases() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); + statement.execute("CREATE TABLE IF NOT EXISTS tab1(c1 string)"); //Drop a role which does not exist context.assertSentryException(statement, "DROP ROLE role1", SentryNoSuchObjectException.class.getSimpleName()); @@ -1359,7 +1365,10 @@ public void testShowRoles() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); ResultSet resultSet = statement.executeQuery("SHOW ROLES"); - assertResultSize(resultSet, 0); + List expected = new ArrayList<>(); + expected.add("admin_role"); + assertTestRoles(resultSet, expected, true); + statement.execute("CREATE ROLE role1"); statement.execute("CREATE ROLE role2"); resultSet = statement.executeQuery("SHOW ROLES"); @@ -1367,13 +1376,9 @@ public void testShowRoles() throws Exception { assertThat(resultSetMetaData.getColumnCount(), is(1)); assertThat(resultSetMetaData.getColumnName(1), equalToIgnoringCase("role")); - Set roles = new HashSet(); - while ( resultSet.next()) { - roles.add(resultSet.getString(1)); - } - assertThat(roles.size(), is(2)); - assertTrue(roles.contains("role1")); - assertTrue(roles.contains("role2")); + expected.add("role1"); + expected.add("role2"); + assertTestRoles(resultSet, expected, true); statement.close(); connection.close(); } @@ -1395,9 +1400,9 @@ public void testShowRolesByGroup() throws Exception { statement.execute("CREATE ROLE role1"); statement.execute("CREATE ROLE role2"); statement.execute("CREATE ROLE role3"); - statement.execute("GRANT ROLE role1 to GROUP " + ADMINGROUP); + statement.execute("GRANT ROLE role1 to GROUP " + USERGROUP1); - ResultSet resultSet = statement.executeQuery("SHOW ROLE GRANT GROUP " + ADMINGROUP); + ResultSet resultSet = statement.executeQuery("SHOW ROLE GRANT GROUP " + USERGROUP1); ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); assertThat(resultSetMetaData.getColumnCount(), is(4)); assertThat(resultSetMetaData.getColumnName(1), equalToIgnoringCase("role")); @@ -1491,6 +1496,7 @@ public void testShowPrivilegesByRole() throws Exception { ResultSet resultSet = statement.executeQuery("SHOW GRANT ROLE role1"); assertResultSize(resultSet, 0); 
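// Tables referenced by the grants below must exist, so they are created up front.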
statement.execute("CREATE ROLE role2"); + statement.execute("CREATE TABLE IF NOT EXISTS t1(c1 string, c2 int)"); statement.execute("GRANT SELECT ON TABLE t1 TO ROLE role1"); statement.execute("GRANT ROLE role1 to GROUP " + USERGROUP1); @@ -1549,6 +1555,10 @@ public void testShowPrivilegesByRoleOnObjectGivenColumn() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("CREATE ROLE role1"); + statement.execute("CREATE TABLE IF NOT EXISTS t1(c1 string, c2 int)"); + statement.execute("CREATE TABLE IF NOT EXISTS t2(c1 string, c2 int)"); + statement.execute("CREATE TABLE IF NOT EXISTS t3(c1 string, c2 int)"); + statement.execute("CREATE TABLE IF NOT EXISTS t4(c1 string, c2 int)"); statement.execute("GRANT SELECT (c1) ON TABLE t1 TO ROLE role1"); statement.execute("GRANT SELECT (c2) ON TABLE t2 TO ROLE role1"); statement.execute("GRANT SELECT (c1,c2) ON TABLE t3 TO ROLE role1"); @@ -1684,6 +1694,7 @@ public void testShowPrivilegesByRoleOnObjectGivenTable() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("CREATE ROLE role1"); + statement.execute("CREATE TABLE IF NOT EXISTS t1(c1 string)"); statement.execute("GRANT SELECT ON TABLE t1 TO ROLE role1"); //On table - positive @@ -1882,7 +1893,7 @@ public void testShowCurrentRole() throws Exception { public void testShowAllCurrentRoles() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - String testRole1 = "testRole1", testRole2 = "testRole2"; + String testRole1 = "role1", testRole2 = "role2"; statement.execute("CREATE ROLE " + testRole1); statement.execute("CREATE ROLE " + testRole2); statement.execute("GRANT ROLE " + testRole1 + " TO GROUP " + ADMINGROUP); @@ -1891,11 +1902,17 @@ public void testShowAllCurrentRoles() throws Exception { statement.execute("GRANT ROLE " + testRole2 + " TO GROUP " + USERGROUP1); ResultSet resultSet = statement.executeQuery("SHOW CURRENT ROLES"); - assertResultSize(resultSet, 2); + List expected = new ArrayList<>(); + expected.add("admin_role"); + expected.add(testRole1); + expected.add(testRole2); + assertTestRoles(resultSet, expected, true); statement.execute("SET ROLE " + testRole1); resultSet = statement.executeQuery("SHOW CURRENT ROLES"); - assertResultSize(resultSet, 1); + expected.clear(); + expected.add(testRole1); + assertTestRoles(resultSet, expected, true); statement.close(); connection.close(); @@ -1911,11 +1928,16 @@ public void testShowAllCurrentRoles() throws Exception { statement = context.createStatement(connection); resultSet = statement.executeQuery("SHOW CURRENT ROLES"); - assertResultSize(resultSet, 2); + expected.clear(); + expected.add(testRole1); + expected.add(testRole2); + assertTestRoles(resultSet, expected, false); statement.execute("SET ROLE " + testRole2); resultSet = statement.executeQuery("SHOW CURRENT ROLES"); - assertResultSize(resultSet, 1); + expected.clear(); + expected.add(testRole2); + assertTestRoles(resultSet, expected, false); statement.close(); connection.close(); @@ -1925,7 +1947,7 @@ public void testShowAllCurrentRoles() throws Exception { public void testSetRole() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - String testRole0 = "testRole1", testRole1 = "testRole2"; + String testRole0 = "role1", 
testRole1 = "role2"; statement.execute("CREATE ROLE " + testRole0); statement.execute("CREATE ROLE " + testRole1); @@ -1984,16 +2006,21 @@ public void testUriWithEquals() throws Exception { } @Test - public void caseSensitiveGroupNames() throws Exception { + public void testCaseSensitiveGroupNames() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - String testRole1 = "testRole1"; + ResultSet resultSet; + resultSet = statement.executeQuery("SHOW ROLE GRANT GROUP " + ADMINGROUP); + List expected = new ArrayList<>(); + assertTestRoles(resultSet, expected, false); + + String testRole1 = "role1"; statement.execute("CREATE ROLE " + testRole1); statement.execute("GRANT ROLE " + testRole1 + " TO GROUP " + ADMINGROUP); - - ResultSet resultSet; resultSet = statement.executeQuery("SHOW ROLE GRANT GROUP " + ADMINGROUP); - assertResultSize(resultSet, 1); + expected.clear(); + expected.add(testRole1); + assertTestRoles(resultSet, expected, false); context.assertSentryException(statement, "SHOW ROLE GRANT GROUP Admin", SentryNoSuchObjectException.class.getSimpleName()); @@ -2008,14 +2035,13 @@ public void caseSensitiveGroupNames() throws Exception { */ @Test public void testGrantRevokeRoleToGroups() throws Exception { - super.setupAdmin(); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("USE " + DB1); statement.execute("DROP TABLE IF EXISTS t1"); - statement.execute("CREATE TABLE t1 (c1 string)"); + statement.execute("CREATE TABLE t1 (c1 string,c2 string,c3 string,c4 string,c5 string)"); statement.execute("CREATE ROLE user_role"); statement.execute("GRANT ALL ON TABLE t1 TO ROLE user_role"); @@ -2069,4 +2095,89 @@ public void testGrantRevokeRoleToGroups() throws Exception { connection.close(); } + /* SENTRY-827 */ + @Test + public void serverActions() throws Exception { + String[] dbs = {DB1, DB2}; + String tbl = TBL1; + + //To test Insert + File dataDir = context.getDataDir(); + File dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); + FileOutputStream to = new FileOutputStream(dataFile); + Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); + to.close(); + + //setup roles and group mapping + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + + statement.execute("CREATE ROLE server_all"); + statement.execute("CREATE ROLE server_select"); + statement.execute("CREATE ROLE server_insert"); + + statement.execute("GRANT ALL ON SERVER server1 to ROLE server_all"); + statement.execute("GRANT SELECT ON SERVER server1 to ROLE server_select"); + statement.execute("GRANT INSERT ON SERVER server1 to ROLE server_insert"); + statement.execute("GRANT ALL ON URI 'file://" + dataFile.getPath() + "' TO ROLE server_select"); + statement.execute("GRANT ALL ON URI 'file://" + dataFile.getPath() + "' TO ROLE server_insert"); + + statement.execute("GRANT ROLE server_all to GROUP " + ADMINGROUP); + statement.execute("GRANT ROLE server_select to GROUP " + USERGROUP1); + statement.execute("GRANT ROLE server_insert to GROUP " + USERGROUP2); + + for (String db : dbs) { + statement.execute("CREATE DATABASE IF NOT EXISTS " + db); + statement.execute("CREATE TABLE IF NOT EXISTS " + db + "." 
+ tbl + "(a String)"); + } + statement.close(); + connection.close(); + + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + //Test SELECT, ensure INSERT fails + for (String db : dbs) { + statement.execute("SELECT * FROM " + db + "." + tbl); + try{ + statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + + "' INTO TABLE " + db + "." + tbl); + assertTrue("INSERT should not be possible here", false); + }catch(SQLException e){} + } + statement.close(); + connection.close(); + + connection = context.createConnection(USER2_1); + statement = context.createStatement(connection); + //Test INSERT, ensure SELECT fails + for (String db : dbs){ + statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + + "' INTO TABLE " + db + "." + tbl); + try{ + statement.execute("SELECT * FROM " + db + "." + tbl); + assertTrue("SELECT should not be possible here", false); + }catch(SQLException e){} + } + + statement.close(); + connection.close(); + + //Ensure revoke worked + connection = context.createConnection(ADMIN1); + statement = context.createStatement(connection); + statement.execute("REVOKE SELECT ON SERVER server1 from ROLE server_select"); + + statement.close(); + connection.close(); + + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + + try { + statement.execute("SELECT * FROM " + dbs[0] + "." + tbl); + assertTrue("Revoke Select on server Failed", false); + } catch (SQLException e) {} + + statement.close(); + connection.close(); + } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbColumnLevelMetaDataOps.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbColumnLevelMetaDataOps.java new file mode 100644 index 000000000..a454202a0 --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbColumnLevelMetaDataOps.java @@ -0,0 +1,371 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.sentry.tests.e2e.dbprovider; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; + +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; +import org.apache.sentry.tests.e2e.hive.PrivilegeResultSet; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Contains tests for meta data operations with column level privileges + */ +public class TestDbColumnLevelMetaDataOps extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory. + getLogger(TestDbColumnLevelMetaDataOps.class); + + private static final String TEST_COL_METADATA_OPS_DB = "test_col_metadata_ops_db"; + private static final String TEST_COL_METADATA_OPS_TB = "test_col_metadata_ops_tb"; + private static final String TEST_COL_METADATA_OPS_ROLE = "test_col_metadata_ops_role"; + + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception{ + LOGGER.info("TestDbColumnLevelMetaDataOps setupTestStaticConfiguration"); + useSentryService = true; + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + + @Override + @Before + public void setup() throws Exception { + super.setupAdmin(); + super.setup(); + createTestData(); + } + private static Statement statement = null; + private static Connection connection = null; + + private static void establishSession(String user) throws Exception{ + if (statement != null) { + statement.close(); + } + if (connection != null) { + connection.close(); + } + connection = context.createConnection(user); + statement = context.createStatement(connection); + } + + /** + * Create test database, table and role, + * and grant column level privilege + * @throws Exception + */ + private void createTestData() throws Exception { + establishSession(ADMIN1); + statement.execute("CREATE DATABASE " + TEST_COL_METADATA_OPS_DB); + statement.execute("USE " + TEST_COL_METADATA_OPS_DB); + statement.execute("CREATE TABLE " + TEST_COL_METADATA_OPS_TB + + " (privileged STRING, unprivileged INT) partitioned by (privileged_par STRING, unprivileged_par INT)"); + statement.execute("INSERT INTO TABLE " + TEST_COL_METADATA_OPS_TB + + " PARTITION(privileged_par = 'privileged_par', unprivileged_par = 1) VALUES ('test1', 1)"); + + statement.execute("CREATE ROLE " + TEST_COL_METADATA_OPS_ROLE); + statement.execute("GRANT SELECT(privileged) ON TABLE " + TEST_COL_METADATA_OPS_TB + " TO ROLE " + TEST_COL_METADATA_OPS_ROLE); + statement.execute("GRANT ROLE " + TEST_COL_METADATA_OPS_ROLE + " TO GROUP " + USERGROUP1); + + PrivilegeResultSet prset = new PrivilegeResultSet(statement, "SHOW GRANT ROLE " + + TEST_COL_METADATA_OPS_ROLE + " ON DATABASE " + TEST_COL_METADATA_OPS_DB); + LOGGER.info("SHOW GRANT : " + prset.toString()); + prset.verifyResultSetColumn("table", TEST_COL_METADATA_OPS_TB); + prset.verifyResultSetColumn("column", "privileged"); + prset.verifyResultSetColumn("privilege", "select"); + } + + private ResultSet executeQueryWithLog(String query) throws Exception { + ResultSet rs; + try { + LOGGER.info("Running " + query); + rs = statement.executeQuery(query); + return rs; + } catch (HiveSQLException ex) { + LOGGER.error("Privilege exception occurred when running: " + query); + throw ex; + } + } + + private void
validateFiltersInaccessibleColumns(String query, String colMetaField, String user, + String privileged) throws Exception { + establishSession(user); + statement.execute("USE " + TEST_COL_METADATA_OPS_DB); + ResultSet rs = executeQueryWithLog(query); + int numColumns = 0; + while (rs.next()) { + String val = rs.getString(colMetaField); + numColumns++; + // Relax validation for now: + // a user with any select privilege can perform metadata operations, + // even though the output may include columns the user has no privileges on + assertTrue("Can access non privileged column", val.equalsIgnoreCase(privileged)); + } + rs.close(); + assertTrue("Looks like we accessed more columns than needed", numColumns == 1); + } + + private void validateShowsAllColumns(String query, String colMetaField, String user, + String privileged, String unprivileged) throws Exception { + establishSession(user); + statement.execute("USE " + TEST_COL_METADATA_OPS_DB); + ResultSet rs = executeQueryWithLog(query); + boolean found = false; + while (rs.next()) { + String val = rs.getString(colMetaField); + // Relax validation for now: + // a user with any select privilege can perform metadata operations, + // even though the output may include columns the user has no privileges on + //assertFalse("column unprivileged shouldn't be shown in result", + // val.equalsIgnoreCase("unprivileged")); + if (val.equalsIgnoreCase("unprivileged")) { + LOGGER.warn("column unprivileged related metadata info is not disabled from result"); + } + if (val.toLowerCase().contains(privileged)) { + LOGGER.info("detected privileged column information: " + privileged); + found = true; + } else if (val.toLowerCase().contains(unprivileged)) { + LOGGER.warn("detected unexpected column information: " + unprivileged); + } + } + rs.close(); + assertTrue("failed to detect column privileged from result", found); + } + + private void validateShowsAllColumns(String query, String colMetaField, String user) throws Exception { + validateShowsAllColumns(query, colMetaField, user, "privileged", "unprivileged"); + } + + + private void validateSemanticException(String query, String user) throws Exception { + establishSession(user); + try { + LOGGER.info("Running " + query); + statement.execute(query); + fail("failed to throw SemanticException"); + } catch (Exception ex) { + String err = "SemanticException No valid privileges"; + assertTrue("failed to detect " + err + "\n" + ex.getMessage(), + ex.getMessage().contains(err)); + } + } + + /** + * Test with column level privilege + * user can NOT "show table extended" + */ + @Test + public void testShowExtended() throws Exception { + String query = "SHOW TABLE EXTENDED IN " + TEST_COL_METADATA_OPS_DB + + " like '" + TEST_COL_METADATA_OPS_TB + "'"; + // with column level privileges, user can not do show extended + validateSemanticException(query, USER1_1); + // negative test: without any privileges, the user cannot do it either + validateSemanticException(query, USER2_1); + } + + /** + * Test that with column level privileges, + * SHOW COLUMNS lists only the columns the user is granted + */ + @Test + public void testShowColumns() throws Exception { + String query = "SHOW COLUMNS IN " + TEST_COL_METADATA_OPS_DB + "."
+ TEST_COL_METADATA_OPS_TB; + // with column level privileges, user can show columns + validateFiltersInaccessibleColumns(query, "field", USER1_1, "privileged"); + // without column/table level privileges, any user can NOT show columns + validateSemanticException(query, USER2_1); + } + + /** + * Test SHOW TBLPROPERTIES requires table level privileges + * @throws Exception + */ + @Test + public void testShowProperties() throws Exception { + String query = "SHOW TBLPROPERTIES " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB; + validateSemanticException(query, USER1_1); + validateSemanticException(query, USER2_1); + } + + /** + * Test with column level select privilege, + * user can do "describe table" + */ + @Test + public void testDescribeTable() throws Exception { + String query = "DESCRIBE " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB; + // with column level privilege, user can describe table, but columns are not filtered for now + validateShowsAllColumns(query, "col_name", USER1_1); + // without column/table level privileges, any user can NOT describe table + validateSemanticException(query, USER2_1); + + // only with table level privileges user can describe extended/formatted + query = "DESCRIBE EXTENDED " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB; + validateSemanticException(query, USER1_1); + validateSemanticException(query, USER2_1); + + query = "DESCRIBE EXTENDED " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB + " s"; + validateSemanticException(query, USER1_1); + validateSemanticException(query, USER2_1); + + query = "DESCRIBE FORMATTED " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB; + validateSemanticException(query, USER1_1); + validateSemanticException(query, USER2_1); + + query = "DESCRIBE FORMATTED " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB + " s"; + validateSemanticException(query, USER1_1); + validateSemanticException(query, USER2_1); + } + + /** + * Test with column level select privilege, + * user can only do "explain select column"; + * any other select requires table level privileges + * @throws Exception + */ + @Ignore("After fix SENTRY-849, should enable this test") + @Test + public void testExplainSelect() throws Exception { + String query = "EXPLAIN SELECT privileged FROM " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB; + // With column level privilege, user can explain select column + validateShowsAllColumns(query, "Explain", USER1_1); + // Without column/table level privilege, user can NOT explain select column + validateSemanticException(query, USER2_1); + + // user can NOT explain select unprivileged column + query = "EXPLAIN SELECT unprivileged FROM " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB; + validateSemanticException(query, USER1_1); + validateSemanticException(query, USER2_1); + + query = "EXPLAIN SELECT * FROM " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB; + validateSemanticException(query, USER1_1); + validateSemanticException(query, USER2_1); + + query = "EXPLAIN SELECT count(*) FROM " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB; + validateSemanticException(query, USER1_1); + validateSemanticException(query, USER2_1); + + query = "EXPLAIN SELECT * FROM (SELECT privileged AS c FROM " + + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB + " union all select unprivileged as c from " + + TEST_COL_METADATA_OPS_DB + "." 
+        TEST_COL_METADATA_OPS_TB + ") subq1 order by c";
+    validateSemanticException(query, USER1_1);
+    validateSemanticException(query, USER2_1);
+  }
+
+  /**
+   * Test that after a new column is added,
+   * user1 needs an explicit grant on the new column before accessing it
+   */
+  @Test
+  public void testShowNewColumn() throws Exception {
+    String colName = "newcol";
+    establishSession(ADMIN1);
+    statement.execute("USE " + TEST_COL_METADATA_OPS_DB);
+    statement.execute("ALTER TABLE " + TEST_COL_METADATA_OPS_TB + " ADD COLUMNS (" + colName + " STRING)");
+
+    String query = "SHOW COLUMNS IN " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB;
+    establishSession(USER1_1);
+    ResultSet rs = executeQueryWithLog(query);
+    boolean found = false;
+    while (rs.next() && !found) {
+      String val = rs.getString("field");
+      LOGGER.info("found " + val);
+      if (val.equalsIgnoreCase(colName)) {
+        found = true;
+      }
+    }
+    assertTrue("Should not have implicit access to new column " + colName, !found);
+    rs.close();
+
+    establishSession(ADMIN1);
+    statement.execute("GRANT SELECT(" + colName + ") ON TABLE " + TEST_COL_METADATA_OPS_TB + " TO ROLE " + TEST_COL_METADATA_OPS_ROLE);
+
+    establishSession(USER1_1);
+    rs = executeQueryWithLog(query);
+    found = false;
+    while (rs.next() && !found) {
+      String val = rs.getString("field");
+      LOGGER.info("found " + val);
+      if (val.equalsIgnoreCase(colName)) {
+        found = true;
+      }
+    }
+    assertTrue("Should have access to new column " + colName + " after the explicit grant", found);
+    rs.close();
+    validateSemanticException(query, USER2_1);
+  }
+
+  /**
+   * Grant the user column-level privileges; SHOW PARTITIONS
+   * should list only the partition columns the user was granted
+   * @throws Exception
+   */
+  @Ignore("After SENTRY-898 is fixed, turn on this test")
+  @Test
+  public void testShowPartitions() throws Exception {
+    final String PAR_ROLE_NAME = TEST_COL_METADATA_OPS_ROLE + "_2";
+
+    establishSession(ADMIN1);
+    statement.execute("USE " + TEST_COL_METADATA_OPS_DB);
+    statement.execute("CREATE ROLE " + PAR_ROLE_NAME);
+    statement.execute("GRANT SELECT(privileged_par) ON TABLE " + TEST_COL_METADATA_OPS_TB + " TO ROLE " + PAR_ROLE_NAME);
+    statement.execute("GRANT ROLE " + PAR_ROLE_NAME + " TO GROUP " + USERGROUP1);
+
+    String query = "SHOW PARTITIONS " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB;
+    validateFiltersInaccessibleColumns(query, "partition", USER1_1, "privileged_par");
+  }
+
+  /**
+   * Requires table level privileges
+   */
+  @Test
+  public void testShowTblProperties() throws Exception {
+    String query = "SHOW TBLPROPERTIES " + TEST_COL_METADATA_OPS_DB + "." + TEST_COL_METADATA_OPS_TB;
+    validateSemanticException(query, USER1_1);
+  }
+
+  /**
+   * Requires table level privileges
+   */
+  @Test
+  public void testShowCreateTable() throws Exception {
+    String query = "SHOW CREATE TABLE " + TEST_COL_METADATA_OPS_DB + "."
+ TEST_COL_METADATA_OPS_TB; + validateSemanticException(query, USER1_1); + } + + /** + * Requires table level privileges + */ + @Test + public void testTableExtendLike() throws Exception { + String query = "SHOW TABLE EXTENDED IN " + TEST_COL_METADATA_OPS_DB + " LIKE " + TEST_COL_METADATA_OPS_TB; + validateSemanticException(query, USER1_1); + } +} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbComplexView.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbComplexView.java new file mode 100644 index 000000000..35f41c6ef --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbComplexView.java @@ -0,0 +1,314 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.tests.e2e.dbprovider; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; + +import org.apache.sentry.provider.file.PolicyFile; +import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TestDbComplexView extends AbstractTestWithStaticConfiguration { + + private static final Logger LOGGER = LoggerFactory + .getLogger(TestDbComplexView.class); + + private static final String TEST_VIEW_DB = "test_complex_view_database"; + private static final String TEST_VIEW_TB = "test_complex_view_table"; + private static final String TEST_VIEW_TB2 = "test_complex_view_table_2"; + private static final String TEST_VIEW = "test_complex_view"; + private static final String TEST_VIEW_ROLE = "test_complex_view_role"; + + /** + * Run query and validate one column with given column name + * @param user + * @param sql + * @param db + * @param colName + * @param colVal + * @return + * @throws Exception + */ + private static boolean execValidate(String user, String sql, String db, + String colName, String colVal) throws Exception { + boolean status = false; + Connection conn = null; + Statement stmt = null; + try { + conn = context.createConnection(user); + stmt = context.createStatement(conn); + LOGGER.info("Running [USE " + db + ";" + sql + "] to validate column " + colName + " = " + colVal); + stmt.execute("USE " + db); + ResultSet rset = stmt.executeQuery(sql); + while (rset.next()) { + String val = rset.getString(colName); + if (val.equalsIgnoreCase(colVal)) { + LOGGER.info("found [" + colName + "] = " + colVal); + 
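+ // Matched the expected value: record success and stop scanning the result set.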
status = true; + break; + } else { + LOGGER.warn("[" + colName + "] = " + val + " not equal to " + colVal); + } + } + rset.close(); + } catch (SQLException ex) { + LOGGER.error("SQLException: ", ex); + } catch (Exception ex) { + LOGGER.error("Exception: ", ex); + } finally { + try { + if (stmt != null) { + stmt.close(); + } + if (conn != null) { + conn.close(); + } + } catch (Exception ex) { + LOGGER.error("failed to close connection and statement: " + ex); + } + } + return status; + } + + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception { + useSentryService = true; + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + + @Override + @Before + public void setup() throws Exception { + super.setupAdmin(); + super.setup(); + PolicyFile.setAdminOnServer1(ADMINGROUP); + + // prepare test db and base table + List sqls = new ArrayList(); + sqls.add("USE DEFAULT"); + sqls.add("DROP DATABASE IF EXISTS " + TEST_VIEW_DB + " CASCADE"); + sqls.add("CREATE DATABASE IF NOT EXISTS " + TEST_VIEW_DB); + sqls.add("USE " + TEST_VIEW_DB); + sqls.add("CREATE TABLE " + TEST_VIEW_TB + " (userid VARCHAR(64), link STRING, source STRING) " + + "PARTITIONED BY (datestamp STRING) CLUSTERED BY (userid) INTO 256 BUCKETS STORED AS ORC"); + sqls.add("INSERT INTO TABLE " + TEST_VIEW_TB + " PARTITION (datestamp = '2014-09-23') VALUES " + + "('tlee', " + "'mail.com', 'sports.com'), ('jdoe', 'mail.com', null)"); + sqls.add("SELECT userid FROM " + TEST_VIEW_TB); + sqls.add("CREATE TABLE " + TEST_VIEW_TB2 + " (userid VARCHAR(64), name VARCHAR(64), age INT, " + + "gpa DECIMAL(3, 2)) CLUSTERED BY (age) INTO 2 BUCKETS STORED AS ORC"); + sqls.add("INSERT INTO TABLE " + TEST_VIEW_TB2 + " VALUES ('rgates', 'Robert Gates', 35, 1.28), " + + "('tlee', 'Tod Lee', 32, 2.32)"); + sqls.add("SELECT * FROM " + TEST_VIEW_TB2); + execBatch(ADMIN1, sqls); + } + + private void createTestRole(String user, String roleName) throws Exception { + Connection conn = context.createConnection(user); + Statement stmt = conn.createStatement(); + try { + exec(stmt, "DROP ROLE " + roleName); + } catch (Exception ex) { + LOGGER.info("test role doesn't exist, but it's ok"); + } finally { + exec(stmt, "CREATE ROLE " + roleName); + } + if (stmt != null) { + stmt.close(); + } + if (conn != null) { + conn.close(); + } + } + + private void grantAndValidatePrivilege(String testView, String testRole, String testGroup, + String user, boolean revoke) throws Exception { + createTestRole(ADMIN1, testRole); + List sqls = new ArrayList(); + + // grant privilege + sqls.add("USE " + TEST_VIEW_DB); + sqls.add("GRANT SELECT ON TABLE " + testView + " TO ROLE " + testRole); + sqls.add("GRANT ROLE " + testRole + " TO GROUP " + testGroup); + execBatch(ADMIN1, sqls); + + // show grant should pass and could list view + assertTrue("can not find select privilege from " + testRole, + execValidate(ADMIN1, "SHOW GRANT ROLE " + testRole + " ON TABLE " + testView, + TEST_VIEW_DB, "privilege", "select")); + assertTrue("can not find " + testView, + execValidate(user, "SHOW TABLES", TEST_VIEW_DB, "tab_name", testView)); + + // select from view should pass + sqls.clear(); + sqls.add("USE " + TEST_VIEW_DB); + sqls.add("SELECT * FROM " + testView); + execBatch(user, sqls); + + if (revoke) { + // revoke privilege + sqls.clear(); + sqls.add("USE " + TEST_VIEW_DB); + sqls.add("REVOKE SELECT ON TABLE " + testView + " FROM ROLE " + testRole); + execBatch(ADMIN1, sqls); + + // shouldn't be able to show grant + assertFalse("should not find select 
from " + testRole,
+          execValidate(ADMIN1, "SHOW GRANT ROLE " + testRole + " ON TABLE " + testView,
+              TEST_VIEW_DB, "privilege", "select"));
+
+      // select from view should fail
+      sqls.clear();
+      sqls.add("USE " + TEST_VIEW_DB);
+      sqls.add("SELECT * FROM " + testView);
+      try {
+        execBatch(user, sqls);
+      } catch (SQLException ex) {
+        LOGGER.info("Expected SQLException here", ex);
+      }
+    }
+  }
+
+  private void grantAndValidatePrivilege(String testView, String testRole,
+      String testGroup, String user) throws Exception {
+    grantAndValidatePrivilege(testView, testRole, testGroup, user, true);
+  }
+  /**
+   * Create view1 and view2 from view1
+   * Grant and validate select privileges on both views
+   * @throws Exception
+   */
+  @Test
+  public void testDbViewFromView() throws Exception {
+    List sqls = new ArrayList();
+    // create a simple view
+    sqls.add("USE " + TEST_VIEW_DB);
+    sqls.add("CREATE VIEW " + TEST_VIEW
+        + "(userid,link) AS SELECT userid,link from " + TEST_VIEW_TB);
+
+    // create another view from the previous view
+    String testView2 = "view1_from_" + TEST_VIEW;
+    String testRole2 = testView2 + "_test_role";
+    sqls.add(String.format("CREATE VIEW %s AS SELECT userid,link from %s",
+        testView2, TEST_VIEW));
+
+    String testView3 = "view2_from_" + TEST_VIEW;
+    sqls.add(String.format("CREATE VIEW %s(userid,link) AS SELECT userid,link from %s",
+        testView3, TEST_VIEW));
+
+    execBatch(ADMIN1, sqls);
+
+    // validate privileges
+    grantAndValidatePrivilege(TEST_VIEW, TEST_VIEW_ROLE, USERGROUP1, USER1_1);
+    grantAndValidatePrivilege(testView2, testRole2, USERGROUP2, USER2_1);
+
+    // Disabled because of SENTRY-745, also need to backport HIVE-10875
+    //grantAndValidatePrivilege(testView3, testRole3, USERGROUP3, USER3_1);
+  }
+
+  /**
+   * Create a view by joining two tables
+   * Grant and verify the select privilege
+   * @throws Exception
+   */
+  @Test
+  public void TestDbViewWithJoin() throws Exception {
+    List sqls = new ArrayList();
+    // create a join view
+    sqls.add("USE " + TEST_VIEW_DB);
+    sqls.add(String.format("create view %s as select name,age,gpa from %s join %s on "
+        + "(%s.userid=%s.userid) where name='Tod Lee'", TEST_VIEW, TEST_VIEW_TB2,
+        TEST_VIEW_TB, TEST_VIEW_TB2, TEST_VIEW_TB));
+    execBatch(ADMIN1, sqls);
+
+    // validate privileges
+    grantAndValidatePrivilege(TEST_VIEW, TEST_VIEW_ROLE, USERGROUP1, USER1_1);
+  }
+
+  /**
+   * Create a view with a nested query
+   * Grant and verify the select privilege
+   * @throws Exception
+   * SENTRY-716: Hive plugin does not correctly enforce
+   * privileges for views in case of nested queries
+   * Once HIVE-10875 is backported to the Sentry repo, this test will be enabled.
+   */
+  @Ignore ("After SENTRY-716 is fixed, turn on this test")
+  @Test
+  public void TestDbViewWithNestedQuery() throws Exception {
+    List sqls = new ArrayList();
+    // create a simple view over the base table
+    sqls.add("USE " + TEST_VIEW_DB);
+    sqls.add("CREATE VIEW " + TEST_VIEW + " AS SELECT * FROM " + TEST_VIEW_TB);
+    execBatch(ADMIN1, sqls);
+    grantAndValidatePrivilege(TEST_VIEW, TEST_VIEW_ROLE, USERGROUP1, USER1_1, false);
+
+    sqls.clear();
+    sqls.add("USE " + TEST_VIEW_DB);
+    sqls.add("SELECT * FROM (SELECT * FROM " + TEST_VIEW + ") v2");
+    execBatch(USER1_1, sqls);
+  }
+
+  /**
+   * Create a view by unioning two tables
+   * Grant and verify the select privilege
+   * @throws Exception
+   * SENTRY-747: Create a view by union tables, grant select
+   * then select from view encounter errors
+   * Once HIVE-10875 is backported to the Sentry repo, this test will be enabled.
+ */ + @Ignore ("After SENTRY-747 is fixed, turn on this test") + @Test + public void TestDbViewWithUnion() throws Exception { + List sqls = new ArrayList(); + String testTable = "test_user_info"; + sqls.add("USE " + TEST_VIEW_DB); + sqls.add("DROP TABLE IF EXISTS " + testTable); + sqls.add("CREATE TABLE " + testTable + " (userid VARCHAR(64), name STRING, address STRING, tel STRING) "); + sqls.add("INSERT INTO TABLE " + testTable + " VALUES " + + "('tlee', " + "'Tod Lee', '1234 23nd Ave SFO, CA', '123-456-7890')"); + sqls.add("SELECT * FROM " + testTable); + sqls.add(String.format("CREATE VIEW " + TEST_VIEW + " AS " + + "SELECT u.userid, u.name, u.address, res.uid " + + "FROM (" + + "SELECT t1.userid AS uid " + + "FROM %s t1 " + + "UNION ALL " + + "SELECT t2.userid AS uid " + + "FROM %s t2 " + + ") res JOIN %s u ON (u.userid = res.uid)", + TEST_VIEW_TB, TEST_VIEW_TB2, testTable)); + execBatch(ADMIN1, sqls); + grantAndValidatePrivilege(TEST_VIEW, TEST_VIEW_ROLE, USERGROUP1, USER1_1); + } +} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbConnections.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbConnections.java index 70242631f..2af05360d 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbConnections.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbConnections.java @@ -19,8 +19,6 @@ import static org.junit.Assert.*; -import java.io.File; -import java.io.FileOutputStream; import java.sql.Connection; import java.sql.Statement; @@ -32,10 +30,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.io.Resources; - public class TestDbConnections extends AbstractTestWithStaticConfiguration { - private PolicyFile policyFile; @BeforeClass public static void setupTestStaticConfiguration() throws Exception { @@ -48,13 +43,13 @@ public static void setupTestStaticConfiguration() throws Exception { public void setup() throws Exception { super.setupAdmin(); super.setup(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); + PolicyFile.setAdminOnServer1(ADMINGROUP); } /** * Currently the hive binding opens a new server connection for each * statement. This test verifies that the client connection is closed properly - * at the end. Test Queries, DDLs, Auth DDLs and metdata filtering (eg show + * at the end. Test Queries, DDLs, Auth DDLs and metadata filtering (eg show * tables/databases) * @throws Exception */ @@ -62,6 +57,7 @@ public void setup() throws Exception { public void testClientConnections() throws Exception { String roleName = "connectionTest"; long preConnectionClientId; + // Connect through user admin1. Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); @@ -74,37 +70,47 @@ public void testClientConnections() throws Exception { assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); assertEquals(0, getSentrySrv().getNumActiveClients()); - // client connection is closed after DDLs + // Verify that client connection is closed after DDLs. preConnectionClientId = getSentrySrv().getTotalClients(); statement.execute("CREATE TABLE t1 (c1 string)"); assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); assertEquals(0, getSentrySrv().getNumActiveClients()); - // client connection is closed after queries + // Verify that client connection is closed after queries. 
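+ // The checks below all follow one pattern; a minimal sketch, using only the helpers
+ // already visible in this test (getSentrySrv(), getTotalClients(), getNumActiveClients()):
+ //   long before = getSentrySrv().getTotalClients();        // record the high-water mark
+ //   statement.execute("SELECT * FROM t1");                 // any statement needing an authz check
+ //   assertTrue(before < getSentrySrv().getTotalClients()); // a new Sentry client was opened...
+ //   assertEquals(0, getSentrySrv().getNumActiveClients()); // ...and closed again afterwards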
preConnectionClientId = getSentrySrv().getTotalClients(); statement.execute("SELECT * FROM t1"); assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); assertEquals(0, getSentrySrv().getNumActiveClients()); + // Verify client invocation via metastore filter. + preConnectionClientId = getSentrySrv().getTotalClients(); + statement.executeQuery("show tables"); + assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); + assertEquals(0, getSentrySrv().getNumActiveClients()); + + // Verify that client connection is closed after drop table. preConnectionClientId = getSentrySrv().getTotalClients(); statement.execute("DROP TABLE t1"); assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); assertEquals(0, getSentrySrv().getNumActiveClients()); - // client connection is closed after auth DDL + // Verify that client connection is closed after auth DDL. preConnectionClientId = getSentrySrv().getTotalClients(); statement.execute("CREATE ROLE " + roleName); assertEquals(0, getSentrySrv().getNumActiveClients()); assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); + context.assertSentryException(statement, "CREATE ROLE " + roleName, SentryAlreadyExistsException.class.getSimpleName()); assertEquals(0, getSentrySrv().getNumActiveClients()); statement.execute("DROP ROLE " + roleName); assertEquals(0, getSentrySrv().getNumActiveClients()); - // client invocation via metastore filter + // Verify client invocation via metastore filter preConnectionClientId = getSentrySrv().getTotalClients(); statement.executeQuery("show tables"); + // There are no tables, so auth check does not happen + // sentry will create connection to get privileges for cache assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); assertEquals(0, getSentrySrv().getNumActiveClients()); @@ -112,24 +118,25 @@ public void testClientConnections() throws Exception { connection.close(); assertEquals(0, getSentrySrv().getNumActiveClients()); + // Connect through user user1_1. connection = context.createConnection(USER1_1); statement = context.createStatement(connection); assertEquals(0, getSentrySrv().getNumActiveClients()); - // verify client connection is closed after statement auth error + // Verify that client connection is closed after statement auth error. preConnectionClientId = getSentrySrv().getTotalClients(); context.assertAuthzException(statement, "USE DB_1"); assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); assertEquals(0, getSentrySrv().getNumActiveClients()); - // verify client connection is closed after auth DDL error + // Verify that client connection is closed after auth DDL error. preConnectionClientId = getSentrySrv().getTotalClients(); context.assertSentryException(statement, "CREATE ROLE " + roleName, SentryAccessDeniedException.class.getSimpleName()); assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); assertEquals(0, getSentrySrv().getNumActiveClients()); - // client invocation via metastore filter + // Verify that client invocation via metastore filter. 
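+ // Metadata filtering for the non-admin user also goes through the Sentry client:
+ // even a plain "show databases" should open a connection and release it on completion.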
preConnectionClientId = getSentrySrv().getTotalClients(); statement.executeQuery("show databases"); assertTrue(preConnectionClientId < getSentrySrv().getTotalClients()); @@ -137,7 +144,6 @@ public void testClientConnections() throws Exception { statement.close(); connection.close(); - assertEquals(0, getSentrySrv().getNumActiveClients()); } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbCrossDbOps.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbCrossDbOps.java index 719dddfed..0aa166c2c 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbCrossDbOps.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbCrossDbOps.java @@ -34,7 +34,6 @@ public void setup() throws Exception { } @BeforeClass public static void setupTestStaticConfiguration() throws Exception{ - //policy_on_hdfs = true; useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbDDLAuditLog.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbDDLAuditLog.java index 2cecdfda0..3afd6b26e 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbDDLAuditLog.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbDDLAuditLog.java @@ -18,21 +18,19 @@ package org.apache.sentry.tests.e2e.dbprovider; import static org.hamcrest.core.Is.is; -import static org.hamcrest.text.IsEqualIgnoringCase.equalToIgnoringCase; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.sql.Connection; import java.sql.Statement; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; -import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; +import org.apache.sentry.provider.db.log.appender.AuditLoggerTestAppender; +import org.apache.sentry.provider.db.log.util.CommandUtil; import org.apache.sentry.provider.db.log.util.Constants; import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; import org.codehaus.jettison.json.JSONObject; @@ -42,36 +40,12 @@ public class TestDbDDLAuditLog extends AbstractTestWithStaticConfiguration { - public static class TestAppender extends AppenderSkeleton { - public static List events = new ArrayList(); - - public void close() { - } - - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(LoggingEvent event) { - events.add(event); - } - - static String getLastLogEvent() { - return events.get(events.size() - 1).getMessage().toString(); - } - - static Level getLastLogLevel() { - return events.get(events.size() - 1).getLevel(); - } - } - @BeforeClass public static void setupTestStaticConfiguration() throws Exception { useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); Logger logger = Logger.getLogger("sentry.hive.authorization.ddl.logger"); - TestAppender testAppender = new TestAppender(); + AuditLoggerTestAppender testAppender = new AuditLoggerTestAppender(); logger.addAppender(testAppender); logger.setLevel(Level.INFO); } @@ -98,6 +72,7 @@ public void 
testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_CREATE_ROLE); fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "CREATE ROLE " + roleName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); statement.execute("GRANT ROLE " + roleName + " TO GROUP " + groupName); @@ -106,8 +81,12 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ROLE " + roleName + " TO GROUP " + groupName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); + statement.execute("create database " + dbName); + statement.execute("use " + dbName); + statement.execute("CREATE TABLE " + tableName + " (c1 string)"); statement.execute("GRANT ALL ON DATABASE " + dbName + " TO ROLE " + roleName); fieldValueMap.clear(); fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_GRANT_PRIVILEGE); @@ -115,6 +94,7 @@ public void testBasic() throws Exception { + " TO ROLE " + roleName); fieldValueMap.put(Constants.LOG_FIELD_DATABASE_NAME, dbName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); statement.execute("GRANT SELECT ON TABLE " + tableName + " TO ROLE " + roleName @@ -125,6 +105,7 @@ public void testBasic() throws Exception { + " TO ROLE " + roleName + " WITH GRANT OPTION"); fieldValueMap.put(Constants.LOG_FIELD_TABLE_NAME, tableName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); // for error audit log @@ -136,6 +117,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_CREATE_ROLE); fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "CREATE ROLE " + roleName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } try { @@ -147,6 +129,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ROLE errorROLE TO GROUP " + groupName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } try { @@ -158,6 +141,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT ALL ON DATABASE " + dbName + " TO ROLE errorRole"); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } try { @@ -169,6 +153,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT INSERT ON DATABASE " + dbName + " TO ROLE errorRole"); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } try { @@ -180,6 +165,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT SELECT ON DATABASE " + dbName + " TO ROLE errorRole"); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); 
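+ // Putting LOG_FIELD_IP_ADDRESS with a null value tells assertAuditLog (below) to only
+ // verify that the logged address is a well-formed IP via CommandUtil.assertIPInAuditLog,
+ // rather than comparing it against a fixed expected value.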
assertAuditLog(fieldValueMap); } try { @@ -191,6 +177,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "GRANT SELECT ON TABLE " + tableName + " TO ROLE errorRole"); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } @@ -201,6 +188,7 @@ public void testBasic() throws Exception { + " FROM ROLE " + roleName); fieldValueMap.put(Constants.LOG_FIELD_TABLE_NAME, tableName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); statement.execute("REVOKE ALL ON DATABASE " + dbName + " FROM ROLE " + roleName); @@ -210,6 +198,7 @@ public void testBasic() throws Exception { + " FROM ROLE " + roleName); fieldValueMap.put(Constants.LOG_FIELD_DATABASE_NAME, dbName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); statement.execute("REVOKE ROLE " + roleName + " FROM GROUP " + groupName); @@ -218,14 +207,16 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE ROLE " + roleName + " FROM GROUP " + groupName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); statement.execute("DROP ROLE " + roleName); fieldValueMap.clear(); fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DROP_ROLE); fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "DROP ROLE " + roleName); - assertAuditLog(fieldValueMap); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.TRUE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); + assertAuditLog(fieldValueMap); // for error audit log try { @@ -237,6 +228,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE SELECT ON TABLE " + tableName + " FROM ROLE errorRole"); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } @@ -249,6 +241,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE ALL ON DATABASE " + dbName + " FROM ROLE errorRole"); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } @@ -261,6 +254,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "REVOKE ROLE errorRole FROM GROUP " + groupName); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } @@ -272,6 +266,7 @@ public void testBasic() throws Exception { fieldValueMap.put(Constants.LOG_FIELD_OPERATION, Constants.OPERATION_DROP_ROLE); fieldValueMap.put(Constants.LOG_FIELD_OPERATION_TEXT, "DROP ROLE errorRole"); fieldValueMap.put(Constants.LOG_FIELD_ALLOWED, Constants.FALSE); + fieldValueMap.put(Constants.LOG_FIELD_IP_ADDRESS, null); assertAuditLog(fieldValueMap); } @@ -280,11 +275,16 @@ public void testBasic() throws Exception { } private void assertAuditLog(Map fieldValueMap) throws Exception { - assertThat(TestAppender.getLastLogLevel(), is(Level.INFO)); - JSONObject jsonObject = new 
JSONObject(TestAppender.getLastLogEvent()); + assertThat(AuditLoggerTestAppender.getLastLogLevel(), is(Level.INFO)); + JSONObject jsonObject = new JSONObject(AuditLoggerTestAppender.getLastLogEvent()); if (fieldValueMap != null) { for (Map.Entry entry : fieldValueMap.entrySet()) { - assertThat(jsonObject.get(entry.getKey()).toString(), equalToIgnoringCase(entry.getValue())); + String entryKey = entry.getKey(); + if (Constants.LOG_FIELD_IP_ADDRESS.equals(entryKey)) { + assertTrue(CommandUtil.assertIPInAuditLog(jsonObject.get(entryKey).toString())); + } else { + assertTrue(entry.getValue().equalsIgnoreCase(jsonObject.get(entryKey).toString())); + } } } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbEndToEnd.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbEndToEnd.java index acb789f0b..d9f30e0d7 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbEndToEnd.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbEndToEnd.java @@ -24,7 +24,6 @@ import java.io.FileOutputStream; import java.sql.Connection; import java.sql.ResultSet; -import java.sql.SQLException; import java.sql.Statement; import org.apache.sentry.provider.db.SentryAccessDeniedException; @@ -39,7 +38,6 @@ public class TestDbEndToEnd extends AbstractTestWithStaticConfiguration { private final String SINGLE_TYPE_DATA_FILE_NAME = "kv1.dat"; private File dataFile; - private PolicyFile policyFile; @BeforeClass public static void setupTestStaticConfiguration() throws Exception{ @@ -56,13 +54,14 @@ public void setup() throws Exception { FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); + PolicyFile.setAdminOnServer1(ADMINGROUP); } @Test public void testBasic() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); + statement.execute("DROP TABLE IF EXISTS t1"); statement.execute("CREATE TABLE t1 (c1 string)"); statement.execute("CREATE ROLE user_role"); statement.execute("GRANT SELECT ON TABLE t1 TO ROLE user_role"); @@ -96,6 +95,7 @@ public void testBasic() throws Exception { public void testNonDefault() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); + statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); statement.execute("CREATE database " + DB1); statement.execute("USE " + DB1); statement.execute("CREATE TABLE t1 (c1 string)"); @@ -116,6 +116,7 @@ public void testNonDefault() throws Exception { public void testUPrivileges() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); + statement.execute("DROP TABLE IF EXISTS t1"); statement.execute("CREATE TABLE t1 (c1 string)"); statement.execute("CREATE ROLE user_role"); statement.execute("CREATE ROLE uri_role"); @@ -171,7 +172,10 @@ public void testEndToEnd1() throws Exception { statement.execute("DROP DATABASE IF EXISTS " + DB2 + " CASCADE"); statement.execute("CREATE DATABASE " + DB2); statement.execute("USE " + DB2); + statement.execute("DROP TABLE IF EXISTS " + DB2 + "." + tableName1); statement.execute("DROP TABLE IF EXISTS " + DB2 + "." 
+ tableName2); + statement.execute("create table " + DB2 + "." + tableName1 + + " (under_col int comment 'the under column', value string)"); statement.execute("create table " + DB2 + "." + tableName2 + " (under_col int comment 'the under column', value string)"); statement.execute("load data local inpath '" + dataFile.getPath() @@ -195,6 +199,9 @@ public void testEndToEnd1() throws Exception { + "' TO ROLE data_uri"); statement.execute("USE " + DB1); + statement.execute("DROP TABLE IF EXISTS " + DB1 + "." + tableName1); + statement.execute("create table " + DB1 + "." + tableName1 + + " (under_col int comment 'the under column', value string)"); statement.execute("GRANT SELECT ON TABLE " + tableName1 + " TO ROLE select_tb1"); @@ -223,7 +230,7 @@ public void testEndToEnd1() throws Exception { // 7 connection = context.createConnection(ADMIN1); statement = context.createStatement(connection); - statement.execute("USE " + DB2); + statement.execute("USE " + DB1); statement.execute("DROP TABLE IF EXISTS " + DB1 + "." + tableName1); statement.execute("create table " + DB1 + "." + tableName1 + " (under_col int comment 'the under column', value string)"); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbExportImportPrivileges.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbExportImportPrivileges.java index 3d67ab76a..43064ee34 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbExportImportPrivileges.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbExportImportPrivileges.java @@ -21,16 +21,22 @@ import org.apache.sentry.tests.e2e.hive.TestExportImportPrivileges; import org.junit.Before; import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestDbExportImportPrivileges extends TestExportImportPrivileges { + private static final Logger LOGGER = LoggerFactory. + getLogger(TestDbExportImportPrivileges.class); @Override @Before public void setup() throws Exception { + LOGGER.info("TestDbExportImportPrivileges setup"); super.setupAdmin(); super.setup(); } @BeforeClass public static void setupTestStaticConfiguration() throws Exception { + LOGGER.info("TestDbExportImportPrivileges setupTestStaticConfiguration"); useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbJDBCInterface.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbJDBCInterface.java new file mode 100644 index 000000000..a26e90a2c --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbJDBCInterface.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.tests.e2e.dbprovider; + +import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; +import org.apache.sentry.tests.e2e.hive.TestJDBCInterface; +import org.junit.Before; +import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TestDbJDBCInterface extends TestJDBCInterface { + private static final Logger LOGGER = LoggerFactory. + getLogger(TestDbJDBCInterface.class); + @Override + @Before + public void setup() throws Exception { + LOGGER.info("TestDbJDBCInterface setup"); + super.setupAdmin(); + super.setup(); + } + + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception { + LOGGER.info("TestDbJDBCInterface setupTestStaticConfiguration"); + useSentryService = true; + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + +} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbMetadataObjectRetrieval.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbMetadataObjectRetrieval.java index 53c7d0b79..ec99b3007 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbMetadataObjectRetrieval.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbMetadataObjectRetrieval.java @@ -21,19 +21,24 @@ import org.apache.sentry.tests.e2e.hive.TestMetadataObjectRetrieval; import org.junit.Before; import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestDbMetadataObjectRetrieval extends TestMetadataObjectRetrieval { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestDbMetadataObjectRetrieval.class); @Override @Before public void setup() throws Exception { + LOGGER.info("TestDbMetadataObjectRetrieval setup"); super.setupAdmin(); super.setup(); } @BeforeClass public static void setupTestStaticConfiguration() throws Exception { + LOGGER.info("TestDbMetadataObjectRetrieval setupTestStaticConfiguration"); useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); - } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegeCleanupOnDrop.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegeCleanupOnDrop.java index a35cf2167..767bcbe02 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegeCleanupOnDrop.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegeCleanupOnDrop.java @@ -57,15 +57,19 @@ public class TestDbPrivilegeCleanupOnDrop extends @BeforeClass public static void setupTestStaticConfiguration() throws Exception { useSentryService = true; - setMetastoreListener = true; + if (!setMetastoreListener) { + setMetastoreListener = true; + } AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); } + @Override @Before - public void setUp() 
throws Exception { + public void setup() throws Exception { + super.setupAdmin(); + super.setup(); // context = createContext(); File dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); - setupAdmin(); FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); @@ -181,8 +185,6 @@ public void testRenameTables() throws Exception { */ @Test public void testDropAndRenameWithMultiAction() throws Exception { - super.setupAdmin(); - Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("CREATE ROLE user_role"); @@ -322,7 +324,9 @@ private void verifyPrivilegeDropped(Statement statement, List roles, ResultSet resultSet = statement.executeQuery("SHOW GRANT ROLE " + roleName); while (resultSet.next()) { - assertFalse(objectName.equalsIgnoreCase(resultSet.getString(resultPos))); + String returned = resultSet.getString(resultPos); + assertFalse("value " + objectName + " shouldn't be detected, but actually " + returned + " is found from resultSet", + objectName.equalsIgnoreCase(returned)); } resultSet.close(); } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegesAtDatabaseScope.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegesAtDatabaseScope.java index e1cda2980..883bedd95 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegesAtDatabaseScope.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegesAtDatabaseScope.java @@ -21,20 +21,26 @@ import org.apache.sentry.tests.e2e.hive.TestPrivilegesAtDatabaseScope; import org.junit.Before; import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestDbPrivilegesAtDatabaseScope extends TestPrivilegesAtDatabaseScope { + private static final Logger LOGGER = LoggerFactory. 
+ getLogger(TestDbPrivilegesAtDatabaseScope.class); + @Override @Before public void setup() throws Exception { + LOGGER.info("TestDbPrivilegesAtDatabaseScope setup"); super.setupAdmin(); super.setup(); } @BeforeClass public static void setupTestStaticConfiguration() throws Exception { + LOGGER.info("TestDbPrivilegesAtDatabaseScope setupTestStaticConfiguration"); useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); - } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegesAtTableScope.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegesAtTableScope.java index 9fb6f7f83..a4f07df8d 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegesAtTableScope.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbPrivilegesAtTableScope.java @@ -28,12 +28,12 @@ public class TestDbPrivilegesAtTableScope extends TestPrivilegesAtTableScope { public void setup() throws Exception { super.setupAdmin(); super.setup(); + prepareDBDataForTest(); } @BeforeClass public static void setupTestStaticConfiguration() throws Exception { useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); - prepareDBDataForTest(); } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbRuntimeMetadataRetrieval.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbRuntimeMetadataRetrieval.java index 53246562f..8d98179ea 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbRuntimeMetadataRetrieval.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbRuntimeMetadataRetrieval.java @@ -20,17 +20,24 @@ import org.apache.sentry.tests.e2e.hive.TestRuntimeMetadataRetrieval; import org.junit.Before; import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestDbRuntimeMetadataRetrieval extends TestRuntimeMetadataRetrieval { + private static final Logger LOGGER = LoggerFactory. 
+ getLogger(TestDbRuntimeMetadataRetrieval.class); + @Override @Before public void setup() throws Exception { + LOGGER.info("TestDbRuntimeMetadataRetrieval setup"); super.setupAdmin(); super.setup(); } @BeforeClass public static void setupTestStaticConfiguration() throws Exception { + LOGGER.info("TestDbRuntimeMetadataRetrieval setupTestStaticConfiguration"); useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSandboxOps.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSandboxOps.java index e21dfe366..fa429e75d 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSandboxOps.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSandboxOps.java @@ -16,8 +16,6 @@ */ package org.apache.sentry.tests.e2e.dbprovider; -import static org.junit.Assert.assertTrue; - import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; import org.apache.sentry.tests.e2e.hive.TestSandboxOps; import org.junit.Before; diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java index 1af8baa11..8cb04f763 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java @@ -17,38 +17,35 @@ package org.apache.sentry.tests.e2e.dbprovider; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - import java.sql.Connection; +import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; import java.util.HashMap; -import java.util.Map; +import java.util.List; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.sentry.binding.hive.conf.HiveAuthzConf; import org.apache.sentry.provider.db.SentryAccessDeniedException; -import org.apache.sentry.provider.file.PolicyFile; import org.apache.sentry.tests.e2e.hive.DummySentryOnFailureHook; -import org.apache.sentry.tests.e2e.hive.StaticUserGroup; import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory; +import org.junit.After; import org.junit.Assume; -import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; public class TestDbSentryOnFailureHookLoading extends AbstractTestWithDbProvider { - Map testProperties; - @Before - public void setup() throws Exception { - testProperties = new HashMap(); - testProperties.put(HiveAuthzConf.AuthzConfVars.AUTHZ_ONFAILURE_HOOKS.getVar(), + @BeforeClass + public static void setup() throws Exception { + properties = new HashMap(); + properties.put(HiveAuthzConf.AuthzConfVars.AUTHZ_ONFAILURE_HOOKS.getVar(), DummySentryOnFailureHook.class.getName()); - createContext(testProperties); + createContext(); DummySentryOnFailureHook.invoked = false; // Do not run these tests if run with external HiveServer2 @@ -62,6 +59,29 @@ public void setup() throws Exception { } } + @After + public void clearDB() throws Exception { + Connection connection; + Statement statement; + 
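+ // Tear-down: connect as admin, list every role via SHOW ROLES, and drop each one so
+ // that roles created by one test cannot leak into the next; finally close the context.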
connection = context.createConnection(ADMIN1); + statement = context.createStatement(connection); + ResultSet resultSet; + resultSet = statement.executeQuery("SHOW roles"); + List roles = new ArrayList(); + while ( resultSet.next()) { + roles.add(resultSet.getString(1)); + } + for(String role:roles) { + statement.execute("DROP Role " + role); + } + + statement.close(); + connection.close(); + if (context != null) { + context.close(); + } + } + /* Admin creates database DB_2 * user1 tries to drop DB_2, but it has permissions for DB_1. */ @@ -71,26 +91,31 @@ public void testOnFailureHookLoading() throws Exception { // setup db objects needed by the test Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); + try { + statement.execute("DROP ROLE admin_role"); + } catch (Exception ex) { + //It is ok if admin_role already exists + } statement.execute("CREATE ROLE admin_role"); statement.execute("GRANT ALL ON SERVER " + HiveServerFactory.DEFAULT_AUTHZ_SERVER_NAME + " TO ROLE admin_role"); statement.execute("GRANT ROLE admin_role TO GROUP " + ADMINGROUP); - statement.execute("CREATE ROLE all_db1"); - statement.execute("GRANT ALL ON DATABASE DB_1 TO ROLE all_db1"); - statement.execute("GRANT ROLE all_db1 TO GROUP " + USERGROUP1); - - statement.execute("CREATE ROLE read_db2_tab2"); - statement.execute("GRANT ROLE read_db2_tab2 TO GROUP " + USERGROUP1); - statement.execute("DROP DATABASE IF EXISTS DB_1 CASCADE"); statement.execute("DROP DATABASE IF EXISTS DB_2 CASCADE"); statement.execute("CREATE DATABASE DB_1"); statement.execute("CREATE DATABASE DB_2"); statement.execute("CREATE TABLE db_2.tab1(a int )"); + statement.execute("CREATE ROLE all_db1"); + statement.execute("GRANT ALL ON DATABASE DB_1 TO ROLE all_db1"); + statement.execute("GRANT ROLE all_db1 TO GROUP " + USERGROUP1); + + statement.execute("CREATE ROLE lock_db2_tab1"); + statement.execute("GRANT ROLE lock_db2_tab1 TO GROUP " + USERGROUP1); + statement.execute("USE db_2"); - statement.execute("GRANT SELECT ON TABLE tab2 TO ROLE read_db2_tab2");// To give user1 privilege to do USE db_2 + statement.execute("GRANT LOCK ON TABLE tab1 TO ROLE lock_db2_tab1");// To give user1 privilege to do USE db_2 statement.close(); connection.close(); @@ -143,6 +168,7 @@ public void testOnFailureHookForAuthDDL() throws Exception { statement.execute("DROP DATABASE IF EXISTS DB_1 CASCADE"); statement.execute("DROP DATABASE IF EXISTS DB_2 CASCADE"); statement.execute("CREATE DATABASE DB_1"); + statement.execute("CREATE TABLE DB_1.tab1(a int )"); statement.execute("CREATE ROLE all_db1"); statement.execute("GRANT ALL ON DATABASE DB_1 TO ROLE all_db1"); statement.execute("GRANT ROLE all_db1 TO GROUP " + USERGROUP1); @@ -189,12 +215,12 @@ public void testOnFailureHookForAuthDDL() throws Exception { //Grant privilege on table doesnt expose db and table objects verifyFailureHook(statement, - "GRANT ALL ON TABLE tab1 TO ROLE admin_role", + "GRANT ALL ON TABLE db_1.tab1 TO ROLE admin_role", HiveOperation.GRANT_PRIVILEGE, null, null, true); //Revoke privilege on table doesnt expose db and table objects verifyFailureHook(statement, - "REVOKE ALL ON TABLE server1 FROM ROLE admin_role", + "REVOKE ALL ON TABLE db_1.tab1 FROM ROLE admin_role", HiveOperation.REVOKE_PRIVILEGE, null, null, true); //Grant privilege on database doesnt expose db and table objects @@ -216,12 +242,12 @@ public void testOnFailureHookForAuthDDL() throws Exception { private void verifyFailureHook(Statement statement, String sqlStr, 
HiveOperation expectedOp, String dbName, String tableName, boolean checkSentryAccessDeniedException) throws Exception { // negative test case: non admin user can't create role - assertFalse(DummySentryOnFailureHook.invoked); + Assert.assertFalse(DummySentryOnFailureHook.invoked); try { statement.execute(sqlStr); Assert.fail("Expected SQL exception for " + sqlStr); } catch (SQLException e) { - assertTrue(DummySentryOnFailureHook.invoked); + Assert.assertTrue("FailureHook is not ran : " + e.getMessage(), DummySentryOnFailureHook.invoked); } finally { DummySentryOnFailureHook.invoked = false; } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbUriPermissions.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbUriPermissions.java index 43a310f99..1a90e06ea 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbUriPermissions.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbUriPermissions.java @@ -21,15 +21,23 @@ import org.junit.Before; import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + public class TestDbUriPermissions extends TestUriPermissions { + private static final Logger LOGGER = LoggerFactory. + getLogger(TestDbUriPermissions.class); + @Override @Before public void setup() throws Exception { + LOGGER.info("TestDbUriPermissions setup"); super.setupAdmin(); super.setup(); } @BeforeClass public static void setupTestStaticConfiguration() throws Exception { + LOGGER.info("TestDbUriPermissions setupTestStaticConfiguration"); useSentryService = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestPrivilegeWithGrantOption.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestPrivilegeWithGrantOption.java index 5c49f98ec..65ece8f4b 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestPrivilegeWithGrantOption.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestPrivilegeWithGrantOption.java @@ -17,16 +17,12 @@ package org.apache.sentry.tests.e2e.dbprovider; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.sentry.binding.hive.conf.HiveAuthzConf; @@ -221,7 +217,7 @@ private void runSQLWithError(Statement statement, String sqlStr, HiveOperation expectedOp, String dbName, String tableName, boolean checkSentryAccessDeniedException) throws Exception { // negative test case: non admin user can't create role - assertFalse(DummySentryOnFailureHook.invoked); + Assert.assertFalse(DummySentryOnFailureHook.invoked); try { statement.execute(sqlStr); Assert.fail("Expected SQL exception for " + sqlStr); @@ -241,7 +237,7 @@ private void verifyFailureHook(HiveOperation expectedOp, return; } - assertTrue(DummySentryOnFailureHook.invoked); + Assert.assertTrue(DummySentryOnFailureHook.invoked); if (expectedOp != null) { Assert.assertNotNull("Hive op is null for op: " + expectedOp, 
DummySentryOnFailureHook.hiveOp); Assert.assertTrue(expectedOp.equals(DummySentryOnFailureHook.hiveOp)); @@ -265,8 +261,8 @@ private void verifySingleGrantWithGrantOption(Statement statetment, String statementSql, int dbObjectPosition, String dbObjectName) throws Exception { ResultSet res = statetment.executeQuery(statementSql); - assertTrue(res.next()); - assertEquals(dbObjectName, res.getString(dbObjectPosition)); + Assert.assertTrue(res.next()); + Assert.assertEquals(dbObjectName, res.getString(dbObjectPosition)); res.close(); } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestPrivilegeWithHAGrantOption.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestPrivilegeWithHAGrantOption.java index 84f998e10..d837a85a2 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestPrivilegeWithHAGrantOption.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestPrivilegeWithHAGrantOption.java @@ -21,32 +21,27 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.HashMap; -import java.util.Map; -import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.junit.Assert; -import junit.framework.Assert; +import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.sentry.binding.hive.conf.HiveAuthzConf; import org.apache.sentry.provider.db.SentryAccessDeniedException; import org.apache.sentry.tests.e2e.hive.DummySentryOnFailureHook; import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory; import org.junit.Assume; -import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; public class TestPrivilegeWithHAGrantOption extends AbstractTestWithDbProvider { - Map testProperties; - - @Before - public void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { haEnabled = true; - testProperties = new HashMap(); - testProperties.put(HiveAuthzConf.AuthzConfVars.AUTHZ_ONFAILURE_HOOKS.getVar(), + properties = new HashMap(); + properties.put(HiveAuthzConf.AuthzConfVars.AUTHZ_ONFAILURE_HOOKS.getVar(), DummySentryOnFailureHook.class.getName()); - createContext(testProperties); + createContext(); DummySentryOnFailureHook.invoked = false; // Do not run these tests if run with external HiveServer2 @@ -137,12 +132,12 @@ public void testOnGrantPrivilege() throws Exception { private void verifyFailureHook(Statement statement, String sqlStr, HiveOperation expectedOp, String dbName, String tableName, boolean checkSentryAccessDeniedException) throws Exception { // negative test case: non admin user can't create role - assertFalse(DummySentryOnFailureHook.invoked); + Assert.assertFalse(DummySentryOnFailureHook.invoked); try { statement.execute(sqlStr); Assert.fail("Expected SQL exception for " + sqlStr); } catch (SQLException e) { - assertTrue(DummySentryOnFailureHook.invoked); + Assert.assertTrue(DummySentryOnFailureHook.invoked); } finally { DummySentryOnFailureHook.invoked = false; } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/ha/TestHaEnd2End.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/ha/TestHaEnd2End.java index 70828da4f..6ad70cf3f 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/ha/TestHaEnd2End.java +++ 
b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/ha/TestHaEnd2End.java @@ -40,7 +40,6 @@ public class TestHaEnd2End extends AbstractTestWithStaticConfiguration { private final String SINGLE_TYPE_DATA_FILE_NAME = "kv1.dat"; private File dataFile; - private PolicyFile policyFile; @BeforeClass public static void setupTestStaticConfiguration() throws Exception { @@ -58,7 +57,7 @@ public void setup() throws Exception { FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); + PolicyFile.setAdminOnServer1(ADMINGROUP); } /** diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java index 955c68a09..4799d36f5 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java @@ -35,60 +35,52 @@ import java.util.List; import java.util.Map; import java.util.StringTokenizer; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import com.google.common.base.Preconditions; -import junit.framework.Assert; +import org.junit.Assert; -import org.apache.curator.test.TestingServer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.*; import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.FileInputFormat; -import org.apache.hadoop.mapred.FileOutputFormat; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.MiniMRClientCluster; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reducer; import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.mapred.RunningJob; -import org.apache.hadoop.mapred.TextInputFormat; -import org.apache.hadoop.mapred.TextOutputFormat; import org.apache.hadoop.security.UserGroupInformation; import org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl; import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.hdfs.PathsUpdate; import org.apache.sentry.hdfs.SentryAuthorizationProvider; +import org.apache.sentry.provider.db.SentryAlreadyExistsException; import org.apache.sentry.provider.db.SimpleDBProviderBackend; 
import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider; import org.apache.sentry.provider.file.PolicyFile; -import org.apache.sentry.service.thrift.SentryService; -import org.apache.sentry.service.thrift.SentryServiceFactory; -import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; import org.apache.sentry.tests.e2e.hive.StaticUserGroup; import org.apache.sentry.tests.e2e.hive.fs.MiniDFS; import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory; import org.apache.sentry.tests.e2e.hive.hiveserver.InternalHiveServer; import org.apache.sentry.tests.e2e.hive.hiveserver.InternalMetastoreServer; +import org.apache.sentry.tests.e2e.minisentry.SentrySrv; +import org.apache.sentry.tests.e2e.minisentry.SentrySrvFactory; import org.fest.reflect.core.Reflection; import org.junit.After; import org.junit.AfterClass; @@ -101,12 +93,13 @@ import com.google.common.collect.Maps; import com.google.common.io.Files; import com.google.common.io.Resources; +import org.apache.hadoop.hive.metastore.api.Table; public class TestHDFSIntegration { private static final Logger LOGGER = LoggerFactory .getLogger(TestHDFSIntegration.class); - protected static boolean testSentryHA = false; + public static class WordCountMapper extends MapReduceBase implements Mapper { @@ -140,20 +133,28 @@ public void reduce(Text key, Iterator values, private static final int NUM_RETRIES = 10; private static final int RETRY_WAIT = 1000; + private static final String EXTERNAL_SENTRY_SERVICE = "sentry.e2etest.external.sentry"; + private static final String DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY = + "dfs.namenode.authorization.provider.class"; private static MiniDFSCluster miniDFS; - private MiniMRClientCluster miniMR; private static InternalHiveServer hiveServer2; private static InternalMetastoreServer metastore; - private static SentryService sentryService; + private static HiveMetaStoreClient hmsClient; + + private static int sentryPort = -1; + protected static SentrySrv sentryServer; + protected static boolean testSentryHA = false; + private static final long STALE_THRESHOLD = 5000; + private static final long CACHE_REFRESH = 100; //Default is 500, but we want it to be low + // in our tests so that changes reflect soon + private static String fsURI; private static int hmsPort; - private static int sentryPort = -1; private static File baseDir; private static File policyFileLocation; private static UserGroupInformation adminUgi; private static UserGroupInformation hiveUgi; - private static TestingServer server; // Variables which are used for cleanup after test // Please set these values in each test @@ -162,6 +163,8 @@ public void reduce(Text key, Iterator values, private String[] roles; private String admin; + private static Configuration hadoopConf; + protected static File assertCreateDir(File dir) { if(!dir.isDirectory()) { Assert.assertTrue("Failed creating " + dir, dir.mkdirs()); @@ -176,17 +179,6 @@ private static int findPort() throws IOException { return port; } - private static void waitOnSentryService() throws Exception { - sentryService.start(); - final long start = System.currentTimeMillis(); - while (!sentryService.isRunning()) { - Thread.sleep(1000); - if (System.currentTimeMillis() - start > 60000L) { - throw new TimeoutException("Server did not start after 60 seconds"); - } - } - } - @BeforeClass public static void setup() throws Exception { Class.forName("org.apache.hive.jdbc.HiveDriver"); @@ -254,6 
+246,12 @@ public Void run() throws Exception { hiveConf.set("datanucleus.autoStartMechanism", "SchemaTable"); hmsPort = findPort(); LOGGER.info("\n\n HMS port : " + hmsPort + "\n\n"); + + // Sets hive.metastore.authorization.storage.checks to true, so that + // operations such as drop-partition are disallowed if the user in question + // doesn't have permission to delete the corresponding directory + // on the storage. + hiveConf.set("hive.metastore.authorization.storage.checks", "true"); hiveConf.set("hive.metastore.uris", "thrift://localhost:" + hmsPort); hiveConf.set("hive.metastore.pre.event.listeners", "org.apache.sentry.binding.metastore.MetastoreAuthzBinding"); hiveConf.set("hive.metastore.event.listeners", "org.apache.sentry.binding.metastore.SentryMetastorePostEventListener"); @@ -282,9 +280,9 @@ public Void run() throws Exception { out.close(); Reflection.staticField("hiveSiteURL") - .ofType(URL.class) - .in(HiveConf.class) - .set(hiveSite.toURI().toURL()); + .ofType(URL.class) + .in(HiveConf.class) + .set(hiveSite.toURI().toURL()); metastore = new InternalMetastoreServer(hiveConf); new Thread() { @@ -292,13 +290,16 @@ public Void run() throws Exception { public void run() { try { metastore.start(); - while(true){} + while (true) { + Thread.sleep(1000L); + } } catch (Exception e) { LOGGER.info("Could not start Hive Server"); } } }.start(); + hmsClient = new HiveMetaStoreClient(hiveConf); startHiveServer2(retries, hiveConf); return null; } @@ -317,7 +318,9 @@ private static void startHiveServer2(final int retries, HiveConf hiveConf) public void run() { try { hiveServer2.start(); - while(keepRunning.get()){} + while (keepRunning.get()) { + Thread.sleep(1000L); + } } catch (Exception e) { LOGGER.info("Could not start Hive Server"); } @@ -349,26 +352,28 @@ private static void startDFSandYARN() throws IOException, @Override public Void run() throws Exception { System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data"); - Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY, + hadoopConf = new HdfsConfiguration(); + hadoopConf.set(DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY, SentryAuthorizationProvider.class.getName()); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); - conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + hadoopConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + hadoopConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); File dfsDir = assertCreateDir(new File(baseDir, "dfs")); - conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath()); - conf.set("hadoop.security.group.mapping", + hadoopConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath()); + hadoopConf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName()); Configuration.addDefaultResource("test.xml"); - conf.set("sentry.authorization-provider.hdfs-path-prefixes", "/user/hive/warehouse,/tmp/external"); - conf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000"); - conf.set("sentry.authorization-provider.cache-stale-threshold.ms", "3000"); + hadoopConf.set("sentry.authorization-provider.hdfs-path-prefixes", "/user/hive/warehouse,/tmp/external"); + hadoopConf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000"); + hadoopConf.set("sentry.authorization-provider.cache-refresh-interval.ms", String.valueOf(CACHE_REFRESH)); - conf.set("sentry.hdfs.service.security.mode", "none"); -
conf.set("sentry.hdfs.service.client.server.rpc-address", "localhost"); - conf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort)); + hadoopConf.set("sentry.authorization-provider.cache-stale-threshold.ms", String.valueOf(STALE_THRESHOLD)); + + hadoopConf.set("sentry.hdfs.service.security.mode", "none"); + hadoopConf.set("sentry.hdfs.service.client.server.rpc-address", "localhost"); + hadoopConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort)); EditLogFileOutputStream.setShouldSkipFsyncForTesting(true); - miniDFS = new MiniDFSCluster.Builder(conf).build(); + miniDFS = new MiniDFSCluster.Builder(hadoopConf).build(); Path tmpPath = new Path("/tmp"); Path hivePath = new Path("/user/hive"); Path warehousePath = new Path(hivePath, "warehouse"); @@ -377,7 +382,7 @@ public Void run() throws Exception { LOGGER.info("\n\n Is dir :" + directory + "\n\n"); LOGGER.info("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n"); fsURI = miniDFS.getFileSystem().getUri().toString(); - conf.set("fs.defaultFS", fsURI); + hadoopConf.set("fs.defaultFS", fsURI); // Create Yarn cluster // miniMR = MiniMRClientClusterFactory.create(this.getClass(), 1, conf); @@ -415,67 +420,56 @@ public Void run() throws Exception { }); } - private static void startSentry() throws IOException, - InterruptedException { - hiveUgi.doAs(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - Configuration sentryConf = new Configuration(false); - Map properties = Maps.newHashMap(); - properties.put(HiveServerFactory.AUTHZ_PROVIDER_BACKEND, - SimpleDBProviderBackend.class.getName()); - properties.put(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname, - SentryHiveAuthorizationTaskFactoryImpl.class.getName()); - properties - .put(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS.varname, "2"); - properties.put("hive.metastore.uris", "thrift://localhost:" + hmsPort); - properties.put(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); + private static void startSentry() throws Exception { + try { + + hiveUgi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + Configuration sentryConf = new Configuration(false); + Map properties = Maps.newHashMap(); + properties.put(HiveServerFactory.AUTHZ_PROVIDER_BACKEND, + SimpleDBProviderBackend.class.getName()); + properties.put(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname, + SentryHiveAuthorizationTaskFactoryImpl.class.getName()); + properties + .put(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS.varname, "2"); + properties.put("hive.metastore.uris", "thrift://localhost:" + hmsPort); + properties.put("hive.exec.local.scratchdir", Files.createTempDir().getAbsolutePath()); + properties.put(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); // properties.put("sentry.service.server.compact.transport", "true"); - properties.put("sentry.hive.testing.mode", "true"); - properties.put("sentry.service.reporting", "JMX"); - properties.put(ServerConfig.ADMIN_GROUPS, "hive,admin"); - properties.put(ServerConfig.RPC_ADDRESS, "localhost"); - properties.put(ServerConfig.RPC_PORT, String.valueOf(sentryPort < 0 ? 
0 : sentryPort)); - properties.put(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); - - properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); - properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath()); - properties.put(ServerConfig.SENTRY_STORE_JDBC_URL, - "jdbc:derby:;databaseName=" + baseDir.getPath() - + "/sentrystore_db;create=true"); - properties.put("sentry.service.processor.factories", - "org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessorFactory,org.apache.sentry.hdfs.SentryHDFSServiceProcessorFactory"); - properties.put("sentry.policy.store.plugins", "org.apache.sentry.hdfs.SentryPlugin"); - properties.put(ServerConfig.RPC_MIN_THREADS, "3"); - if (testSentryHA) { - haSetup(properties); - } - for (Map.Entry entry : properties.entrySet()) { - sentryConf.set(entry.getKey(), entry.getValue()); + properties.put("sentry.hive.testing.mode", "true"); + properties.put("sentry.service.reporting", "JMX"); + properties.put(ServerConfig.ADMIN_GROUPS, "hive,admin"); + properties.put(ServerConfig.RPC_ADDRESS, "localhost"); + properties.put(ServerConfig.RPC_PORT, String.valueOf(sentryPort > 0 ? sentryPort : 0)); + properties.put(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); + + properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); + properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath()); + properties.put(ServerConfig.SENTRY_STORE_JDBC_URL, + "jdbc:derby:;databaseName=" + baseDir.getPath() + + "/sentrystore_db;create=true"); + properties.put(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); + properties.put("sentry.service.processor.factories", + "org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessorFactory,org.apache.sentry.hdfs.SentryHDFSServiceProcessorFactory"); + properties.put("sentry.policy.store.plugins", "org.apache.sentry.hdfs.SentryPlugin"); + properties.put(ServerConfig.RPC_MIN_THREADS, "3"); + for (Map.Entry entry : properties.entrySet()) { + sentryConf.set(entry.getKey(), entry.getValue()); + } + sentryServer = SentrySrvFactory.create(SentrySrvFactory.SentrySrvType.INTERNAL_SERVER, + sentryConf, testSentryHA ? 2 : 1); + sentryPort = sentryServer.get(0).getAddress().getPort(); + sentryServer.startAll(); + LOGGER.info("\n\n Sentry service started \n\n"); + return null; } - sentryService = new SentryServiceFactory().create(sentryConf); - properties.put(ClientConfig.SERVER_RPC_ADDRESS, sentryService.getAddress() - .getHostName()); - sentryConf.set(ClientConfig.SERVER_RPC_ADDRESS, sentryService.getAddress() - .getHostName()); - properties.put(ClientConfig.SERVER_RPC_PORT, - String.valueOf(sentryService.getAddress().getPort())); - sentryConf.set(ClientConfig.SERVER_RPC_PORT, - String.valueOf(sentryService.getAddress().getPort())); - waitOnSentryService(); - sentryPort = sentryService.getAddress().getPort(); - LOGGER.info("\n\n Sentry port : " + sentryPort + "\n\n"); - return null; - } - }); - } - - public static void haSetup(Map properties) throws Exception { - server = new TestingServer(); - server.start(); - properties.put(ServerConfig.SENTRY_HA_ZOOKEEPER_QUORUM, - server.getConnectString()); - properties.put(ServerConfig.SENTRY_HA_ENABLED, "true"); + }); + } catch (Exception e) { + //An exception happening in above block will result in a wrapped UndeclaredThrowableException. 
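+ // Unwrapping getCause() surfaces the original failure to the test runner instead of the reflection wrapper.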
+ throw new Exception(e.getCause()); + } } @After @@ -498,7 +492,7 @@ public void cleanAfterTest() throws Exception { conn = hiveServer2.createConnection("hive", "hive"); stmt = conn.createStatement(); for( String role:roles) { - stmt.execute("drop role " + role); + stmt.execute("drop role " + role); } stmt.close(); conn.close(); @@ -524,8 +518,12 @@ public static void cleanUp() throws Exception { hiveServer2.shutdown(); } } finally { - if (metastore != null) { - metastore.shutdown(); + try { + if (metastore != null) { + metastore.shutdown(); + } + } finally { + sentryServer.close(); } } } @@ -535,7 +533,7 @@ public static void cleanUp() throws Exception { public void testEnd2End() throws Throwable { tmpHDFSDir = new Path("/tmp/external"); dbNames = new String[]{"db1"}; - roles = new String[]{"admin_role"}; + roles = new String[]{"admin_role", "db_role", "tab_role", "p1_admin"}; admin = "hive"; Connection conn; @@ -551,6 +549,14 @@ public void testEnd2End() throws Throwable { stmt.execute("alter table p1 add partition (month=2, day=1)"); stmt.execute("alter table p1 add partition (month=2, day=2)"); + // db privileges + stmt.execute("create database db5"); + stmt.execute("create role db_role"); + stmt.execute("create role tab_role"); + stmt.execute("grant role db_role to group hbase"); + stmt.execute("grant role tab_role to group flume"); + stmt.execute("create table db5.p2(id int)"); + stmt.execute("create role p1_admin"); stmt.execute("grant role p1_admin to group hbase"); @@ -559,6 +565,15 @@ public void testEnd2End() throws Throwable { verifyOnAllSubDirs("/user/hive/warehouse/p1", null, "hbase", false); + stmt.execute("grant all on database db5 to role db_role"); + stmt.execute("use db5"); + stmt.execute("grant all on table p2 to role tab_role"); + stmt.execute("use default"); + verifyOnAllSubDirs("/user/hive/warehouse/db5.db", FsAction.ALL, "hbase", true); + verifyOnAllSubDirs("/user/hive/warehouse/db5.db/p2", FsAction.ALL, "hbase", true); + verifyOnAllSubDirs("/user/hive/warehouse/db5.db/p2", FsAction.ALL, "flume", true); + verifyOnPath("/user/hive/warehouse/db5.db", FsAction.ALL, "flume", false); + loadData(stmt); verifyHDFSandMR(stmt); @@ -614,14 +629,51 @@ public Void run() throws Exception { verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true); verifyOnAllSubDirs("/user/hive/warehouse/p3/month=1/day=3", FsAction.WRITE_EXECUTE, "hbase", true); - sentryService.stop(); - // Verify that Sentry permission are still enforced for the "stale" period - verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true); + // Test DB case insensitivity + stmt.execute("create database extdb"); + stmt.execute("grant all on database ExtDb to role p1_admin"); + writeToPath("/tmp/external/ext100", 5, "foo", "bar"); + writeToPath("/tmp/external/ext101", 5, "foo", "bar"); + stmt.execute("use extdb"); + stmt.execute( + "create table ext100 (s string) location \'/tmp/external/ext100\'"); + verifyQuery(stmt, "ext100", 5); + verifyOnAllSubDirs("/tmp/external/ext100", FsAction.ALL, "hbase", true); + stmt.execute("use default"); - // Verify that Sentry permission are NOT enforced AFTER "stale" period - verifyOnAllSubDirs("/user/hive/warehouse/p3", null, "hbase", false); + stmt.execute("use EXTDB"); + stmt.execute( + "create table ext101 (s string) location \'/tmp/external/ext101\'"); + verifyQuery(stmt, "ext101", 5); + verifyOnAllSubDirs("/tmp/external/ext101", FsAction.ALL, "hbase", true); + + // Test table case insensitivity + stmt.execute("grant all 
on table exT100 to role tab_role"); + verifyOnAllSubDirs("/tmp/external/ext100", FsAction.ALL, "flume", true); + + stmt.execute("use default"); + + //TODO: SENTRY-795: HDFS permissions do not sync when Sentry restarts in HA mode. + if(!testSentryHA) { + long beforeStop = System.currentTimeMillis(); + sentryServer.stopAll(); + long timeTakenForStopMs = System.currentTimeMillis() - beforeStop; + LOGGER.info("Time taken for Sentry server stop: " + timeTakenForStopMs); + + // Verify that Sentry permission are still enforced for the "stale" period only if stop did not take too long + if(timeTakenForStopMs < STALE_THRESHOLD) { + verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true); + Thread.sleep((STALE_THRESHOLD - timeTakenForStopMs)); + } else { + LOGGER.warn("Sentry server stop took too long"); + } + + // Verify that Sentry permission are NOT enforced AFTER "stale" period + verifyOnAllSubDirs("/user/hive/warehouse/p3", null, "hbase", false); + + sentryServer.startAll(); + } - startSentry(); // Verify that After Sentry restart permissions are re-enforced verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true); @@ -889,6 +941,626 @@ public void testExternalTable() throws Throwable { } + /** + * Make sure when events such as table creation fail, the path should not be sync to NameNode plugin. + */ + @Test + public void testTableCreationFailure() throws Throwable { + String dbName = "db1"; + + tmpHDFSDir = new Path("/tmp/external"); + if (!miniDFS.getFileSystem().exists(tmpHDFSDir)) { + miniDFS.getFileSystem().mkdirs(tmpHDFSDir); + } + + miniDFS.getFileSystem().setPermission(tmpHDFSDir, FsPermission.valueOf("drwxrwx---")); + Path partitionDir = new Path("/tmp/external/p1"); + if (!miniDFS.getFileSystem().exists(partitionDir)) { + miniDFS.getFileSystem().mkdirs(partitionDir); + } + + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant all on uri 'hdfs:///tmp/external' to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + stmt.execute("grant role admin_role to group " + StaticUserGroup.HIVE); + stmt.close(); + conn.close(); + + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + + // Expect table creation to fail because hive:hive does not have + // permission to write at parent directory. + try { + stmt.execute("create external table tab1(a int) location 'hdfs:///tmp/external/p1'"); + Assert.fail("Expect table creation to fail"); + } catch (Exception ex) { + LOGGER.error("Exception when creating table: " + ex.getMessage()); + } + + // When the table creation failed, the path will not be managed by sentry. And the + // permission of the path will not be hive:hive. + verifyOnAllSubDirs("/tmp/external/p1", null, StaticUserGroup.HIVE, true); + + stmt.close(); + conn.close(); + } + + /** + * Make sure when events such as add partition fail, the path should not be sync to NameNode plugin. 
+ */ + @Test + public void testAddPartitionFailure() throws Throwable { + String dbName = "db1"; + + tmpHDFSDir = new Path("/tmp/external"); + if (!miniDFS.getFileSystem().exists(tmpHDFSDir)) { + miniDFS.getFileSystem().mkdirs(tmpHDFSDir); + } + + miniDFS.getFileSystem().setPermission(tmpHDFSDir, FsPermission.valueOf("drwxrwx---")); + Path partitionDir = new Path("/tmp/external/p1"); + if (!miniDFS.getFileSystem().exists(partitionDir)) { + miniDFS.getFileSystem().mkdirs(partitionDir); + } + + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + stmt.close(); + conn.close(); + + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("create external table tab2 (s string) partitioned by (month int)"); + + // Expect adding the partition to fail because hive:hive does not have + // permission to write to the parent directory. + try { + stmt.execute("alter table tab2 add partition (month = 1) location '/tmp/external/p1'"); + Assert.fail("Expect adding partition to fail"); + } catch (Exception ex) { + LOGGER.error("Exception when adding partition: " + ex.getMessage()); + } + + // When adding the partition fails, the path will not be managed by Sentry, and the + // permissions of the path will not be hive:hive. + verifyOnAllSubDirs("/tmp/external/p1", null, StaticUserGroup.HIVE, true); + + stmt.close(); + conn.close(); + } + + /** + * Make sure that when events such as drop table fail, the path remains managed by the NameNode plugin. + */ + @Test + public void testDropTableFailure() throws Throwable { + String dbName = "db1"; + tmpHDFSDir = new Path("/tmp/external"); + if (!miniDFS.getFileSystem().exists(tmpHDFSDir)) { + miniDFS.getFileSystem().mkdirs(tmpHDFSDir); + } + + miniDFS.getFileSystem().setPermission(tmpHDFSDir, FsPermission.valueOf("drwxrwxrwx")); + Path partitionDir = new Path("/tmp/external/p1"); + if (!miniDFS.getFileSystem().exists(partitionDir)) { + miniDFS.getFileSystem().mkdirs(partitionDir); + } + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + stmt.close(); + conn.close(); + + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("create external table tab1(a int) location 'hdfs:///tmp/external/p1'"); + + miniDFS.getFileSystem().setPermission(tmpHDFSDir, FsPermission.valueOf("drwxrwx---")); + + // Expect dropping the table to fail because hive:hive does not have + // permission to write to the parent directory when the + // hive.metastore.authorization.storage.checks property is true.
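+ // (The storage checks flag is enabled in the HMS configuration during test setup above.)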
+ try { + stmt.execute("drop table tab1"); + Assert.fail("Expect dropping table to fail"); + } catch (Exception ex) { + LOGGER.error("Exception when dropping table: " + ex.getMessage()); + } + + // When the table drop fails, the path will still be managed by Sentry, and the + // permissions of the path should still be hive:hive. + verifyOnAllSubDirs("/tmp/external/p1", FsAction.ALL, StaticUserGroup.HIVE, true); + + stmt.close(); + conn.close(); + } + + /** + * Make sure that when events such as drop partition fail, the path remains managed by the NameNode plugin. + */ + @Test + public void testDropPartitionFailure() throws Throwable { + String dbName = "db1"; + + tmpHDFSDir = new Path("/tmp/external"); + if (!miniDFS.getFileSystem().exists(tmpHDFSDir)) { + miniDFS.getFileSystem().mkdirs(tmpHDFSDir); + } + + miniDFS.getFileSystem().setPermission(tmpHDFSDir, FsPermission.valueOf("drwxrwxrwx")); + Path partitionDir = new Path("/tmp/external/p1"); + if (!miniDFS.getFileSystem().exists(partitionDir)) { + miniDFS.getFileSystem().mkdirs(partitionDir); + } + + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + stmt.close(); + conn.close(); + + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("create table tab3 (s string) partitioned by (month int)"); + stmt.execute("alter table tab3 add partition (month = 1) location '/tmp/external/p1'"); + + miniDFS.getFileSystem().setPermission(tmpHDFSDir, FsPermission.valueOf("drwxrwx---")); + + + // Expect dropping the partition to fail because hive:hive does not have + // permission to write to the parent directory. + try { + stmt.execute("ALTER TABLE tab3 DROP PARTITION (month = 1)"); + Assert.fail("Expect dropping partition to fail"); + } catch (Exception ex) { + LOGGER.error("Exception when dropping partition: " + ex.getMessage()); + } + + // When the partition drop fails, the path for the partition will still + // be managed by Sentry, and the permissions of the path should still be hive:hive.
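+ // FsAction.ALL below asserts that the hive group still holds the full Sentry-managed ACL on the partition path.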
+ verifyOnAllSubDirs("/tmp/external/p1", FsAction.ALL, StaticUserGroup.HIVE, true); + + stmt.close(); + conn.close(); + } + + @Test + public void testColumnPrivileges() throws Throwable { + String dbName = "db2"; + + tmpHDFSDir = new Path("/tmp/external"); + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role", "tab_role", "db_role", "col_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role with grant option"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("use "+ dbName); + stmt.execute("create table p1 (s string) partitioned by (month int, day int)"); + stmt.execute("alter table p1 add partition (month=1, day=1)"); + stmt.execute("alter table p1 add partition (month=1, day=2)"); + stmt.execute("alter table p1 add partition (month=2, day=1)"); + stmt.execute("alter table p1 add partition (month=2, day=2)"); + loadData(stmt); + + stmt.execute("create role db_role"); + stmt.execute("grant select on database " + dbName + " to role db_role"); + stmt.execute("create role tab_role"); + stmt.execute("grant select on p1 to role tab_role"); + stmt.execute("create role col_role"); + stmt.execute("grant select(s) on p1 to role col_role"); + + stmt.execute("grant role col_role to group "+ StaticUserGroup.USERGROUP1); + + stmt.execute("grant role tab_role to group "+ StaticUserGroup.USERGROUP2); + stmt.execute("grant role col_role to group "+ StaticUserGroup.USERGROUP2); + + stmt.execute("grant role db_role to group "+ StaticUserGroup.USERGROUP3); + stmt.execute("grant role col_role to group "+ StaticUserGroup.USERGROUP3); + + stmt.execute("grant role col_role to group " + StaticUserGroup.ADMINGROUP); + + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + + //User with just column level privileges cannot read HDFS + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/p1", null, StaticUserGroup.USERGROUP1, false); + + //User with permissions on table and column can read HDFS file + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/p1", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true); + + //User with permissions on db and column can read HDFS file + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/p1", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP3, true); + + //User with permissions on server and column cannot read HDFS file + //TODO:SENTRY-751 + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/p1", null, StaticUserGroup.ADMINGROUP, false); + + stmt.close(); + conn.close(); + + } + + /* + TODO:SENTRY-819 + */ + @Test + public void testAllColumn() throws Throwable { + String dbName = "db2"; + + tmpHDFSDir = new Path("/tmp/external"); + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role", "col_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role with grant option"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + + 
conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("use " + dbName); + stmt.execute("create table p1 (c1 string, c2 string) partitioned by (month int, day int)"); + stmt.execute("alter table p1 add partition (month=1, day=1)"); + loadDataTwoCols(stmt); + + stmt.execute("create role col_role"); + stmt.execute("grant select(c1,c2) on p1 to role col_role"); + stmt.execute("grant role col_role to group "+ StaticUserGroup.USERGROUP1); + Thread.sleep(100); + + // A user with privileges on all columns of the data still cannot read the HDFS files + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/p1", null, StaticUserGroup.USERGROUP1, false); + + stmt.close(); + conn.close(); + + } + //SENTRY-780 + @Test + public void testViews() throws Throwable { + String dbName= "db1"; + + tmpHDFSDir = new Path("/tmp/external"); + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + try { + stmt.execute("create database " + dbName); + stmt.execute("create table test(a string)"); + stmt.execute("create view testView as select * from test"); + stmt.execute("create or replace view testView as select * from test"); + stmt.execute("drop view testView"); + } catch(Exception s) { + throw s; + } + + stmt.close(); + conn.close(); + } + + //SENTRY-884 + @Test + public void testAccessToTableDirectory() throws Throwable { + String dbName= "db1"; + + tmpHDFSDir = new Path("/tmp/external"); + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role", "table_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("use " + dbName); + stmt.execute("create table tb1(a string)"); + + stmt.execute("create role table_role"); + stmt.execute("grant all on table tb1 to role table_role"); + stmt.execute("grant role table_role to group " + StaticUserGroup.USERGROUP1); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + //Verify user1 is able to access table directory + verifyAccessToPath(StaticUserGroup.USER1_1, StaticUserGroup.USERGROUP1, "/user/hive/warehouse/db1.db/tb1", true); + + stmt.close(); + conn.close(); + } + + /* SENTRY-953 */ + @Test + public void testAuthzObjOnPartitionMultipleTables() throws Throwable { + String dbName = "db1"; + + tmpHDFSDir = new Path("/tmp/external"); + miniDFS.getFileSystem().mkdirs(tmpHDFSDir); + Path partitionDir = new Path("/tmp/external/p1"); + miniDFS.getFileSystem().mkdirs(partitionDir); + + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role",
"tab1_role", "tab2_role", "tab3_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + + // Create external table tab1 on location '/tmp/external/p1'. + // Create tab1_role, and grant it with insert permission on table tab1 to user_group1. + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("use " + dbName); + stmt.execute("create external table tab1 (s string) partitioned by (month int) location '/tmp/external/p1'"); + stmt.execute("create role tab1_role"); + stmt.execute("grant insert on table tab1 to role tab1_role"); + stmt.execute("grant role tab1_role to group " + StaticUserGroup.USERGROUP1); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + + // Verify that user_group1 has insert(write_execute) permission on '/tmp/external/p1'. + verifyOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true); + + // Create external table tab2 and partition on location '/tmp/external'. + // Create tab2_role, and grant it with select permission on table tab2 to user_group2. + stmt.execute("create external table tab2 (s string) partitioned by (month int)"); + stmt.execute("alter table tab2 add partition (month = 1) location '/tmp/external'"); + stmt.execute("create role tab2_role"); + stmt.execute("grant select on table tab2 to role tab2_role"); + stmt.execute("grant role tab2_role to group " + StaticUserGroup.USERGROUP2); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + + // Verify that user_group2 have select(read_execute) permission on both paths. + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab2", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true); + verifyOnPath("/tmp/external", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true); + + // Create table tab3 and partition on the same location '/tmp/external' as tab2. + // Create tab3_role, and grant it with insert permission on table tab3 to user_group3. + stmt.execute("create table tab3 (s string) partitioned by (month int)"); + stmt.execute("alter table tab3 add partition (month = 1) location '/tmp/external'"); + stmt.execute("create role tab3_role"); + stmt.execute("grant insert on table tab3 to role tab3_role"); + stmt.execute("grant role tab3_role to group " + StaticUserGroup.USERGROUP3); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + + // When two partitions of different tables pointing to the same location with different grants, + // ACLs should have union (no duplicates) of both rules. + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true); + verifyOnPath("/tmp/external", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true); + verifyOnPath("/tmp/external", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true); + + // When alter the table name (tab2 to be tabx), ACLs should remain the same. 
+ stmt.execute("alter table tab2 rename to tabx"); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + verifyOnPath("/tmp/external", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true); + verifyOnPath("/tmp/external", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true); + + // When drop a partition that shares the same location with other partition belonging to + // other table, should still have the other table permissions. + stmt.execute("ALTER TABLE tabx DROP PARTITION (month = 1)"); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true); + verifyOnPath("/tmp/external", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true); + + // When drop a table that has a partition shares the same location with other partition + // belonging to other table, should still have the other table permissions. + stmt.execute("DROP TABLE IF EXISTS tabx"); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true); + verifyOnPath("/tmp/external", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true); + + stmt.close(); + conn.close(); + + miniDFS.getFileSystem().delete(partitionDir, true); + } + + /* SENTRY-953 */ + @Test + public void testAuthzObjOnPartitionSameTable() throws Throwable { + String dbName = "db1"; + + tmpHDFSDir = new Path("/tmp/external/p1"); + miniDFS.getFileSystem().mkdirs(tmpHDFSDir); + + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role", "tab1_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + + // Create table tab1 and partition on the same location '/tmp/external/p1'. + // Create tab1_role, and grant it with insert permission on table tab1 to user_group1. + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("use " + dbName); + stmt.execute("create table tab1 (s string) partitioned by (month int)"); + stmt.execute("alter table tab1 add partition (month = 1) location '/tmp/external/p1'"); + stmt.execute("create role tab1_role"); + stmt.execute("grant insert on table tab1 to role tab1_role"); + stmt.execute("grant role tab1_role to group " + StaticUserGroup.USERGROUP1); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + + // Verify that user_group1 has insert(write_execute) permission on '/tmp/external/p1'. + verifyOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true); + + // When two partitions of the same table pointing to the same location, + // ACLS should not be repeated. Exception will be thrown if there are duplicates. 
+ stmt.execute("alter table tab1 add partition (month = 2) location '/tmp/external/p1'"); + verifyOnPath("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true); + + stmt.close(); + conn.close(); + } + + /* SENTRY-953 */ + @Test + public void testAuthzObjOnMultipleTables() throws Throwable { + String dbName = "db1"; + + tmpHDFSDir = new Path("/tmp/external/p1"); + miniDFS.getFileSystem().mkdirs(tmpHDFSDir); + + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role", "tab1_role", "tab2_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + + // Create external table tab1 on location '/tmp/external/p1'. + // Create tab1_role, and grant it with insert permission on table tab1 to user_group1. + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("use " + dbName); + stmt.execute("create external table tab1 (s string) partitioned by (month int) location '/tmp/external/p1'"); + stmt.execute("create role tab1_role"); + stmt.execute("grant insert on table tab1 to role tab1_role"); + stmt.execute("grant role tab1_role to group " + StaticUserGroup.USERGROUP1); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + + // Verify that user_group1 has insert(write_execute) permission on '/tmp/external/p1'. + verifyOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true); + + // Create table tab2 on the same location '/tmp/external/p1' as table tab1. + // Create tab2_role, and grant it with select permission on table tab2 to user_group1. + stmt.execute("create table tab2 (s string) partitioned by (month int) location '/tmp/external/p1'"); + stmt.execute("create role tab2_role"); + stmt.execute("grant select on table tab2 to role tab2_role"); + stmt.execute("grant role tab2_role to group " + StaticUserGroup.USERGROUP1); + + // When two tables pointing to the same location, ACLS should have union (no duplicates) + // of both rules. + verifyOnPath("/tmp/external/p1", FsAction.ALL, StaticUserGroup.USERGROUP1, true); + + // When drop table tab1, ACLs of tab2 still remain. 
+ stmt.execute("DROP TABLE IF EXISTS tab1"); + Thread.sleep(CACHE_REFRESH);//Wait till sentry cache is updated in Namenode + verifyOnPath("/tmp/external/p1", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP1, true); + + stmt.close(); + conn.close(); + } + + private void verifyAccessToPath(String user, String group, String path, boolean hasPermission) throws Exception{ + Path p = new Path(path); + UserGroupInformation hadoopUser = + UserGroupInformation.createUserForTesting(user, new String[] {group}); + FileSystem fs = DFSTestUtil.getFileSystemAs(hadoopUser, hadoopConf); + try { + fs.listFiles(p, true); + if(!hasPermission) { + Assert.assertFalse("Expected listing files to fail", false); + } + } catch (Exception e) { + if(hasPermission) { + throw e; + } + } + } + private void verifyQuery(Statement stmt, String table, int n) throws Throwable { verifyQuery(stmt, table, n, NUM_RETRIES); } @@ -910,6 +1582,70 @@ private void verifyQuery(Statement stmt, String table, int n, int retry) throws } } + /** + * SENTRY-1002: + * Ensure the paths with no scheme will not cause NPE during paths update. + */ + @Test + public void testMissingScheme() throws Throwable { + + // In the local test environment, EXTERNAL_SENTRY_SERVICE is false, + // set the default URI scheme to be hdfs. + boolean testConfOff = new Boolean(System.getProperty(EXTERNAL_SENTRY_SERVICE, "false")); + if (!testConfOff) { + PathsUpdate.getConfiguration().set("fs.defaultFS", "hdfs:///"); + } + + tmpHDFSDir = new Path("/tmp/external"); + if (!miniDFS.getFileSystem().exists(tmpHDFSDir)) { + miniDFS.getFileSystem().mkdirs(tmpHDFSDir); + } + + Path partitionDir = new Path("/tmp/external/p1"); + if (!miniDFS.getFileSystem().exists(partitionDir)) { + miniDFS.getFileSystem().mkdirs(partitionDir); + } + + String dbName = "db1"; + String tblName = "tab1"; + dbNames = new String[]{dbName}; + roles = new String[]{"admin_role"}; + admin = StaticUserGroup.ADMIN1; + + Connection conn; + Statement stmt; + + conn = hiveServer2.createConnection("hive", "hive"); + stmt = conn.createStatement(); + stmt.execute("create role admin_role"); + stmt.execute("grant all on server server1 to role admin_role"); + stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP); + stmt.close(); + conn.close(); + + conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1); + stmt = conn.createStatement(); + stmt.execute("create database " + dbName); + stmt.execute("create external table " + dbName + "." + tblName + "(s string) location '/tmp/external/p1'"); + + // Deep copy of table tab1 + Table tbCopy = hmsClient.getTable(dbName, tblName); + + // Change the location of the table to strip the scheme. + StorageDescriptor sd = hmsClient.getTable(dbName, tblName).getSd(); + sd.setLocation("/tmp/external"); + tbCopy.setSd(sd); + + // Alter table tab1 to be tbCopy which is at scheme-less location. + // And the corresponding path will be updated to sentry server. 
+ hmsClient.alter_table(dbName, "tab1", tbCopy); + Assert.assertEquals("/tmp/external", hmsClient.getTable(dbName, tblName).getSd().getLocation()); + verifyOnPath("/tmp/external", FsAction.ALL, StaticUserGroup.HIVE, true); + + stmt.close(); + conn.close(); + } + private void loadData(Statement stmt) throws IOException, SQLException { FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f1.txt")); f1.writeChars("m1d1_t1\n"); @@ -934,6 +1670,23 @@ private void loadData(Statement stmt) throws IOException, SQLException { rs.close(); } + private void loadDataTwoCols(Statement stmt) throws IOException, SQLException { + FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f2.txt")); + f1.writeChars("m1d1_t1, m1d1_t2\n"); + f1.writeChars("m1d1_t2, m1d1_t2\n"); + f1.writeChars("m1d1_t3, m1d1_t2\n"); + f1.flush(); + f1.close(); + stmt.execute("load data inpath \'/tmp/f2.txt\' overwrite into table p1 partition (month=1, day=1)"); + ResultSet rs = stmt.executeQuery("select * from p1"); + List vals = new ArrayList(); + while (rs.next()) { + vals.add(rs.getString(1)); + } + Assert.assertEquals(3, vals.size()); + rs.close(); + } + private void writeToPath(String path, int numRows, String user, String group) throws IOException { Path p = new Path(path); miniDFS.getFileSystem().mkdirs(p); @@ -981,7 +1734,9 @@ public Void run() throws Exception { List lines = new ArrayList(); do { line = in.readLine(); - if (line != null) lines.add(line); + if (line != null) { + lines.add(line); + } } while (line != null); Assert.assertEquals(3, lines.size()); in.close(); @@ -1021,12 +1776,10 @@ private void verifyOnAllSubDirs(Path p, FsAction fsAction, String group, boolean throw th; } } - if (recurse) { - if (fStatus.isDirectory()) { - FileStatus[] children = miniDFS.getFileSystem().listStatus(p); - for (FileStatus fs : children) { - verifyOnAllSubDirs(fs.getPath(), fsAction, group, groupShouldExist, recurse, NUM_RETRIES); - } + if (recurse && fStatus.isDirectory()) { + FileStatus[] children = miniDFS.getFileSystem().listStatus(p); + for (FileStatus fs : children) { + verifyOnAllSubDirs(fs.getPath(), fsAction, group, groupShouldExist, recurse, NUM_RETRIES); + } } } @@ -1036,12 +1789,19 @@ private Map getAcls(Path path) throws Exception { Map acls = new HashMap(); for (AclEntry ent : aclStatus.getEntries()) { if (ent.getType().equals(AclEntryType.GROUP)) { - acls.put(ent.getName(), ent.getPermission()); + + // If a duplicate ACL entry exists, an exception should be thrown.
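+ // (testAuthzObjOnPartitionSameTable relies on this check to prove that shared locations do not produce duplicate ACLs.)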
+ if (acls.containsKey(ent.getName())) { + throw new SentryAlreadyExistsException("The acl " + ent.getName() + " already exists.\n"); + } else { + acls.put(ent.getName(), ent.getPermission()); + } } } return acls; } +/* private void runWordCount(JobConf job, String inPath, String outPath) throws Exception { Path in = new Path(inPath); Path out = new Path(outPath); @@ -1070,5 +1830,6 @@ private void runWordCount(JobConf job, String inPath, String outPath) throws Exc } } +*/ } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithHiveServer.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithHiveServer.java index 9b3c04a95..a314c0dde 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithHiveServer.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithHiveServer.java @@ -16,30 +16,32 @@ */ package org.apache.sentry.tests.e2e.hive; -import com.google.common.io.Files; -import junit.framework.Assert; +import java.io.File; +import java.util.Map; + +import org.junit.Assert; + import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServer; import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory; -import org.junit.After; +import org.junit.AfterClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.util.Map; +import com.google.common.io.Files; public abstract class AbstractTestWithHiveServer { private static final Logger LOGGER = LoggerFactory .getLogger(AbstractTestWithHiveServer.class); - protected File baseDir; - protected File logDir; - protected File confDir; - protected File dataDir; - protected File policyFile; - protected HiveServer hiveServer; - protected FileSystem fileSystem; + protected static File baseDir; + protected static File logDir; + protected static File confDir; + protected static File dataDir; + protected static File policyFile; + protected static HiveServer hiveServer; + protected static FileSystem fileSystem; protected static final String ADMIN1 = StaticUserGroup.ADMIN1, ADMINGROUP = StaticUserGroup.ADMINGROUP, @@ -50,7 +52,7 @@ public abstract class AbstractTestWithHiveServer { USERGROUP2 = StaticUserGroup.USERGROUP2, USERGROUP3 = StaticUserGroup.USERGROUP3; - public Context createContext(Map properties) + public static Context createContext(Map properties) throws Exception { fileSystem = FileSystem.get(new Configuration()); baseDir = Files.createTempDir(); @@ -61,8 +63,8 @@ public Context createContext(Map properties) policyFile = new File(confDir, HiveServerFactory.AUTHZ_PROVIDER_FILENAME); hiveServer = HiveServerFactory.create(properties, baseDir, confDir, logDir, policyFile.getPath(), fileSystem); hiveServer.start(); - return new Context(hiveServer, getFileSystem(), - baseDir, confDir, dataDir, policyFile); + return new Context(hiveServer, fileSystem, + baseDir, dataDir, policyFile); } protected static File assertCreateDir(File dir) { @@ -76,8 +78,8 @@ protected FileSystem getFileSystem() { return fileSystem; } - @After - public void tearDownWithHiveServer() throws Exception { + @AfterClass + public static void tearDownWithHiveServer() throws Exception { if(hiveServer != null) { hiveServer.shutdown(); hiveServer = null; diff --git 
a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java index 3a8a6efc3..b96175797 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java @@ -16,13 +16,13 @@ */ package org.apache.sentry.tests.e2e.hive; -import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_SPLITTER; -import static org.apache.sentry.provider.common.ProviderConstants.PRIVILEGE_PREFIX; -import static org.apache.sentry.provider.common.ProviderConstants.ROLE_SPLITTER; -import static org.junit.Assert.assertTrue; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; +import static org.apache.sentry.policy.common.PolicyConstants.PRIVILEGE_PREFIX; +import static org.apache.sentry.policy.common.PolicyConstants.ROLE_SPLITTER; import java.io.File; import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -32,13 +32,17 @@ import java.util.Collection; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeoutException; +import java.util.HashSet; -import junit.framework.Assert; +import com.google.common.collect.Sets; +import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl; @@ -49,6 +53,7 @@ import org.apache.sentry.provider.db.SimpleDBProviderBackend; import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; import org.apache.sentry.provider.file.PolicyFile; +import org.apache.sentry.service.thrift.KerberosConfiguration; import org.apache.sentry.service.thrift.SentryServiceClientFactory; import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; @@ -70,6 +75,10 @@ import com.google.common.collect.Maps; import com.google.common.io.Files; +import javax.security.auth.Subject; +import javax.security.auth.kerberos.KerberosPrincipal; +import javax.security.auth.login.LoginContext; + public abstract class AbstractTestWithStaticConfiguration { private static final Logger LOGGER = LoggerFactory .getLogger(AbstractTestWithStaticConfiguration.class); @@ -113,11 +122,11 @@ public abstract class AbstractTestWithStaticConfiguration { protected static boolean policyOnHdfs = false; protected static boolean useSentryService = false; - protected static boolean setMetastoreListener = false; + protected static boolean setMetastoreListener = true; protected static String testServerType = null; protected static boolean enableHiveConcurrency = false; // indicate if the database need to be clear for every test case in one test class - protected static boolean clearDbAfterPerTest = true; + protected static boolean clearDbPerTest = true; protected static File baseDir; protected static File 
logDir;
@@ -135,10 +144,41 @@ public abstract class AbstractTestWithStaticConfiguration {
protected static Context context;
protected final String semanticException = "SemanticException No valid privileges";
+ protected static boolean clientKerberos = false;
+ protected static String REALM = System.getProperty("sentry.service.realm", "EXAMPLE.COM");
+ protected static final String SERVER_KERBEROS_NAME = "sentry/" + SERVER_HOST + "@" + REALM;
+ protected static final String SERVER_KEY_TAB = System.getProperty("sentry.service.server.keytab");
+
+ private static LoginContext clientLoginContext;
+ protected static SentryPolicyServiceClient client;
+
+ /**
+ * Get an authenticated Subject for the Sentry client, carrying its
+ * security-related attributes (for example, the Kerberos principal and keytab).
+ * @param clientShortName
+ * @param clientKeyTabDir
+ * @return the client's Subject
+ */
+ public static Subject getClientSubject(String clientShortName, String clientKeyTabDir) {
+ String clientKerberosPrincipal = clientShortName + "@" + REALM;
+ File clientKeyTabFile = new File(clientKeyTabDir);
+ Subject clientSubject = new Subject(false, Sets.newHashSet(
+ new KerberosPrincipal(clientKerberosPrincipal)), new HashSet<Object>(),
+ new HashSet<Object>());
+ try {
+ clientLoginContext = new LoginContext("", clientSubject, null,
+ KerberosConfiguration.createClientConfig(clientKerberosPrincipal, clientKeyTabFile));
+ clientLoginContext.login();
+ } catch (Exception ex) {
+ LOGGER.error("Exception: " + ex);
+ }
+ clientSubject = clientLoginContext.getSubject();
+ return clientSubject;
+ }
public static void createContext() throws Exception {
context = new Context(hiveServer, fileSystem,
- baseDir, confDir, dataDir, policyFileLocation);
+ baseDir, dataDir, policyFileLocation);
}
protected void dropDb(String user, String...dbs) throws Exception {
Connection connection = context.createConnection(user);
@@ -154,7 +194,7 @@ protected void createDb(String user, String...dbs) throws Exception {
Statement statement = connection.createStatement();
ArrayList<String> allowedDBs = new ArrayList<String>(Arrays.asList(DB1, DB2, DB3));
for(String db : dbs) {
- assertTrue(db + " is not part of known test dbs which will be cleaned up after the test", allowedDBs.contains(db));
+ Assert.assertTrue(db + " is not part of known test dbs which will be cleaned up after the test", allowedDBs.contains(db));
statement.execute("CREATE DATABASE " + db);
}
statement.close();
@@ -191,9 +231,10 @@ protected static File assertCreateDir(File dir) {
@BeforeClass
public static void setupTestStaticConfiguration() throws Exception {
+ LOGGER.info("AbstractTestWithStaticConfiguration setupTestStaticConfiguration");
properties = Maps.newHashMap();
if(!policyOnHdfs) {
- policyOnHdfs = new Boolean(System.getProperty("sentry.e2etest.policyonhdfs", "false"));
+ policyOnHdfs = Boolean.valueOf(System.getProperty("sentry.e2etest.policyonhdfs", "false"));
}
if (testServerType != null) {
properties.put("sentry.e2etest.hiveServer2Type", testServerType);
@@ -224,7 +265,7 @@ public static void setupTestStaticConfiguration() throws Exception {
policyURI = policyFileLocation.getPath();
}
- boolean startSentry = new Boolean(System.getProperty(EXTERNAL_SENTRY_SERVICE, "false"));
+ boolean startSentry = Boolean.valueOf(System.getProperty(EXTERNAL_SENTRY_SERVICE, "false"));
if ("true".equalsIgnoreCase(System.getProperty(ENABLE_SENTRY_HA, "false"))) {
enableSentryHA = true;
}
@@ -243,6 +284,12 @@ public static void setupTestStaticConfiguration() throws Exception {
hiveServer = create(properties, baseDir,
confDir, logDir, policyURI, fileSystem);
hiveServer.start();
createContext();
+
+ // Create tmp as scratch dir if it doesn't exist
+ Path tmpPath = new Path("/tmp");
+ if (!fileSystem.exists(tmpPath)) {
+ fileSystem.mkdirs(tmpPath, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
+ }
}
public static HiveServer create(Map<String, String> properties,
@@ -263,8 +310,10 @@ public static HiveServer create(Map<String, String> properties,
protected static void writePolicyFile(PolicyFile policyFile) throws Exception {
policyFile.write(context.getPolicyFile());
if(policyOnHdfs) {
+ LOGGER.info("use policy file on HDFS");
dfs.writePolicyFile(context.getPolicyFile());
} else if(useSentryService) {
+ LOGGER.info("use sentry service, granting permissions");
grantPermissions(policyFile);
}
}
@@ -277,16 +326,20 @@ private static void grantPermissions(PolicyFile policyFile) throws Exception {
ResultSet resultSet = statement.executeQuery("SHOW ROLES");
while( resultSet.next()) {
Statement statement1 = context.createStatement(connection);
- if(!resultSet.getString(1).equalsIgnoreCase("admin_role")) {
- statement1.execute("DROP ROLE " + resultSet.getString(1));
+ String roleName = resultSet.getString(1).trim();
+ if(!roleName.equalsIgnoreCase("admin_role")) {
+ LOGGER.info("Dropping role :" + roleName);
+ statement1.execute("DROP ROLE " + roleName);
}
}
// create roles and add privileges
for (Map.Entry<String, Collection<String>> roleEntry : policyFile.getRolesToPermissions()
.asMap().entrySet()) {
+ String roleName = roleEntry.getKey();
if(!roleEntry.getKey().equalsIgnoreCase("admin_role")){
- statement.execute("CREATE ROLE " + roleEntry.getKey());
+ LOGGER.info("Creating role : " + roleName);
+ statement.execute("CREATE ROLE " + roleName);
for (String privilege : roleEntry.getValue()) {
addPrivilege(roleEntry.getKey(), privilege, statement);
}
@@ -297,7 +350,9 @@ private static void grantPermissions(PolicyFile policyFile) throws Exception {
.entrySet()) {
for (String roleNames : groupEntry.getValue()) {
for (String roleName : roleNames.split(",")) {
- statement.execute("GRANT ROLE " + roleName + " TO GROUP " + groupEntry.getKey());
+ String sql = "GRANT ROLE " + roleName + " TO GROUP " + groupEntry.getKey();
+ LOGGER.info("Granting role to group: " + sql);
+ statement.execute(sql);
}
}
}
@@ -337,21 +392,31 @@ private static void addPrivilege(String roleName, String privileges, Statement s
}
}
+ LOGGER.info("addPrivilege");
if (columnName != null) {
statement.execute("CREATE DATABASE IF NOT EXISTS " + dbName);
statement.execute("USE " + dbName);
- statement.execute("GRANT " + action + " ( " + columnName + " ) ON TABLE " + tableName + " TO ROLE " + roleName);
+ String sql = "GRANT " + action + " ( " + columnName + " ) ON TABLE " + tableName + " TO ROLE " + roleName;
+ LOGGER.info("Granting column level privilege: database = " + dbName + ", sql = " + sql);
+ statement.execute(sql);
} else if (tableName != null) {
statement.execute("CREATE DATABASE IF NOT EXISTS " + dbName);
statement.execute("USE " + dbName);
- statement.execute("GRANT " + action + " ON TABLE " + tableName + " TO ROLE " + roleName);
+ String sql = "GRANT " + action + " ON TABLE " + tableName + " TO ROLE " + roleName;
+ LOGGER.info("Granting table level privilege: database = " + dbName + ", sql = " + sql);
+ statement.execute(sql);
} else if (dbName != null) {
- statement.execute("GRANT " + action + " ON DATABASE " + dbName + " TO ROLE " + roleName);
+ String sql = "GRANT " + action + " ON DATABASE " + dbName + " TO ROLE " + roleName;
+ LOGGER.info("Granting db level privilege: " + sql);
+ statement.execute(sql);
} else if (uriPath != null) {
- statement.execute("GRANT " + action + " ON URI '" + uriPath + "' TO ROLE " + roleName);//ALL?
+ String sql = "GRANT " + action + " ON URI '" + uriPath + "' TO ROLE " + roleName;
+ LOGGER.info("Granting uri level privilege: " + sql);
+ statement.execute(sql);//ALL?
} else if (serverName != null) {
- statement.execute("GRANT ALL ON SERVER " + serverName + " TO ROLE " + roleName);
- ;
+ String sql = "GRANT ALL ON SERVER " + serverName + " TO ROLE " + roleName;
+ LOGGER.info("Granting server level privilege: " + sql);
+ statement.execute(sql);
}
}
}
@@ -375,6 +440,7 @@ private static void setupSentryService() throws Exception {
properties.put(ServerConfig.SENTRY_STORE_JDBC_URL,
"jdbc:derby:;databaseName=" + baseDir.getPath() + "/sentrystore_db;create=true");
+ properties.put(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy");
properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);
properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath());
properties.put(ServerConfig.RPC_MIN_THREADS, "3");
@@ -400,6 +466,7 @@ private static void setupSentryService() throws Exception {
}
startSentryService();
if (setMetastoreListener) {
+ LOGGER.info("setMetastoreListener is enabled");
properties.put(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
SentryMetastorePostEventListener.class.getName());
}
@@ -417,48 +484,127 @@ public static SentryPolicyServiceClient getSentryClient() throws Exception {
return SentryServiceClientFactory.create(sentryServer.get(0).getConf());
}
+ /**
+ * Get an authorized Sentry client to communicate with the Sentry server.
+ * The client may run against a minicluster or a real distributed cluster,
+ * and the Sentry server may be backed by a policy file or run as a service.
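+ * Example usage (illustrative; the short name and keytab path are hypothetical):
+ * SentryPolicyServiceClient client = getSentryClient("hive", "/path/to/hive.keytab");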
+ * @param clientShortName: principal prefix string
+ * @param clientKeyTabDir: authorization key path
+ * @return sentry client to talk to sentry server
+ * @throws Exception
+ */
+ public static SentryPolicyServiceClient getSentryClient(String clientShortName,
+ String clientKeyTabDir) throws Exception {
+ if (!useSentryService) {
+ LOGGER.info("Running on a minicluster env.");
+ return getSentryClient();
+ }
+
+ if (clientKerberos) {
+ if (sentryConf == null ) {
+ sentryConf = new Configuration(false);
+ }
+ final String SENTRY_HOST = System.getProperty("sentry.host", SERVER_HOST);
+ final String SERVER_KERBEROS_PRINCIPAL = "sentry/" + SENTRY_HOST + "@" + REALM;
+ sentryConf.set(ServerConfig.PRINCIPAL, SERVER_KERBEROS_PRINCIPAL);
+ sentryConf.set(ServerConfig.KEY_TAB, SERVER_KEY_TAB);
+ sentryConf.set(ServerConfig.ALLOW_CONNECT, "hive");
+ sentryConf.set(ServerConfig.SECURITY_USE_UGI_TRANSPORT, "false");
+ sentryConf.set(ClientConfig.SERVER_RPC_ADDRESS,
+ System.getProperty("sentry.service.server.rpc.address"));
+ sentryConf.set(ClientConfig.SERVER_RPC_PORT,
+ System.getProperty("sentry.service.server.rpc.port", "8038"));
+ sentryConf.set(ClientConfig.SERVER_RPC_CONN_TIMEOUT, "720000"); //millis
+ Subject clientSubject = getClientSubject(clientShortName, clientKeyTabDir);
+ client = Subject.doAs(clientSubject,
+ new PrivilegedExceptionAction<SentryPolicyServiceClient>() {
+ @Override
+ public SentryPolicyServiceClient run() throws Exception {
+ return SentryServiceClientFactory.create(sentryConf);
+ }
+ });
+ } else {
+ client = getSentryClient();
+ }
+ return client;
+ }
+
@Before
public void setup() throws Exception{
+ LOGGER.info("AbstractTestStaticConfiguration setup");
dfs.createBaseDir();
+ if (clearDbPerTest) {
+ LOGGER.info("Before per test run clean up");
+ clearAll(true);
+ }
}
@After
- public void clearDB() throws Exception {
+ public void clearAfterPerTest() throws Exception {
+ LOGGER.info("AbstractTestStaticConfiguration clearAfterPerTest");
+ if (clearDbPerTest) {
+ LOGGER.info("After per test run clean up");
+ clearAll(true);
+ }
+ }
+
+ protected static void clearAll(boolean clearDb) throws Exception {
+ LOGGER.info("About to run clearAll");
ResultSet resultSet;
Connection connection = context.createConnection(ADMIN1);
Statement statement = context.createStatement(connection);
- if (clearDbAfterPerTest) {
- String[] dbs = { DB1, DB2, DB3 };
- for (String db : dbs) {
- statement.execute("DROP DATABASE if exists " + db + " CASCADE");
+ if (clearDb) {
+ LOGGER.info("About to clear all databases and default database tables");
+ resultSet = execQuery(statement, "SHOW DATABASES");
+ while (resultSet.next()) {
+ String db = resultSet.getString(1);
+ if (!db.equalsIgnoreCase("default")) {
+ try (Statement statement1 = context.createStatement(connection)) {
+ exec(statement1, "DROP DATABASE IF EXISTS " + db + " CASCADE");
+ } catch (Exception ex) {
+ // Databases and tables managed by processes other than Sentry
+ // may fail to drop here; log and continue.
+ LOGGER.error("Exception: " + ex);
+ }
+ }
}
- statement.execute("USE default");
- resultSet = statement.executeQuery("SHOW tables");
+ if (resultSet != null) { resultSet.close(); }
+ exec(statement, "USE default");
+ resultSet = execQuery(statement, "SHOW TABLES");
while (resultSet.next()) {
- Statement statement2 = context.createStatement(connection);
- statement2.execute("DROP table " + resultSet.getString(1));
- statement2.close();
+ try (Statement statement1 = context.createStatement(connection)) {
+ exec(statement1, "DROP TABLE IF EXISTS " + resultSet.getString(1));
+ } catch (Exception ex) {
+ // A table managed by a process other than Sentry
+ // may fail to drop here; log and continue.
+ LOGGER.error("Exception: " + ex);
+ }
}
+ if (resultSet != null) { resultSet.close(); }
}
if(useSentryService) {
- resultSet = statement.executeQuery("SHOW roles");
- List<String> roles = new ArrayList<String>();
+ LOGGER.info("About to clear all roles");
+ resultSet = execQuery(statement, "SHOW ROLES");
while (resultSet.next()) {
- roles.add(resultSet.getString(1));
- }
- for (String role : roles) {
- statement.execute("DROP Role " + role);
+ try (Statement statement1 = context.createStatement(connection)) {
+ String role = resultSet.getString(1);
+ if (!role.toLowerCase().contains("admin")) {
+ exec(statement1, "DROP ROLE " + role);
+ }
+ }
}
+ if (resultSet != null) { resultSet.close(); }
}
- statement.close();
- connection.close();
+ if (statement != null) { statement.close(); }
+ if (connection != null) { connection.close(); }
}
protected static void setupAdmin() throws Exception {
if(useSentryService) {
+ LOGGER.info("setupAdmin to create admin_role");
Connection connection = context.createConnection(ADMIN1);
Statement statement = connection.createStatement();
try {
@@ -474,6 +620,14 @@ protected static void setupAdmin() throws Exception {
}
}
+ protected PolicyFile setupPolicy() throws Exception {
+ LOGGER.info("Pre create policy file with admin group mapping");
+ PolicyFile policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP);
+ policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping());
+ writePolicyFile(policyFile);
+ return policyFile;
+ }
+
@AfterClass
public static void tearDownTestStaticConfiguration() throws Exception {
if(hiveServer != null) {
@@ -507,4 +661,68 @@ public static void tearDownTestStaticConfiguration() throws Exception {
public static SentrySrv getSentrySrv() {
return sentryServer;
}
+
+ /**
+ * A convenience method to validate that expected is equivalent to returned:
+ * first check that each expected item is in the returned list;
+ * then check that each returned item is in the expected list.
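+ * Example (illustrative): expected = [tab1, tab2] and returned = [tab2, tab1]
+ * passes; returned = [tab1] fails because tab2 is missing from it.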
+ */
+ protected void validateReturnedResult(List<String> expected, List<String> returned) {
+ for (String obj : expected) {
+ Assert.assertTrue("expected " + obj + " not found in the returned list: " + returned.toString(),
+ returned.contains(obj));
+ }
+ for (String obj : returned) {
+ Assert.assertTrue("returned " + obj + " not found in the expected list: " + expected.toString(),
+ expected.contains(obj));
+ }
+ }
+
+ /**
+ * A convenience function to run a sequence of SQL commands
+ * @param user
+ * @param sqls
+ * @throws Exception
+ */
+ protected static void execBatch(String user, List<String> sqls) throws Exception {
+ Connection conn = context.createConnection(user);
+ Statement stmt = context.createStatement(conn);
+ for (String sql : sqls) {
+ exec(stmt, sql);
+ }
+ if (stmt != null) {
+ stmt.close();
+ }
+ if (conn != null) {
+ conn.close();
+ }
+ }
+
+ /**
+ * A convenience function to run one SQL statement with logging
+ * @param stmt
+ * @param sql
+ * @throws Exception
+ */
+ protected static void exec(Statement stmt, String sql) throws Exception {
+ if (stmt == null) {
+ LOGGER.error("Statement is null");
+ return;
+ }
+ LOGGER.info("Running [" + sql + "]");
+ stmt.execute(sql);
+ }
+
+ /**
+ * A convenience function to execute a query with logging, then return the ResultSet
+ * @param stmt
+ * @param sql
+ * @return ResultSet
+ * @throws Exception
+ */
+ protected static ResultSet execQuery(Statement stmt, String sql) throws Exception {
+ LOGGER.info("Running [" + sql + "]");
+ return stmt.executeQuery(sql);
+ }
}
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java
index f600fdf8c..2e508d637 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java
@@ -36,13 +36,12 @@ import java.sql.Statement;
import java.util.Set;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.pig.ExecType;
import org.apache.pig.PigServer;
@@ -76,7 +75,7 @@ public class Context {
private final Set<Statement> statements;
public Context(HiveServer hiveServer, FileSystem fileSystem,
- File baseDir, File confDir, File dataDir, File policyFile) throws Exception {
+ File baseDir, File dataDir, File policyFile) throws Exception {
this.hiveServer = hiveServer;
this.fileSystem = fileSystem;
this.baseDir = baseDir;
@@ -192,7 +191,7 @@ public void assertSentryException(Statement statement, String query, String exce
Assert.fail("Expected SQLException for '" + query + "'");
} catch (SQLException e) {
verifyAuthzExceptionForState(e, AUTHZ_LINK_FAILURE_SQL_STATE);
- Assert.assertTrue("Expected " + exceptionType + " : " + e.getMessage(),
+ assertTrue("Expected " + exceptionType + " : " + e.getMessage(),
Strings.nullToEmpty(e.getMessage()).contains(exceptionType));
}
}
@@ -204,7 +203,7 @@ public void assertSentrySemanticException(Statement statement, String query, Str
Assert.fail("Expected SQLException for '" + query + "'");
} catch (SQLException e) {
verifyAuthzExceptionForState(e, AUTHZ_EXCEPTION_SQL_STATE);
- Assert.assertTrue("Expected " + exceptionType + " : " +
e.getMessage(), + assertTrue("Expected " + exceptionType + " : " + e.getMessage(), Strings.nullToEmpty(e.getMessage()).contains(exceptionType)); } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/DummySentryOnFailureHook.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/DummySentryOnFailureHook.java index 4838f76d4..99614e08c 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/DummySentryOnFailureHook.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/DummySentryOnFailureHook.java @@ -17,15 +17,12 @@ package org.apache.sentry.tests.e2e.hive; -import junit.framework.Assert; - import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.sentry.binding.hive.SentryOnFailureHook; import org.apache.sentry.binding.hive.SentryOnFailureHookContext; import org.apache.sentry.core.model.db.Database; import org.apache.sentry.core.model.db.Table; -import org.apache.sentry.provider.db.SentryAccessDeniedException; public class DummySentryOnFailureHook implements SentryOnFailureHook { diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/PrivilegeResultSet.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/PrivilegeResultSet.java new file mode 100644 index 000000000..8818c4c2b --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/PrivilegeResultSet.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.sentry.tests.e2e.hive;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * This class holds the ResultSet obtained from querying Sentry privileges:
+ * header: contains the result header information, an array of strings
+ * privilegeResultSet: contains the privilege results from the query
+ */
+public class PrivilegeResultSet {
+ private static final Logger LOGGER = LoggerFactory
+ .getLogger(PrivilegeResultSet.class);
+
+ protected int colNum = 0;
+ protected List<String> header;
+ protected List<ArrayList<String>> privilegeResultSet;
+
+ public PrivilegeResultSet() {
+ header = new ArrayList<String>();
+ privilegeResultSet = new ArrayList<ArrayList<String>>();
+ }
+
+ public PrivilegeResultSet(Statement stmt, String query) {
+ LOGGER.info("Getting result set for " + query);
+ this.header = new ArrayList<String>();
+ this.privilegeResultSet = new ArrayList<ArrayList<String>>();
+ ResultSet rs = null;
+ try {
+ rs = stmt.executeQuery(query);
+ ResultSetMetaData rsmd = rs.getMetaData();
+ this.colNum = rsmd.getColumnCount();
+ for (int i = 1; i <= this.colNum; i++) {
+ this.header.add(rsmd.getColumnName(i).trim());
+ }
+ while (rs.next()) {
+ ArrayList<String> row = new ArrayList<String>();
+ for (int i = 1; i <= colNum; i++) {
+ row.add(rs.getString(i).trim());
+ }
+ this.privilegeResultSet.add(row);
+ }
+ } catch (Exception ex) {
+ LOGGER.info("Exception when executing query: " + ex);
+ } finally {
+ try {
+ rs.close();
+ } catch (Exception ex) {
+ LOGGER.error("failed to close result set: " + ex);
+ }
+ }
+ }
+
+ public List<ArrayList<String>> getResultSet() {
+ return this.privilegeResultSet;
+ }
+
+ public List<String> getHeader() {
+ return this.header;
+ }
+
+ /**
+ * Given a column name, validate whether one of its values equals the given colVal
+ */
+ public boolean verifyResultSetColumn(String colName, String colVal) {
+ for (int i = 0; i < this.colNum; i++) {
+ if (this.header.get(i).equalsIgnoreCase(colName)) {
+ for (int j = 0; j < this.privilegeResultSet.size(); j++) {
+ if (this.privilegeResultSet.get(j).get(i).equalsIgnoreCase(colVal)) {
+ LOGGER.info("Found " + colName + " contains a value = " + colVal);
+ return true;
+ }
+ }
+ }
+ }
+ LOGGER.error("Failed to detect " + colName + " contains a value = " + colVal);
+ return false;
+ }
+
+ /**
+ * Unmarshal the ResultSet into a string
+ */
+ @Override
+ public String toString() {
+ String prettyPrintString = "\n";
+ for (String h : this.header) {
+ prettyPrintString += h + ",";
+ }
+ prettyPrintString += "\n";
+ for (ArrayList<String> row : this.privilegeResultSet) {
+ for (String val : row) {
+ if (val.isEmpty()) {
+ val = "null";
+ }
+ prettyPrintString += val + ",";
+ }
+ prettyPrintString += "\n";
+ }
+ return prettyPrintString;
+ }
+}
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestConfigTool.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestConfigTool.java
index cd5a75f72..ac0a9cdd6 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestConfigTool.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestConfigTool.java
@@ -33,7 +33,6 @@ import org.apache.sentry.core.common.SentryConfigurationException;
import org.apache.sentry.core.common.Subject;
import org.apache.sentry.provider.file.PolicyFile;
-import org.junit.After;
import org.junit.Before;
import org.junit.Test;
diff --git
a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestCrossDbOps.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestCrossDbOps.java index 5d3a4f1ed..b123dcd00 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestCrossDbOps.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestCrossDbOps.java @@ -31,38 +31,46 @@ import java.util.ArrayList; import java.util.List; -import junit.framework.Assert; +import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import com.google.common.io.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /* Tests privileges at table scope with cross database access */ public class TestCrossDbOps extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestCrossDbOps.class); + private File dataFile; private PolicyFile policyFile; private String loadData; @BeforeClass public static void setupTestStaticConfiguration() throws Exception{ + LOGGER.info("TestCrossDbOps setupTestStaticConfiguration"); policyOnHdfs = true; AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } @Before public void setup() throws Exception { + LOGGER.info("TestCrossDbOps setup"); + policyFile = super.setupPolicy(); + super.setup(); File dataDir = context.getDataDir(); // copy data file to test dir dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); loadData = "server=server1->uri=file://" + dataFile.getPath(); - } /* @@ -73,24 +81,9 @@ public void setup() throws Exception { */ @Test public void testShowDatabasesAndShowTables() throws Exception { - // edit policy file - policyFile - .addRolesToGroup(USERGROUP1, "select_tab1", "insert_tab2") - .addRolesToGroup(USERGROUP2, "select_tab3") - .addPermissionsToRole("select_tab1", "server=server1->db=" + DB1 + "->table=tab1->action=select") - .addPermissionsToRole("select_tab3", "server=server1->db=" + DB2 + "->table=tab3->action=select") - .addPermissionsToRole("insert_tab2", "server=server1->db=" + DB2 + "->table=tab2->action=insert") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - // admin create two databases Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS DB_1 CASCADE"); - statement.execute("DROP DATABASE IF EXISTS DB_2 CASCADE"); - statement.execute("DROP DATABASE IF EXISTS DB1 CASCADE"); - statement.execute("DROP DATABASE IF EXISTS DB2 CASCADE"); - statement.execute("CREATE DATABASE " + DB1); statement.execute("CREATE DATABASE " + DB2); statement.execute("USE " + DB1); @@ -100,78 +93,72 @@ public void testShowDatabasesAndShowTables() throws Exception { statement.execute("CREATE TABLE TAB2(id int)"); statement.execute("CREATE TABLE TAB3(id int)"); + // load policy file and grant role with privileges + policyFile + .addRolesToGroup(USERGROUP1, "select_tab1", "insert_tab2") + .addRolesToGroup(USERGROUP2, "select_tab3") + .addPermissionsToRole("select_tab1", "server=server1->db=" + DB1 + "->table=tab1->action=select") + .addPermissionsToRole("select_tab3", "server=server1->db=" + DB2 + "->table=tab3->action=select") 
+ .addPermissionsToRole("insert_tab2", "server=server1->db=" + DB2 + "->table=tab2->action=insert") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + + // show grant to validate roles and privileges + if(useSentryService) { + PrivilegeResultSet pRset = new PrivilegeResultSet(statement, "SHOW GRANT ROLE select_tab1 ON DATABASE " + DB1); + LOGGER.info("SHOW GRANT ROLE select_tab1 ON DATABASE " + DB1 + " : " + pRset.toString()); + pRset.verifyResultSetColumn("database", DB1); + pRset.verifyResultSetColumn("table", "tab1"); + + pRset = new PrivilegeResultSet(statement, "SHOW GRANT ROLE insert_tab2 ON DATABASE " + DB2); + LOGGER.info("SHOW GRANT ROLE insert_tab2 ON DATABASE " + DB2 + " : " + pRset.toString()); + pRset.verifyResultSetColumn("database", DB2); + pRset.verifyResultSetColumn("table", "tab2"); + + pRset = new PrivilegeResultSet(statement, "SHOW GRANT ROLE select_tab3 ON DATABASE " + DB2); + LOGGER.info("SHOW GRANT ROLE select_tab3 ON DATABASE " + DB2 + " : " + pRset.toString()); + pRset.verifyResultSetColumn("database", DB2); + pRset.verifyResultSetColumn("table", "tab3"); + } + // test show databases // show databases shouldn't filter any of the dbs from the resultset Connection conn = context.createConnection(USER1_1); Statement stmt = context.createStatement(conn); - ResultSet res = stmt.executeQuery("SHOW DATABASES"); - List result = new ArrayList(); - result.add(DB1); - result.add(DB2); - result.add("default"); - - while (res.next()) { - String dbName = res.getString(1); - assertTrue(dbName, result.remove(dbName)); - } - assertTrue(result.toString(), result.isEmpty()); - res.close(); + PrivilegeResultSet pRset = new PrivilegeResultSet(stmt, "SHOW DATABASES"); + LOGGER.info("found databases :" + pRset.toString()); + pRset.verifyResultSetColumn("database_name", DB1); + pRset.verifyResultSetColumn("database_name", DB2); // test show tables stmt.execute("USE " + DB1); - res = stmt.executeQuery("SHOW TABLES"); - result.clear(); - result.add("tab1"); - - while (res.next()) { - String tableName = res.getString(1); - assertTrue(tableName, result.remove(tableName)); - } - assertTrue(result.toString(), result.isEmpty()); - res.close(); + pRset = new PrivilegeResultSet(stmt, "SHOW TABLES"); + LOGGER.info("found tables :" + pRset.toString()); + pRset.verifyResultSetColumn("tab_name", "tab1"); stmt.execute("USE " + DB2); - res = stmt.executeQuery("SHOW TABLES"); - result.clear(); - result.add("tab2"); + pRset = new PrivilegeResultSet(stmt, "SHOW TABLES"); + LOGGER.info("found tables :" + pRset.toString()); + pRset.verifyResultSetColumn("tab_name", "tab2"); - while (res.next()) { - String tableName = res.getString(1); - assertTrue(tableName, result.remove(tableName)); + try { + stmt.close(); + conn.close(); + } catch (Exception ex) { + // nothing to do } - assertTrue(result.toString(), result.isEmpty()); - res.close(); - - stmt.close(); - conn.close(); // test show databases and show tables for user2_1 conn = context.createConnection(USER2_1); stmt = context.createStatement(conn); - res = stmt.executeQuery("SHOW DATABASES"); - result.clear(); - result.add(DB2); - result.add("default"); - while (res.next()) { - String dbName = res.getString(1); - assertTrue(dbName, result.remove(dbName)); - } - assertTrue(result.toString(), result.isEmpty()); - res.close(); + pRset = new PrivilegeResultSet(stmt, "SHOW DATABASES"); + pRset.verifyResultSetColumn("database_name", DB2); // test show tables stmt.execute("USE " + DB2); - res = stmt.executeQuery("SHOW 
TABLES"); - result.clear(); - result.add("tab3"); - - while (res.next()) { - String tableName = res.getString(1); - assertTrue(tableName, result.remove(tableName)); - } - assertTrue(result.toString(), result.isEmpty()); - res.close(); + pRset = new PrivilegeResultSet(stmt, "SHOW TABLES"); + pRset.verifyResultSetColumn("tab_name", "tab3"); try { stmt.execute("USE " + DB1); @@ -179,6 +166,7 @@ public void testShowDatabasesAndShowTables() throws Exception { } catch (SQLException e) { context.verifyAuthzException(e); } + context.close(); } @@ -190,24 +178,9 @@ public void testShowDatabasesAndShowTables() throws Exception { */ @Test public void testJDBCGetSchemasAndGetTables() throws Exception { - // edit policy file - policyFile.addRolesToGroup(USERGROUP1, "select_tab1", "insert_tab2") - .addRolesToGroup(USERGROUP2, "select_tab3") - .addPermissionsToRole("select_tab1", "server=server1->db=" + DB1 + "->table=tab1->action=select") - .addPermissionsToRole("select_tab3", "server=server1->db=" + DB2 + "->table=tab3->action=select") - .addPermissionsToRole("insert_tab2", "server=server1->db=" + DB2 + "->table=tab2->action=insert") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - - // admin create two databases Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS DB_1 CASCADE"); - statement.execute("DROP DATABASE IF EXISTS DB_2 CASCADE"); - statement.execute("DROP DATABASE IF EXISTS DB1 CASCADE"); - statement.execute("DROP DATABASE IF EXISTS DB2 CASCADE"); - statement.execute("CREATE DATABASE " + DB1); statement.execute("CREATE DATABASE " + DB2); statement.execute("USE " + DB1); @@ -217,72 +190,84 @@ public void testJDBCGetSchemasAndGetTables() throws Exception { statement.execute("CREATE TABLE TAB2(id int)"); statement.execute("CREATE TABLE TAB3(id int)"); + // edit policy file + policyFile.addRolesToGroup(USERGROUP1, "select_tab1", "insert_tab2") + .addRolesToGroup(USERGROUP2, "select_tab3") + .addPermissionsToRole("select_tab1", "server=server1->db=" + DB1 + "->table=tab1->action=select") + .addPermissionsToRole("select_tab3", "server=server1->db=" + DB2 + "->table=tab3->action=select") + .addPermissionsToRole("insert_tab2", "server=server1->db=" + DB2 + "->table=tab2->action=insert") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + // test show databases // show databases shouldn't filter any of the dbs from the resultset Connection conn = context.createConnection(USER1_1); - List result = new ArrayList(); - + Statement stmt = context.createStatement(conn); // test direct JDBC metadata API - ResultSet res = conn.getMetaData().getSchemas(); + ResultSet res = stmt.executeQuery("SHOW DATABASES"); + res = conn.getMetaData().getSchemas(); ResultSetMetaData resMeta = res.getMetaData(); assertEquals(2, resMeta.getColumnCount()); assertEquals("TABLE_SCHEM", resMeta.getColumnName(1)); assertEquals("TABLE_CATALOG", resMeta.getColumnName(2)); - result.add(DB1); - result.add(DB2); - result.add("default"); + List expectedResult = new ArrayList(); + List returnedResult = new ArrayList(); + expectedResult.add(DB1); + expectedResult.add(DB2); + expectedResult.add("default"); while (res.next()) { - String dbName = res.getString(1); - assertTrue(dbName, result.remove(dbName)); + returnedResult.add(res.getString(1).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, 
returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); // test direct JDBC metadata API res = conn.getMetaData().getTables(null, DB1, "tab%", null); - result.add("tab1"); - + expectedResult.add("tab1"); while (res.next()) { - String tableName = res.getString(3); - assertTrue(tableName, result.remove(tableName)); + returnedResult.add(res.getString(3).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); // test direct JDBC metadata API res = conn.getMetaData().getTables(null, DB2, "tab%", null); - result.add("tab2"); - + expectedResult.add("tab2"); while (res.next()) { - String tableName = res.getString(3); - assertTrue(tableName, result.remove(tableName)); + returnedResult.add(res.getString(3).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); res = conn.getMetaData().getTables(null, "DB%", "tab%", null); - result.add("tab2"); - result.add("tab1"); - + expectedResult.add("tab2"); + expectedResult.add("tab1"); while (res.next()) { - String tableName = res.getString(3); - assertTrue(tableName, result.remove(tableName)); + returnedResult.add(res.getString(3).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); //test show columns res = conn.getMetaData().getColumns(null, "DB%", "tab%","i%" ); - result.add("id"); - result.add("id"); + expectedResult.add("id"); while (res.next()) { - String columnName = res.getString(4); - assertTrue(columnName, result.remove(columnName)); + returnedResult.add(res.getString(4).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); conn.close(); @@ -297,46 +282,51 @@ public void testJDBCGetSchemasAndGetTables() throws Exception { assertEquals("TABLE_SCHEM", resMeta.getColumnName(1)); assertEquals("TABLE_CATALOG", resMeta.getColumnName(2)); - result.add(DB2); - result.add("default"); + expectedResult.add(DB2); + expectedResult.add("default"); while (res.next()) { - String dbName = res.getString(1); - assertTrue(dbName, result.remove(dbName)); + returnedResult.add(res.getString(1).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); // test JDBC direct API res = conn.getMetaData().getTables(null, "DB%", "tab%", null); - result.add("tab3"); + expectedResult.add("tab3"); while (res.next()) { - String tableName = res.getString(3); - assertTrue(tableName, result.remove(tableName)); + returnedResult.add(res.getString(3).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); + //test show columns res = conn.getMetaData().getColumns(null, "DB%", "tab%","i%" ); - result.add("id"); + expectedResult.add("id"); while (res.next()) { - String columnName = res.getString(4); - assertTrue(columnName, result.remove(columnName)); + returnedResult.add(res.getString(4).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, 
returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); //test show columns res = conn.getMetaData().getColumns(null, DB1, "tab%","i%" ); while (res.next()) { - String columnName = res.getString(4); - assertTrue(columnName, result.remove(columnName)); + returnedResult.add(res.getString(4).trim()); } - assertTrue(result.toString(), result.isEmpty()); + validateReturnedResult(expectedResult, returnedResult); + returnedResult.clear(); + expectedResult.clear(); res.close(); context.close(); @@ -350,16 +340,16 @@ public void testJDBCGetSchemasAndGetTables() throws Exception { */ @Test public void testDbPrivileges() throws Exception { + createDb(ADMIN1, DB1, DB2); + // edit policy file policyFile.addRolesToGroup(USERGROUP1, "db1_all,db2_all, load_data") - .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) - .addPermissionsToRole("db2_all", "server=server1->db=" + DB2) - .addPermissionsToRole("load_data", "server=server1->URI=file://" + dataFile.getPath()) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) + .addPermissionsToRole("db2_all", "server=server1->db=" + DB2) + .addPermissionsToRole("load_data", "server=server1->URI=file://" + dataFile.getPath()) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); - dropDb(ADMIN1, DB1, DB2); - createDb(ADMIN1, DB1, DB2); for (String user : new String[]{USER1_1, USER1_2}) { for (String dbName : new String[]{DB1, DB2}) { Connection userConn = context.createConnection(user); @@ -385,12 +375,12 @@ public void testDbPrivileges() throws Exception { */ @Test public void testAdminDbPrivileges() throws Exception { + createDb(ADMIN1, DB1); + policyFile - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); - dropDb(ADMIN1, DB1); - createDb(ADMIN1, DB1); Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); String tabName = DB1 + "." 
+ "admin_tab1"; @@ -412,21 +402,22 @@ public void testAdminDbPrivileges() throws Exception { */ @Test public void testNegativeUserPrivileges() throws Exception { - // edit policy file - policyFile.addRolesToGroup(USERGROUP1, "db1_tab1_insert", "db1_tab2_all") - .addPermissionsToRole("db1_tab2_all", "server=server1->db=" + DB1 + "->table=table_2") - .addPermissionsToRole("db1_tab1_insert", "server=server1->db=" + DB1 + "->table=table_1->action=insert") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); adminStmt.execute("create table " + DB1 + ".table_1 (id int)"); + adminStmt.execute("create table " + DB1 + ".table_2 (id int)"); adminStmt.close(); adminCon.close(); + + // edit policy file + policyFile.addRolesToGroup(USERGROUP1, "db1_tab1_insert", "db1_tab2_all") + .addPermissionsToRole("db1_tab2_all", "server=server1->db=" + DB1 + "->table=table_2") + .addPermissionsToRole("db1_tab1_insert", "server=server1->db=" + DB1 + "->table=table_1->action=insert") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + Connection userConn = context.createConnection(USER1_1); Statement userStmt = context.createStatement(userConn); context.assertAuthzException(userStmt, "select * from " + DB1 + ".table_1"); @@ -442,13 +433,6 @@ public void testNegativeUserPrivileges() throws Exception { */ @Test public void testNegativeUserDMLPrivileges() throws Exception { - policyFile - .addPermissionsToRole("db1_tab2_all", "server=server1->db=" + DB1 + "->table=table_2") - .addRolesToGroup(USERGROUP1, "db1_tab2_all") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); @@ -456,6 +440,13 @@ public void testNegativeUserDMLPrivileges() throws Exception { adminStmt.execute("create table " + DB1 + ".table_2 (id int)"); adminStmt.close(); adminCon.close(); + + policyFile + .addPermissionsToRole("db1_tab2_all", "server=server1->db=" + DB1 + "->table=table_2") + .addRolesToGroup(USERGROUP1, "db1_tab2_all") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + Connection userConn = context.createConnection(USER1_1); Statement userStmt = context.createStatement(userConn); context.assertAuthzException(userStmt, "insert overwrite table " + DB1 @@ -485,15 +476,6 @@ public void testNegativeUserDMLPrivileges() throws Exception { */ @Test public void testNegUserPrivilegesAll() throws Exception { - - policyFile - .addRolesToGroup(USERGROUP1, "db1_all") - .addRolesToGroup(USERGROUP2, "db1_tab1_select") - .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) - .addPermissionsToRole("db1_tab1_select", "server=server1->db=" + DB1 + "->table=table_1->action=select") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - // create dbs Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); @@ -503,7 +485,6 @@ public void testNegUserPrivilegesAll() throws Exception { adminStmt .execute("load data local inpath '" + dataFile.getPath() + "' into table table_def"); - 
adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); adminStmt.execute("use " + DB1); @@ -521,6 +502,14 @@ public void testNegUserPrivilegesAll() throws Exception { adminStmt.close(); adminCon.close(); + policyFile + .addRolesToGroup(USERGROUP1, "db1_all") + .addRolesToGroup(USERGROUP2, "db1_tab1_select") + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) + .addPermissionsToRole("db1_tab1_select", "server=server1->db=" + DB1 + "->table=table_1->action=select") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + Connection userConn = context.createConnection(USER2_1); Statement userStmt = context.createStatement(userConn); @@ -564,15 +553,14 @@ public void testNegUserPrivilegesAll() throws Exception { */ @Test public void testSandboxOpt9() throws Exception { + createDb(ADMIN1, DB1, DB2); + policyFile - .addPermissionsToRole(GROUP1_ROLE, ALL_DB1, ALL_DB2, loadData) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + .addPermissionsToRole(GROUP1_ROLE, ALL_DB1, ALL_DB2, loadData) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); - dropDb(ADMIN1, DB1, DB2); - createDb(ADMIN1, DB1, DB2); - Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); @@ -614,8 +602,6 @@ public void testSandboxOpt9() throws Exception { context.assertAuthzException(statement, "CREATE TABLE " + DB1 + "." + TBL2 + " AS SELECT value from " + DB2 + "." + TBL2 + " LIMIT 10"); - - statement.close(); connection.close(); } @@ -633,18 +619,7 @@ public void testSandboxOpt9() throws Exception { */ @Test public void testCrossDbViewOperations() throws Exception { - // edit policy file - policyFile - .addRolesToGroup(USERGROUP1, "all_db1", "load_data", "select_tb2") - .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) - .addPermissionsToRole("all_db2", "server=server1->db=" + DB2) - .addPermissionsToRole("select_tb2", "server=server1->db=" + DB2 + "->table=tb_1->action=select") - .addPermissionsToRole("load_data", "server=server1->URI=file://" + dataFile.getPath()) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - // admin create two databases - dropDb(ADMIN1, DB1, DB2); createDb(ADMIN1, DB1, DB2); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); @@ -656,6 +631,16 @@ public void testCrossDbViewOperations() throws Exception { .execute("CREATE TABLE " + DB2 + "." 
+ TBL2 + "(id int)"); context.close(); + // edit policy file + policyFile + .addRolesToGroup(USERGROUP1, "all_db1", "load_data", "select_tb2") + .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) + .addPermissionsToRole("all_db2", "server=server1->db=" + DB2) + .addPermissionsToRole("select_tb2", "server=server1->db=" + DB2 + "->table=tb_1->action=select") + .addPermissionsToRole("load_data", "server=server1->URI=file://" + dataFile.getPath()) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestCustomSerdePrivileges.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestCustomSerdePrivileges.java new file mode 100644 index 000000000..27238154b --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestCustomSerdePrivileges.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.tests.e2e.hive; + +import com.google.common.collect.Maps; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.provider.file.PolicyFile; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.security.CodeSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Map; + +public class TestCustomSerdePrivileges extends AbstractTestWithHiveServer { + private static Context context; + private static Map properties; + private PolicyFile policyFile; + + @BeforeClass + public static void setUp() throws Exception { + properties = Maps.newHashMap(); + + // Start the Hive Server without buildin Serde, such as + // "org.apache.hadoop.hive.serde2.OpenCSVSerde". Instead, + // used a bogus class name for testing. + properties.put(HiveAuthzConf.HIVE_SENTRY_SERDE_WHITELIST, "org.example.com"); + properties.put(HiveAuthzConf.HIVE_SENTRY_SERDE_URI_PRIVILIEGES_ENABLED, "true"); + context = createContext(properties); + } + + @AfterClass + public static void tearDown() throws Exception { + if(context != null) { + context.close(); + } + } + + @Before + public void setupPolicyFile() throws Exception { + policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); + } + + /** + * User with db level access and Uri privileges on the Serde Jar should be able + * to create tables with Serde. + * User with db level access but without Uri privileges on the Serde Jar will fail + * on creating tables with Serde. 
+ */ + @Test + public void testSerdePrivilegesWithoutBuildinJar() throws Exception { + String db = "db1"; + String tableName1 = "tab1"; + + String serdeClassName = "org.apache.hadoop.hive.serde2.OpenCSVSerde"; + CodeSource serdeSrc = Class.forName(serdeClassName).getProtectionDomain().getCodeSource(); + String serdeLocation = serdeSrc.getLocation().getPath(); + + policyFile + .addRolesToGroup(USERGROUP1, "db1_all") + .addRolesToGroup(USERGROUP2, "db1_all", "SERDE_JAR") + .addPermissionsToRole("db1_all", "server=server1->db=" + db) + .addPermissionsToRole("db1_tab1", "server=server1->db=" + db + "->table=" + tableName1) + .addPermissionsToRole("SERDE_JAR", "server=server1->uri=file://" + serdeLocation) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + policyFile.write(context.getPolicyFile()); + + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + statement.execute("DROP DATABASE IF EXISTS " + db + " CASCADE"); + statement.execute("CREATE DATABASE " + db); + context.close(); + + // User1 does not have the URI privileges to use the Serde Jar. + // The table creation will fail. + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + statement.execute("USE " + db); + try { + statement.execute("create table " + db + "." + tableName1 + " (a string, b string) " + + "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' " + " STORED AS TEXTFILE"); + Assert.fail("Expect create table with Serde to fail"); + } catch (SQLException e) { + context.verifyAuthzException(e); + } + context.close(); + + // User2 has the URI privileges to use the Serde Jar. + // The table creation will succeed. + connection = context.createConnection(USER2_1); + statement = context.createStatement(connection); + statement.execute("USE " + db); + statement.execute("create table " + db + "." + tableName1 + " (a string, b string) ROW FORMAT" + + " SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' " + " STORED AS TEXTFILE"); + context.close(); + } +} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestExportImportPrivileges.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestExportImportPrivileges.java index b9e4da9e0..5242bb12c 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestExportImportPrivileges.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestExportImportPrivileges.java @@ -26,23 +26,34 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import com.google.common.io.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestExportImportPrivileges extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory. 
+ getLogger(TestExportImportPrivileges.class); private File dataFile; private PolicyFile policyFile; + @BeforeClass + public static void setupTestStaticConfiguration () throws Exception { + LOGGER.info("TestExportImportPrivileges setupTestStaticConfiguration"); + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + @Before public void setup() throws Exception { + LOGGER.info("TestExportImportPrivileges setup"); + policyFile = super.setupPolicy(); + super.setup(); dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); - policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); } @Test @@ -51,18 +62,17 @@ public void testInsertToDirPrivileges() throws Exception { Statement statement = null; String dumpDir = dfs.getBaseDir() + "/hive_data_dump"; - policyFile - .addRolesToGroup(USERGROUP1, "db1_read", "db1_write", "data_dump") - .addRolesToGroup(USERGROUP2, "db1_read", "db1_write") - .addPermissionsToRole("db1_write", "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=INSERT") - .addPermissionsToRole("db1_read", "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=SELECT") - .addPermissionsToRole("data_dump", "server=server1->URI=" + dumpDir); - writePolicyFile(policyFile); - - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); createTable(ADMIN1, DB1, dataFile, TBL1); + policyFile + .addRolesToGroup(USERGROUP1, "db1_read", "db1_write", "data_dump") + .addRolesToGroup(USERGROUP2, "db1_read", "db1_write") + .addPermissionsToRole("db1_write", "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=INSERT") + .addPermissionsToRole("db1_read", "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=SELECT") + .addPermissionsToRole("data_dump", "server=server1->URI=" + dumpDir); + writePolicyFile(policyFile); + // Negative test, user2 doesn't have access to write to dir connection = context.createConnection(USER2_1); statement = context.createStatement(connection); @@ -94,7 +104,6 @@ public void testExportImportPrivileges() throws Exception { Connection connection = null; Statement statement = null; String exportDir = dfs.getBaseDir() + "/hive_export1"; - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); createTable(ADMIN1, DB1, dataFile, TBL1); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestJDBCInterface.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestJDBCInterface.java new file mode 100644 index 000000000..bc5c08be4 --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestJDBCInterface.java @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.tests.e2e.hive; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import org.apache.sentry.provider.file.PolicyFile; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TestJDBCInterface extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory. + getLogger(TestJDBCInterface.class); + private static PolicyFile policyFile; + + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception { + LOGGER.info("TestJDBCInterface setupTestStaticConfiguration"); + policyOnHdfs = true; + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + + @Before + public void setup() throws Exception { + LOGGER.info("TestJDBCInterface setup"); + policyFile = super.setupPolicy(); + super.setup(); + } + + /* + * Admin creates DB_1 and DB_2, with table tab_1 in DB_1 and tables tab_2 and + * tab_3 in DB_2. User user1 has select on DB_1.tab_1 and insert on DB_2.tab_2; + * user user2 has select on DB_2.tab_3. Test show databases and show tables for + * both user1 and user2. + */ + @Test + public void testJDBCGetSchemasAndGetTables() throws Exception { + // admin creates two databases + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + statement.execute("DROP DATABASE IF EXISTS DB_1 CASCADE"); + statement.execute("DROP DATABASE IF EXISTS DB_2 CASCADE"); + statement.execute("DROP DATABASE IF EXISTS DB1 CASCADE"); + statement.execute("DROP DATABASE IF EXISTS DB2 CASCADE"); + + statement.execute("CREATE DATABASE " + DB1); + statement.execute("CREATE DATABASE " + DB2); + statement.execute("USE " + DB1); + statement.execute("CREATE TABLE TAB1(id int)"); + statement.executeQuery("SHOW TABLES"); + statement.execute("USE " + DB2); + statement.execute("CREATE TABLE TAB2(id int)"); + statement.execute("CREATE TABLE TAB3(id int)"); + + // edit policy file + policyFile + .addRolesToGroup(USERGROUP1, "select_tab1", "insert_tab2") + .addRolesToGroup(USERGROUP2, "select_tab3") + .addPermissionsToRole("select_tab1", + "server=server1->db=" + DB1 + "->table=tab1->action=select") + .addPermissionsToRole("select_tab3", + "server=server1->db=" + DB2 + "->table=tab3->action=select") + .addPermissionsToRole("insert_tab2", + "server=server1->db=" + DB2 + "->table=tab2->action=insert"); + writePolicyFile(policyFile); + + // test show databases + // show databases shouldn't filter any of the dbs from the resultset + Connection conn = context.createConnection(USER1_1); + List<String> expectedResult = new ArrayList<String>(); + List<String> returnedResult = new ArrayList<String>(); + + // test direct JDBC metadata API + ResultSet res = conn.getMetaData().getSchemas(); + ResultSetMetaData resMeta = res.getMetaData(); + assertEquals(2, resMeta.getColumnCount()); + assertEquals("TABLE_SCHEM", resMeta.getColumnName(1)); + assertEquals("TABLE_CATALOG", resMeta.getColumnName(2)); + + expectedResult.add(DB1); + expectedResult.add(DB2); + expectedResult.add("default"); + + while (res.next()) { + returnedResult.add(res.getString(1)); + } + validateReturnedResult(expectedResult,
returnedResult); + expectedResult.clear(); + returnedResult.clear(); + res.close(); + + // test direct JDBC metadata API + res = conn.getMetaData().getTables(null, DB1, "tab%", null); + expectedResult.add("tab1"); + + while (res.next()) { + returnedResult.add(res.getString(3)); + } + validateReturnedResult(expectedResult, returnedResult); + expectedResult.clear(); + returnedResult.clear(); + res.close(); + + // test direct JDBC metadata API + res = conn.getMetaData().getTables(null, DB2, "tab%", null); + expectedResult.add("tab2"); + + while (res.next()) { + returnedResult.add(res.getString(3)); + } + validateReturnedResult(expectedResult, returnedResult); + expectedResult.clear(); + returnedResult.clear(); + res.close(); + + res = conn.getMetaData().getTables(null, "DB%", "tab%", null); + expectedResult.add("tab2"); + expectedResult.add("tab1"); + + while (res.next()) { + returnedResult.add(res.getString(3)); + } + validateReturnedResult(expectedResult, returnedResult); + expectedResult.clear(); + returnedResult.clear(); + res.close(); + + // test show columns + res = conn.getMetaData().getColumns(null, "DB%", "tab%", "i%"); + expectedResult.add("id"); + expectedResult.add("id"); + + while (res.next()) { + returnedResult.add(res.getString(4)); + } + validateReturnedResult(expectedResult, returnedResult); + expectedResult.clear(); + returnedResult.clear(); + res.close(); + + conn.close(); + + // test show databases and show tables for user2 + conn = context.createConnection(USER2_1); + + // test direct JDBC metadata API + res = conn.getMetaData().getSchemas(); + resMeta = res.getMetaData(); + assertEquals(2, resMeta.getColumnCount()); + assertEquals("TABLE_SCHEM", resMeta.getColumnName(1)); + assertEquals("TABLE_CATALOG", resMeta.getColumnName(2)); + + expectedResult.add(DB2); + expectedResult.add("default"); + + while (res.next()) { + returnedResult.add(res.getString(1)); + } + validateReturnedResult(expectedResult, returnedResult); + expectedResult.clear(); + returnedResult.clear(); + res.close(); + + // test JDBC direct API + res = conn.getMetaData().getTables(null, "DB%", "tab%", null); + expectedResult.add("tab3"); + + while (res.next()) { + returnedResult.add(res.getString(3)); + } + validateReturnedResult(expectedResult, returnedResult); + expectedResult.clear(); + returnedResult.clear(); + res.close(); + + // test show columns + res = conn.getMetaData().getColumns(null, "DB%", "tab%", "i%"); + expectedResult.add("id"); + + while (res.next()) { + returnedResult.add(res.getString(4)); + } + validateReturnedResult(expectedResult, returnedResult); + expectedResult.clear(); + returnedResult.clear(); + res.close(); + + // test show columns + res = conn.getMetaData().getColumns(null, DB1, "tab%", "i%"); + + while (res.next()) { + returnedResult.add(res.getString(4)); + } + assertTrue("returned result shouldn't contain any value, actually returned result = " + returnedResult.toString(), + returnedResult.isEmpty()); + res.close(); + + context.close(); + } + +} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestMetadataObjectRetrieval.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestMetadataObjectRetrieval.java index fbfb0312d..fb0ef19a5 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestMetadataObjectRetrieval.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestMetadataObjectRetrieval.java @@ -28,17 +28,30 @@ import 
java.sql.Statement; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import com.google.common.io.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestMetadataObjectRetrieval extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestMetadataObjectRetrieval.class); private PolicyFile policyFile; private File dataFile; + @BeforeClass + public static void setupTestStaticConfiguration () throws Exception { + LOGGER.info("TestMetadataObjectRetrieval setupTestStaticConfiguration"); + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + @Before public void setup() throws Exception { - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); + LOGGER.info("TestMetadataObjectRetrieval setup"); + policyFile = super.setupPolicy(); + super.setup(); dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); @@ -53,7 +66,7 @@ public void setup() throws Exception { * show create table table * show tblproperties table * - * The table is assumed to have two colums under_col int and value string. + * The table is assumed to have two columns under_col int and value string. */ private void positiveDescribeShowTests(String user, String db, String table) throws Exception { Connection connection = context.createConnection(user); @@ -78,6 +91,27 @@ private void positiveDescribeShowTests(String user, String db, String table) thr assertTrue("describe table fail", rs.getString(1).trim().equals("value")); assertTrue("describe table fail", rs.getString(2).trim().equals("string")); + rs = statement.executeQuery("DESCRIBE EXTENDED " + table); + assertTrue(rs.next()); + assertTrue(rs.getString(1), rs.getString(1).contains("under_col")); + assertTrue(rs.getString(2), rs.getString(2).contains("int")); + assertTrue(rs.next()); + assertTrue(rs.getString(1), rs.getString(1).contains("value")); + assertTrue(rs.getString(2), rs.getString(2).contains("string")); + assertTrue(rs.next()); + + rs = statement.executeQuery("DESCRIBE FORMATTED " + table); + // Skip the header + assertTrue(rs.next()); + assertTrue(rs.next()); + assertTrue(rs.next()); + assertTrue(rs.getString(1), rs.getString(1).contains("under_col")); + assertTrue(rs.getString(2), rs.getString(2).contains("int")); + assertTrue(rs.next()); + assertTrue(rs.getString(1), rs.getString(1).contains("value")); + assertTrue(rs.getString(2), rs.getString(2).contains("string")); + assertTrue(rs.next()); + rs = statement.executeQuery("SHOW COLUMNS FROM " + table); assertTrue(rs.next()); assertTrue("show columns from fail", rs.getString(1).trim().equals("under_col")); @@ -107,9 +141,10 @@ private void negativeDescribeShowTests(String user, String db, String table) thr Connection connection = context.createConnection(user); Statement statement = context.createStatement(connection); statement.execute("USE " + db); - context.assertAuthzException(statement, "DESCRIBE " + table); context.assertAuthzException(statement, "DESCRIBE " + table + " under_col"); context.assertAuthzException(statement, "DESCRIBE " + table + " value"); + context.assertAuthzException(statement, "DESCRIBE FORMATTED " + table); + context.assertAuthzException(statement, "DESCRIBE EXTENDED " + table); context.assertAuthzException(statement, "SHOW COLUMNS FROM " + table); context.assertAuthzException(statement, "SHOW CREATE TABLE " + table); 
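
[Editorial aside] Both TestJDBCInterface above and these describe/show assertions exercise Hive metadata paths that Sentry must filter or deny. For readers who want to probe the same surface outside the test harness, here is a minimal standalone sketch using plain java.sql; the URL, user, and patterns are illustrative assumptions (the e2e tests obtain connections via context.createConnection instead), and it requires the Hive JDBC driver on the classpath:

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class MetadataFilterProbe {
  public static void main(String[] args) throws Exception {
    // Hypothetical HiveServer2 endpoint and user; adjust for your cluster.
    try (Connection conn = DriverManager.getConnection(
        "jdbc:hive2://localhost:10000/default", "user1", "")) {
      DatabaseMetaData md = conn.getMetaData();

      // getSchemas() should list only the databases this user may see;
      // Sentry-filtered results are what the assertions above verify.
      try (ResultSet schemas = md.getSchemas()) {
        while (schemas.next()) {
          System.out.println("visible db: " + schemas.getString("TABLE_SCHEM"));
        }
      }

      // getTables(catalog, schemaPattern, tableNamePattern, types):
      // "%" patterns behave like SHOW TABLES but travel the JDBC
      // metadata path, which must be authorized independently.
      try (ResultSet tables = md.getTables(null, "db%", "tab%", null)) {
        while (tables.next()) {
          System.out.println("visible table: " + tables.getString("TABLE_NAME"));
        }
      }
    }
  }
}
```
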
context.assertAuthzException(statement, "SHOW TBLPROPERTIES " + table); @@ -142,15 +177,17 @@ private void negativeDescribeShowTests(String user, String db, String table) thr @Test public void testAllOnServerSelectInsertNegativeNoneAllOnDifferentTable() throws Exception { - policyFile - .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1 + "->table=" + TBL2) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); createTable(ADMIN1, DB1, dataFile, TBL1); + createTable(ADMIN1, DB1, dataFile, TBL2); positiveDescribeShowTests(ADMIN1, DB1, TBL1); + + policyFile + .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1 + "->table=" + TBL2) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + negativeDescribeShowTests(USER1_1, DB1, TBL1); policyFile.addPermissionsToRole(GROUP1_ROLE, SELECT_DB1_TBL1); @@ -159,7 +196,7 @@ public void testAllOnServerSelectInsertNegativeNoneAllOnDifferentTable() policyFile.removePermissionsFromRole(GROUP1_ROLE, SELECT_DB1_TBL1); policyFile - .addPermissionsToRole(GROUP1_ROLE, INSERT_DB1_TBL1); + .addPermissionsToRole(GROUP1_ROLE, INSERT_DB1_TBL1); writePolicyFile(policyFile); positiveDescribeShowTests(USER1_1, DB1, TBL1); } @@ -181,16 +218,16 @@ public void testAllOnServerSelectInsertNegativeNoneAllOnDifferentTable() */ @Test public void testAllOnServerAndAllOnDb() throws Exception { - policyFile - .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); createTable(ADMIN1, DB1, dataFile, TBL1); positiveDescribeShowTests(ADMIN1, DB1, TBL1); + + policyFile + .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + positiveDescribeShowTests(USER1_1, DB1, TBL1); } @@ -212,12 +249,6 @@ public void testAllOnServerAndAllOnDb() throws Exception { */ @Test public void testAllOnServerNegativeAllOnView() throws Exception { - policyFile - .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1 + "->table=" + VIEW1) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); createTable(ADMIN1, DB1, dataFile, TBL1); Connection connection = context.createConnection(ADMIN1); @@ -228,6 +259,13 @@ public void testAllOnServerNegativeAllOnView() throws Exception { positiveDescribeShowTests(ADMIN1, DB1, TBL1); statement.close(); connection.close(); + + policyFile + .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1 + "->table=" + VIEW1) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + negativeDescribeShowTests(USER1_1, DB1, TBL1); } @@ -248,15 +286,16 @@ public void testAllOnServerNegativeAllOnView() throws Exception { */ @Test public void testAllOnServerAndAllOnTable() throws Exception { - policyFile - .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1 + "->table=" + TBL1) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - 
writePolicyFile(policyFile); - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); createTable(ADMIN1, DB1, dataFile, TBL1); positiveDescribeShowTests(ADMIN1, DB1, TBL1); + + policyFile + .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1 + "->table=" + TBL1) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + positiveDescribeShowTests(USER1_1, DB1, TBL1); } @@ -269,14 +308,15 @@ public void testAllOnServerAndAllOnTable() throws Exception { @Test public void testDescribeDatabasesWithAllOnServerAndAllOnDb() throws Exception { + dropDb(ADMIN1, DB1, DB2); + createDb(ADMIN1, DB1, DB2); + createTable(ADMIN1, DB1, dataFile, TBL1); + createTable(ADMIN1, DB2, dataFile, TBL1); policyFile .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=" + DB1) .addRolesToGroup(USERGROUP1, GROUP1_ROLE) .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); - dropDb(ADMIN1, DB1, DB2); - createDb(ADMIN1, DB1, DB2); - createTable(ADMIN1, DB1, dataFile, TBL1); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); @@ -305,14 +345,9 @@ public void testDescribeDatabasesWithAllOnServerAndAllOnDb() */ @Test public void testDescribeDefaultDatabase() throws Exception { - policyFile - .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=default->table=" + TBL1 + "->action=select", - "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=select") - .addRolesToGroup(USERGROUP1, GROUP1_ROLE) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - dropDb(ADMIN1, DB1, DB2); createDb(ADMIN1, DB1, DB2); + createTable(ADMIN1, "default", dataFile, TBL1); + createTable(ADMIN1, DB1, dataFile, TBL1); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); assertTrue(statement.executeQuery("DESCRIBE DATABASE default").next()); @@ -323,6 +358,13 @@ public void testDescribeDefaultDatabase() throws Exception { statement.close(); connection.close(); + policyFile + .addPermissionsToRole(GROUP1_ROLE, "server=server1->db=default->table=" + TBL1 + "->action=select", + "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=select") + .addRolesToGroup(USERGROUP1, GROUP1_ROLE) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); context.assertAuthzException(statement, "DESCRIBE DATABASE default"); @@ -340,14 +382,9 @@ public void testDescribeDefaultDatabase() throws Exception { */ @Test public void testShowIndexes1() throws Exception { - // grant privilege to non-existent table to allow use db1 - policyFile.addPermissionsToRole(GROUP1_ROLE, SELECT_DB1_NONTABLE) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); createTable(ADMIN1, DB1, dataFile, TBL1); + createTable(ADMIN1, DB1, dataFile, TBL2); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); @@ -362,6 +399,13 @@ public void testShowIndexes1() throws Exception { statement.execute("CREATE VIEW " + VIEW1 + " (value) AS SELECT value from " + TBL1 + " LIMIT 10"); statement.close(); connection.close(); + + // grant privilege to table2 to allow use 
db1 + policyFile.addPermissionsToRole(GROUP1_ROLE, SELECT_DB1_TBL2) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE " + DB1); @@ -402,16 +446,13 @@ private void verifyIndex(Statement statement, String dbName, String table, Strin */ @Test public void testShowPartitions1() throws Exception { - // grant privilege to non-existent table to allow use db1 - policyFile.addPermissionsToRole(GROUP1_ROLE, SELECT_DB1_NONTABLE) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); + statement.execute("DROP TABLE IF EXISTS " + TBL2); + statement.execute("create table " + TBL2 + + " (under_col int, value string) PARTITIONED BY (dt INT)"); statement.execute("DROP TABLE IF EXISTS " + TBL1); statement.execute("create table " + TBL1 + " (under_col int, value string) PARTITIONED BY (dt INT)"); @@ -421,6 +462,13 @@ public void testShowPartitions1() throws Exception { statement.execute("CREATE VIEW " + VIEW1 + " (value) AS SELECT value from " + TBL1 + " LIMIT 10"); statement.close(); connection.close(); + + // grant privilege to table2 to allow use db1 + policyFile.addPermissionsToRole(GROUP1_ROLE, SELECT_DB1_TBL2) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE " + DB1); @@ -446,7 +494,7 @@ public void testShowPartitions1() throws Exception { } private void verifyParition(Statement statement, String table) throws Exception { - ResultSet rs = statement.executeQuery("SHOW PARTITIONS " + TBL1); + ResultSet rs = statement.executeQuery("SHOW PARTITIONS " + table); assertTrue(rs.next()); assertEquals("dt=3", rs.getString(1).trim()); } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestMetadataPermissions.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestMetadataPermissions.java index 25d1f8c44..05420d1e7 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestMetadataPermissions.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestMetadataPermissions.java @@ -19,7 +19,7 @@ import java.sql.Connection; import java.sql.Statement; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.provider.file.PolicyFile; import org.junit.Before; @@ -32,13 +32,7 @@ public class TestMetadataPermissions extends AbstractTestWithStaticConfiguration @Before public void setup() throws Exception { policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); - - policyFile - .addRolesToGroup(USERGROUP1, "db1_all", "db2_all") - .addRolesToGroup(USERGROUP2, "db1_all") - .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) - .addPermissionsToRole("db2_all", "server=server1->db=" + DB2) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); Connection adminCon = 
context.createConnection(ADMIN1); @@ -52,6 +46,14 @@ public void setup() throws Exception { adminStmt.execute("CREATE TABLE " + tabName + " (id int)"); } } + + policyFile + .addRolesToGroup(USERGROUP1, "db1_all", "db2_all") + .addRolesToGroup(USERGROUP2, "db1_all") + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) + .addPermissionsToRole("db2_all", "server=server1->db=" + DB2); + + writePolicyFile(policyFile); } /** diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java index 2fbdfa65e..7d3db2b13 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.sentry.provider.file.PolicyFile; import static org.junit.Assert.assertTrue; import org.junit.Before; @@ -33,8 +34,13 @@ import org.junit.Test; import com.google.common.io.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestOperations extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestOperations.class); + private PolicyFile policyFile; final String tableName = "tb1"; @@ -131,6 +137,35 @@ public void testCreateOnServer() throws Exception{ } + @Test + public void testInsertInto() throws Exception{ + File dataFile; + dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); + FileOutputStream to = new FileOutputStream(dataFile); + Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); + to.close(); + + adminCreate(DB1, null); + policyFile + .addPermissionsToRole("all_db1", privileges.get("all_db1")) + .addPermissionsToRole("all_uri", "server=server1->uri=file://" + dataDir) + .addRolesToGroup(USERGROUP1, "all_db1", "all_uri"); + + + writePolicyFile(policyFile); + + Connection connection = context.createConnection(USER1_1); + Statement statement = context.createStatement(connection); + statement.execute("Use " + DB1); + statement.execute("create table bar (key int)"); + statement.execute("load data local inpath '" + dataFile.getPath() + "' into table bar"); + statement.execute("create table foo (key int) partitioned by (part int) stored as parquet"); + statement.execute("insert into table foo PARTITION(part=1) select key from bar"); + + statement.close(); + connection.close(); + } + /* Test all operations that require create on Database alone 1. 
Create table : HiveOperation.CREATETABLE */ @@ -189,13 +224,13 @@ public void testDropOnDatabase() throws Exception{ statement.close(); connection.close(); + adminCreate(DB1, null); + policyFile .addPermissionsToRole("all_db1", privileges.get("all_db1")) .addRolesToGroup(USERGROUP2, "all_db1"); writePolicyFile(policyFile); - adminCreate(DB1, null); - connection = context.createConnection(USER2_1); statement = context.createStatement(connection); statement.execute("DROP DATABASE " + DB1); @@ -259,7 +294,7 @@ public void testAlterOnDatabase() throws Exception{ */ @Test public void testDescDB() throws Exception { - adminCreate(DB1, null); + adminCreate(DB1, tableName); policyFile .addPermissionsToRole("select_db1", privileges.get("select_db1")) .addPermissionsToRole("insert_db1", privileges.get("insert_db1")) @@ -293,7 +328,7 @@ public void testDescDB() throws Exception { } private void assertSemanticException(Statement stmt, String command) throws SQLException{ - context.assertSentrySemanticException(stmt,command, semanticException); + context.assertSentrySemanticException(stmt, command, semanticException); } /* @@ -445,13 +480,6 @@ public void testInsertOnTable() throws Exception { @Test public void testAlterTable() throws Exception { adminCreate(DB1, tableName, true); - policyFile - .addPermissionsToRole("alter_db1_tb1", privileges.get("alter_db1_tb1")) - .addPermissionsToRole("alter_db1_ptab", privileges.get("alter_db1_ptab")) - .addRolesToGroup(USERGROUP1, "alter_db1_tb1", "alter_db1_ptab") - .addPermissionsToRole("insert_db1_tb1", privileges.get("insert_db1_tb1")) - .addRolesToGroup(USERGROUP2, "insert_db1_tb1"); - writePolicyFile(policyFile); Connection connection; Statement statement; @@ -461,7 +489,17 @@ public void testAlterTable() throws Exception { statement.execute("Use " + DB1); statement.execute("ALTER TABLE tb1 ADD IF NOT EXISTS PARTITION (b = '10') "); statement.execute("ALTER TABLE tb1 ADD IF NOT EXISTS PARTITION (b = '1') "); + statement.execute("DROP TABLE IF EXISTS ptab"); statement.execute("CREATE TABLE ptab (a int) STORED AS PARQUET"); + + policyFile + .addPermissionsToRole("alter_db1_tb1", privileges.get("alter_db1_tb1")) + .addPermissionsToRole("alter_db1_ptab", privileges.get("alter_db1_ptab")) + .addRolesToGroup(USERGROUP1, "alter_db1_tb1", "alter_db1_ptab") + .addPermissionsToRole("insert_db1_tb1", privileges.get("insert_db1_tb1")) + .addRolesToGroup(USERGROUP2, "insert_db1_tb1"); + writePolicyFile(policyFile); + //Negative test cases connection = context.createConnection(USER2_1); statement = context.createStatement(connection); @@ -897,6 +935,8 @@ public void testCTAS() throws Exception { adminCreate(DB1, tableName); adminCreate(DB2, null); + String location = dfs.getBaseDir() + "/" + Math.random(); + Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("Use " + DB1); @@ -905,19 +945,27 @@ public void testCTAS() throws Exception { connection.close(); policyFile - .addPermissionsToRole("select_db1_tb1", privileges.get("select_db1_tb1")) - .addPermissionsToRole("select_db1_view1", privileges.get("select_db1_view1")) - .addPermissionsToRole("create_db2", privileges.get("create_db2")) - .addRolesToGroup(USERGROUP1, "select_db1_tb1", "create_db2") - .addRolesToGroup(USERGROUP2, "select_db1_view1", "create_db2"); + .addPermissionsToRole("select_db1_tb1", privileges.get("select_db1_tb1")) + .addPermissionsToRole("select_db1_view1", privileges.get("select_db1_view1")) + 
.addPermissionsToRole("create_db2", privileges.get("create_db2")) + .addPermissionsToRole("all_uri", "server=server1->uri=" + location) + .addRolesToGroup(USERGROUP1, "select_db1_tb1", "create_db2") + .addRolesToGroup(USERGROUP2, "select_db1_view1", "create_db2") + .addRolesToGroup(USERGROUP3, "select_db1_tb1", "create_db2,all_uri"); writePolicyFile(policyFile); connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("Use " + DB2); - statement.execute("create table tb2 as select a from " + DB1 + ".tb1" ); + statement.execute("create table tb2 as select a from " + DB1 + ".tb1"); + //Ensure CTAS fails without URI + context.assertSentrySemanticException(statement, "create table tb3 location '" + location + + "' as select a from " + DB1 + ".tb1", + semanticException); context.assertSentrySemanticException(statement, "create table tb3 as select a from " + DB1 + ".view1", - semanticException); + semanticException); + + statement.close(); connection.close(); @@ -926,12 +974,24 @@ public void testCTAS() throws Exception { statement.execute("Use " + DB2); statement.execute("create table tb3 as select a from " + DB1 + ".view1" ); context.assertSentrySemanticException(statement, "create table tb4 as select a from " + DB1 + ".tb1", - semanticException); + semanticException); statement.close(); connection.close(); + + connection = context.createConnection(USER3_1); + statement = context.createStatement(connection); + //CTAS is valid with URI + statement.execute("Use " + DB2); + statement.execute("create table tb4 location '" + location + + "' as select a from " + DB1 + ".tb1"); + + statement.close(); + connection.close(); + } + /* 1. INSERT : IP: select on table, OP: insert on table + all on uri(optional) */ @@ -961,7 +1021,7 @@ public void testInsert() throws Exception { Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); - assertSemanticException(statement, "insert overwrite directory '" + location + "' select * from " + DB1 + ".tb1" ); + assertSemanticException(statement, "insert overwrite directory '" + location + "' select * from " + DB1 + ".tb1"); statement.execute("insert overwrite table " + DB2 + ".tb2 select * from " + DB1 + ".tb1"); statement.close(); connection.close(); @@ -969,7 +1029,7 @@ public void testInsert() throws Exception { connection = context.createConnection(USER2_1); statement = context.createStatement(connection); statement.execute("insert overwrite directory '" + location + "' select * from " + DB1 + ".tb1" ); - assertSemanticException(statement,"insert overwrite table " + DB2 + ".tb2 select * from " + DB1 + ".tb1"); + assertSemanticException(statement, "insert overwrite table " + DB2 + ".tb2 select * from " + DB1 + ".tb1"); statement.close(); connection.close(); } @@ -1019,4 +1079,33 @@ public void testExternalTables() throws Exception{ } + + @Test + public void testCaseSensitivity() throws Exception { + Statement statement = null; + Connection connection = null; + try { + createDb(ADMIN1, DB1); + String scratchLikeDir = context.getProperty(HiveConf.ConfVars.SCRATCHDIR.varname); + String extParentDir = dfs.assertCreateDir(scratchLikeDir + "/ABC/hhh").toUri().toString(); + String extTableDir = dfs.assertCreateDir(scratchLikeDir + "/abc/hhh").toUri().toString(); + LOGGER.info("Created extParentDir = " + extParentDir + ", extTableDir = " + extTableDir); + policyFile + .addPermissionsToRole("all_db1", privileges.get("all_db1")) + 
.addPermissionsToRole("all_uri", "server=server1->uri=" + extParentDir) + .addRolesToGroup(USERGROUP1, "all_db1", "all_uri"); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + assertSemanticException(statement, + "create external table " + DB1 + ".tb1(a int) location '" + extTableDir + "'"); + } finally { + if (statement != null) { + statement.close(); + } + if (connection != null) { + connection.close(); + } + } + } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPerDBConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPerDBConfiguration.java index 30541d99c..985f96987 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPerDBConfiguration.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPerDBConfiguration.java @@ -50,7 +50,6 @@ public class TestPerDBConfiguration extends AbstractTestWithStaticConfiguration @BeforeClass public static void setupTestStaticConfiguration() throws Exception { AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); - prepareDBDataForTest(); } @Before @@ -64,14 +63,12 @@ public void setup() throws Exception { prefix = "file://" + context.getPolicyFile().getParent() + "/"; } - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); + policyFile = super.setupPolicy(); + super.setup(); + prepareDBDataForTest(); } protected static void prepareDBDataForTest() throws Exception { - clearDbAfterPerTest = false; - PolicyFile policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP).setUserGroupMapping( - StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); // copy data file to test dir dataDir = context.getDataDir(); dataFile = new File(dataDir, MULTI_TYPE_DATA_FILE_NAME); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPolicyImport.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPolicyImport.java deleted file mode 100644 index 7ebc0e40c..000000000 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPolicyImport.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2014 The Apache Software Foundation. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.sentry.tests.e2e.hive; - -import static org.junit.Assert.*; - -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; -import org.apache.sentry.SentryUserException; -import org.apache.sentry.binding.hive.authz.SentryConfigTool; -import org.apache.sentry.core.model.db.AccessConstants; -import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; -import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; -import org.apache.sentry.provider.db.service.thrift.TSentryRole; -import org.apache.sentry.provider.file.PolicyFile; -import org.apache.sentry.service.thrift.SentryServiceClientFactory; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -public class TestPolicyImport extends AbstractTestWithStaticConfiguration { - - private static String prefix; - private PolicyFile policyFile; - private SentryConfigTool configTool; - - @BeforeClass - public static void setupTestStaticConfiguration() throws Exception{ - useSentryService = true; - AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); - } - - @Before - public void setup() throws Exception { - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); - policyFile.addGroupsToUser("hive", ADMINGROUP); - policyFile.addGroupsToUser(ADMIN1, ADMINGROUP); - - configTool = new SentryConfigTool(); - String hiveServer2 = System.getProperty("sentry.e2etest.hiveServer2Type", - "InternalHiveServer2"); - String policyOnHDFS = System.getProperty( - "sentry.e2etest.hive.policyOnHDFS", "true"); - if (policyOnHDFS.trim().equalsIgnoreCase("true") - && (hiveServer2.equals("UnmanagedHiveServer2"))) { - String policyLocation = System.getProperty( - "sentry.e2etest.hive.policy.location", "/user/hive/sentry"); - prefix = "hdfs://" + policyLocation + "/"; - } else { - prefix = "file://" + context.getPolicyFile().getParent() + "/"; - } - - } - - @Test - public void testImportPolicy() throws Exception { - policyFile.addRolesToGroup("analyst", "analyst_role", "customers_select_role", "analyst_salary_role"); - policyFile.addRolesToGroup("jranalyst", "junior_analyst_role"); - policyFile.addRolesToGroup("manager", "analyst_role", "junior_analyst_role", - "customers_insert_role", "customers_select_role"); - policyFile.addRolesToGroup("customers_admin", "customers_admin_role"); - - policyFile.addPermissionsToRole("analyst_role", "server=server1->db=analyst_db", - "server=server1->db=jranalyst_db->table=*->action=select"); - policyFile.addPermissionsToRole("junior_analyst_role", "server=server1->db=jranalyst_db"); - policyFile.addPermissionsToRole("customers_admin_role", "server=server1->db=customers"); - policyFile.addPermissionsToRole("customers_insert_role", "server=server1->db=customers->table=*->action=insert"); - policyFile.addPermissionsToRole("customers_select_role", "server=server1->db=customers->table=*->action=select"); - policyFile.addPermissionsToRole("analyst_salary_role", "server=server1->db=customers->table=customer_info->column=salary->action=select"); - - policyFile.write(context.getPolicyFile()); - - configTool.setImportPolicy(true); - configTool.setPolicyFile(context.getPolicyFile().getPath()); - configTool.setupConfig(); - - configTool.importPolicy(); - - SentryPolicyServiceClient client = SentryServiceClientFactory.create(configTool.getAuthzConf()); - verifyRoles(client, "analyst", "analyst_role", "customers_select_role", "analyst_salary_role"); - verifyRoles(client, "jranalyst", "junior_analyst_role"); - verifyRoles(client, "manager", 
"analyst_role", "junior_analyst_role", - "customers_insert_role", "customers_select_role"); - verifyRoles(client, "customers_admin", "customers_admin_role"); - - verifyPrivileges(client, "analyst_role", - createPrivilege(AccessConstants.ALL, "analyst_db", null, null), - createPrivilege(AccessConstants.SELECT, "jranalyst_db", null, null)); - verifyPrivileges(client, "junior_analyst_role", - createPrivilege(AccessConstants.ALL, "jranalyst_db", null, null)); - verifyPrivileges(client, "customers_admin_role", - createPrivilege(AccessConstants.ALL, "customers", null, null)); - verifyPrivileges(client, "customers_insert_role", - createPrivilege(AccessConstants.INSERT, "customers", null, null)); - verifyPrivileges(client, "customers_select_role", - createPrivilege(AccessConstants.SELECT, "customers", null, null)); - verifyPrivileges(client, "analyst_salary_role", - createPrivilege(AccessConstants.SELECT, "customers", "customer_info", "salary", null)); - } - - private void verifyRoles(SentryPolicyServiceClient client, String group, String ... roles) throws SentryUserException { - Set expectedRoles = new HashSet(Arrays.asList(roles)); - Set actualRoles = new HashSet(); - - Set groupRoles = client.listRolesByGroupName("hive", group); - for (TSentryRole role : groupRoles) { - actualRoles.add(role.getRoleName()); - } - - assertEquals("Expected roles don't match.", expectedRoles, actualRoles); - } - - private void verifyPrivileges(SentryPolicyServiceClient client, String role, TSentryPrivilege ... privileges) throws SentryUserException { - Set expectedPrivileges = new HashSet(Arrays.asList(privileges)); - Set actualPrivileges = client.listAllPrivilegesByRoleName("hive", role); - for (TSentryPrivilege privilege : actualPrivileges) { - privilege.unsetCreateTime(); - } - - assertEquals("Expected privileges don't match.", expectedPrivileges, actualPrivileges); - } - - private TSentryPrivilege createPrivilege(String action, String dbName, String tableName, String uri) { - String scope = "SERVER"; - if (uri != null) { - scope = "URI"; - } else if (dbName != null) { - if (tableName != null) { - scope = "TABLE"; - } else { - scope = "DATABASE"; - } - } - - TSentryPrivilege privilege = new TSentryPrivilege(scope, "server1", action); - if (dbName != null) { - privilege.setDbName(dbName); - } - - if (tableName != null) { - privilege.setDbName(tableName); - } - - if (uri != null) { - privilege.setURI(uri); - } - - return privilege; - } - - private TSentryPrivilege createPrivilege(String action, String dbName, String tableName, String columnName, String uri) { - String scope = "SERVER"; - if (uri != null) { - scope = "URI"; - } else if (dbName != null) { - if (columnName != null) { - scope = "COLUMN"; - } else if (tableName != null) { - scope = "TABLE"; - } else { - scope = "DATABASE"; - } - } - - TSentryPrivilege privilege = new TSentryPrivilege(scope, "server1", action); - if (dbName != null) { - privilege.setDbName(dbName); - } - - if (tableName != null) { - privilege.setTableName(tableName); - } - - if (columnName != null) { - privilege.setColumnName(columnName); - } - - if (uri != null) { - privilege.setURI(uri); - } - - return privilege; - } -} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPolicyImportExport.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPolicyImportExport.java new file mode 100644 index 000000000..c72aea323 --- /dev/null +++ 
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPolicyImportExport.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPolicyImportExport.java new file mode 100644 index 000000000..c72aea323 --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPolicyImportExport.java @@ -0,0 +1,195 @@ +/* + * Copyright 2014 The Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.tests.e2e.hive; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileOutputStream; +import java.util.Map; +import java.util.Set; + +import org.apache.sentry.binding.hive.SentryPolicyFileFormatFactory; +import org.apache.sentry.binding.hive.SentryPolicyFileFormatter; +import org.apache.sentry.binding.hive.authz.SentryConfigTool; +import org.apache.sentry.policy.common.PolicyConstants; +import org.apache.sentry.provider.common.PolicyFileConstants; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.io.Resources; + +public class TestPolicyImportExport extends AbstractTestWithStaticConfiguration { + + // resources/testPolicyImport.ini is used for the import test; the following + // privileges (PRIVILEGE1...PRIVILEGE8) are defined the same as in testPolicyImport.ini + // and are used to verify the test result.
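
[Editorial aside] Each privilege string declared just below is a chain of key=value authorizables joined by "->", the same grammar that PolicyConstants' joiners produce later in this file. A toy parser to make the shape concrete; this is illustrative only, not Sentry's implementation:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class PrivilegeStringDemo {
  // Splits "server=server1->db=db1->table=tbl1->action=insert" into
  // ordered key/value pairs; a grantoption=true/false pair may trail.
  static Map<String, String> parse(String privilege) {
    Map<String, String> parts = new LinkedHashMap<String, String>();
    for (String kv : privilege.split("->")) {
      String[] pair = kv.split("=", 2);
      parts.put(pair[0].trim().toLowerCase(), pair[1].trim());
    }
    return parts;
  }

  public static void main(String[] args) {
    // Prints {server=server1, db=db1, table=tbl2, column=col1, action=insert}
    System.out.println(parse(
        "server=server1->db=db1->table=tbl2->column=col1->action=insert"));
  }
}
```
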
+ public static String PRIVILEGE1 = "server=server1"; + public static String PRIVILEGE2 = "server=server1->action=select->grantoption=false"; + public static String PRIVILEGE3 = "server=server1->db=db2->action=insert->grantoption=true"; + public static String PRIVILEGE4 = "server=server1->db=db1->table=tbl1->action=insert"; + public static String PRIVILEGE5 = "server=server1->db=db1->table=tbl2->column=col1->action=insert"; + public static String PRIVILEGE6 = "server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true"; + public static String PRIVILEGE7 = "server=server1->db=db1->table=tbl4->column=col1->action=all->grantoption=true"; + public static String PRIVILEGE8 = "server=server1->uri=hdfs://testserver:9999/path2->action=insert"; + + private SentryConfigTool configTool; + private Map<String, Map<String, Set<String>>> policyFileMappingData; + + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception{ + useSentryService = true; + // add current user to admin group to get the permission for import/export + String requestorUserName = System.getProperty("user.name", ""); + StaticUserGroup.getStaticMapping().put(requestorUserName, ADMINGROUP); + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + + @Before + public void setup() throws Exception { + configTool = new SentryConfigTool(); + configTool.setPolicyFile(context.getPolicyFile().getPath()); + configTool.setupConfig(); + importAdminPrivilege(); + } + + private void importAdminPrivilege() throws Exception { + prepareForImport("testPolicyImportAdmin.ini"); + configTool.importPolicy(); + } + + private void prepareExpectedData() { + // test data for: + // [groups] + // group1=roleImport1,roleImport2 + // group2=roleImport1,roleImport2,roleImport3 + // group3=roleImport2,roleImport3 + // [roles] + // roleImport1=privilege1,privilege2,privilege3,privilege4 + // roleImport2=privilege3,privilege4,privilege5,privilege6 + // roleImport3=privilege5,privilege6,privilege7,privilege8 + policyFileMappingData = Maps.newHashMap(); + Map<String, Set<String>> groupRolesMap = Maps.newHashMap(); + Map<String, Set<String>> rolePrivilegesMap = Maps.newHashMap(); + groupRolesMap.put("group1", Sets.newHashSet("roleimport1", "roleimport2")); + groupRolesMap.put("group2", Sets.newHashSet("roleimport1", "roleimport2", "roleimport3")); + groupRolesMap.put("group3", Sets.newHashSet("roleimport2", "roleimport3")); + // the adminrole is defined in testPolicyImportAdmin.ini + groupRolesMap.put("admin", Sets.newHashSet("adminrole")); + rolePrivilegesMap.put("roleimport1", + Sets.newHashSet(PRIVILEGE1, PRIVILEGE2, PRIVILEGE3, PRIVILEGE4)); + rolePrivilegesMap.put("roleimport2", + Sets.newHashSet(PRIVILEGE3, PRIVILEGE4, PRIVILEGE5, PRIVILEGE6)); + rolePrivilegesMap.put("roleimport3", + Sets.newHashSet(PRIVILEGE5, PRIVILEGE6, PRIVILEGE7, PRIVILEGE8)); + // the adminrole is defined in testPolicyImportAdmin.ini + rolePrivilegesMap.put("adminrole", Sets.newHashSet(PRIVILEGE1)); + policyFileMappingData.put(PolicyFileConstants.GROUPS, groupRolesMap); + policyFileMappingData.put(PolicyFileConstants.ROLES, rolePrivilegesMap); + }
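
[Editorial aside] From the mapping built in prepareExpectedData() above, the imported resource plausibly looks like the following. This is a reconstruction for illustration; the bundled testPolicyImport.ini resource is the authority:

```ini
[groups]
group1 = roleImport1, roleImport2
group2 = roleImport1, roleImport2, roleImport3
group3 = roleImport2, roleImport3

[roles]
roleImport1 = server=server1, server=server1->action=select->grantoption=false, server=server1->db=db2->action=insert->grantoption=true, server=server1->db=db1->table=tbl1->action=insert
roleImport2 = server=server1->db=db2->action=insert->grantoption=true, server=server1->db=db1->table=tbl1->action=insert, server=server1->db=db1->table=tbl2->column=col1->action=insert, server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true
roleImport3 = server=server1->db=db1->table=tbl2->column=col1->action=insert, server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true, server=server1->db=db1->table=tbl4->column=col1->action=all->grantoption=true, server=server1->uri=hdfs://testserver:9999/path2->action=insert
```
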
+ @Test + public void testImportExportPolicy() throws Exception { + String importFileName = "testPolicyImport.ini"; + String exportFileName = "testPolicyExport.ini"; + File importFile = new File(dataDir, importFileName); + File exportFile = new File(dataDir, exportFileName); + FileOutputStream to = new FileOutputStream(importFile); + Resources.copy(Resources.getResource(importFileName), to); + to.close(); + configTool.setImportPolicyFilePath(importFile.getAbsolutePath()); + configTool.importPolicy(); + + configTool.setExportPolicyFilePath(exportFile.getAbsolutePath()); + configTool.exportPolicy(); + + SentryPolicyFileFormatter sentryPolicyFileFormatter = SentryPolicyFileFormatFactory + .createFileFormatter(configTool.getAuthzConf()); + Map<String, Map<String, Set<String>>> exportMappingData = sentryPolicyFileFormatter.parse( + exportFile.getAbsolutePath(), configTool.getAuthzConf()); + + prepareExpectedData(); + validateSentryMappingData(exportMappingData, policyFileMappingData); + } + + @Test + public void testImportExportPolicyForError() throws Exception { + prepareForImport("testPolicyImportError.ini"); + try { + configTool.importPolicy(); + fail("IllegalArgumentException should be thrown for: Invalid key value: server [server]"); + } catch (IllegalArgumentException ex) { + // ignore + } + } + + private void prepareForImport(String resourceName) throws Exception { + File importFile = new File(dataDir, resourceName); + FileOutputStream to = new FileOutputStream(importFile); + Resources.copy(Resources.getResource(resourceName), to); + to.close(); + configTool.setImportPolicyFilePath(importFile.getAbsolutePath()); + } + + // verify the mapping data + public void validateSentryMappingData(Map<String, Map<String, Set<String>>> actualMappingData, + Map<String, Map<String, Set<String>>> expectedMappingData) { + validateGroupRolesMap(actualMappingData.get(PolicyFileConstants.GROUPS), + expectedMappingData.get(PolicyFileConstants.GROUPS)); + validateRolePrivilegesMap(actualMappingData.get(PolicyFileConstants.ROLES), + expectedMappingData.get(PolicyFileConstants.ROLES)); + } + + // verify the mapping data for [group,role] + private void validateGroupRolesMap(Map<String, Set<String>> actualMap, + Map<String, Set<String>> expectedMap) { + assertEquals(expectedMap.keySet().size(), actualMap.keySet().size()); + for (String groupName : actualMap.keySet()) { + Set<String> actualRoles = actualMap.get(groupName); + Set<String> expectedRoles = expectedMap.get(groupName); + assertEquals(actualRoles.size(), expectedRoles.size()); + assertTrue(actualRoles.equals(expectedRoles)); + } + } + + // verify the mapping data for [role,privilege] + private void validateRolePrivilegesMap(Map<String, Set<String>> actualMap, + Map<String, Set<String>> expectedMap) { + assertEquals(expectedMap.keySet().size(), actualMap.keySet().size()); + for (String roleName : actualMap.keySet()) { + Set<String> actualPrivileges = actualMap.get(roleName); + Set<String> expectedPrivileges = expectedMap.get(roleName); + assertEquals(expectedPrivileges.size(), actualPrivileges.size()); + for (String actualPrivilege : actualPrivileges) { + boolean isFound = expectedPrivileges.contains(actualPrivilege); + if (!isFound) { + String withOptionPrivilege = PolicyConstants.AUTHORIZABLE_JOINER.join(actualPrivilege, + PolicyConstants.KV_JOINER.join(PolicyFileConstants.PRIVILEGE_GRANT_OPTION_NAME, + "false")); + isFound = expectedPrivileges.contains(withOptionPrivilege); + } + assertTrue(isFound); + } + } + } +}
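
[Editorial aside] The fallback in validateRolePrivilegesMap above is worth spelling out: an exported privilege that omits the grant option should still match an expected string that pins grantoption=false. A self-contained sketch of that normalization, with plain string concatenation standing in for PolicyConstants.AUTHORIZABLE_JOINER and KV_JOINER:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class GrantOptionMatchDemo {
  // Mirrors validateRolePrivilegesMap: look for the exported string
  // verbatim, then retry with an explicit "->grantoption=false" suffix
  // before declaring a mismatch.
  static boolean matches(Set<String> expected, String exported) {
    return expected.contains(exported)
        || expected.contains(exported + "->grantoption=false");
  }

  public static void main(String[] args) {
    Set<String> expected = new HashSet<String>(Arrays.asList(
        "server=server1->action=select->grantoption=false"));
    // Prints true: the export dropped the redundant grantoption=false suffix.
    System.out.println(matches(expected, "server=server1->action=select"));
  }
}
```
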
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtColumnScope.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtColumnScope.java index 9eeed608a..c2fee2af1 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtColumnScope.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtColumnScope.java @@ -23,7 +23,7 @@ import java.sql.SQLException; import java.sql.Statement; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.provider.file.PolicyFile; import org.junit.Before; @@ -42,26 +42,17 @@ public class TestPrivilegesAtColumnScope extends AbstractTestWithStaticConfigura @Before public void setup() throws Exception { - if (useSentryService) { - policyFile = new PolicyFile(); - } else { - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); - } + policyFile = super.setupPolicy(); + super.setup(); + prepareDBDataForTest(); } @BeforeClass public static void setupTestStaticConfiguration() throws Exception { AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); - prepareDBDataForTest(); } private static void prepareDBDataForTest() throws Exception { - clearDbAfterPerTest = false; - // if use sentry service, need setup admin role first - setupAdmin(); - PolicyFile policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP).setUserGroupMapping( - StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); // copy data file to test dir File dataDir = context.getDataDir(); File dataFile = new File(dataDir, MULTI_TYPE_DATA_FILE_NAME); @@ -82,6 +73,12 @@ private static void prepareDBDataForTest() throws Exception { statement.execute("CREATE TABLE TAB_2(A STRING, B STRING)"); statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE TAB_2"); statement.execute("CREATE VIEW VIEW_2(A,B) AS SELECT A,B FROM TAB_2"); + //create table with partitions + statement.execute("CREATE TABLE TAB_3 (A STRING, B STRING) partitioned by (C STRING)"); + statement.execute("ALTER TABLE TAB_3 ADD PARTITION (C=1)"); + statement.execute("ALTER TABLE TAB_3 ADD PARTITION (C=2)"); + statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE TAB_3 PARTITION (C=1)"); + statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE TAB_3 PARTITION (C=2)"); statement.close(); connection.close(); } @@ -460,4 +457,61 @@ public void testSelectColumnOnTableViewJoin() throws Exception { statement.close(); connection.close(); } + + @Test + public void testPartition() throws Exception{ + policyFile + .addRolesToGroup(USERGROUP1, "select_tab3_A", "select_tab3_C") + .addRolesToGroup(USERGROUP2, "select_tab3_A") + .addRolesToGroup(USERGROUP3, "select_tab3_C") + .addPermissionsToRole("select_tab3_A", "server=server1->db=DB_1->table=TAB_3->column=A->action=select") + .addPermissionsToRole("select_tab3_C", "server=server1->db=DB_1->table=TAB_3->column=C->action=select") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + + // Users with privileges on the partition column can access it + String [] positiveUsers = {USER1_1, USER3_1}; + for(String user:positiveUsers) { + Connection connection = context.createConnection(user); + Statement statement = context.createStatement(connection); + statement.execute("USE DB_1"); + statement.execute("SELECT C FROM TAB_3"); + statement.close(); + connection.close(); + } + + // Users without privileges on the partition column cannot access it + Connection connection = context.createConnection(USER2_1); + Statement statement = context.createStatement(connection); + statement.execute("USE DB_1"); + try { + statement.execute("SELECT C FROM TAB_3"); + Assert.fail("Expected SELECT on partition column C to be denied"); + } catch (SQLException e) { + context.verifyAuthzException(e); + } + statement.close(); + connection.close(); + }
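
[Editorial aside] testPartition above is the interesting case: with column-level authorization, the partition column C is authorized like any ordinary column, so user3 with select on C alone can still project it. A minimal sketch of the positive/negative probe the test performs; the harness supplies real connections and context.verifyAuthzException inspects the error, so treating any SQLException as a denial here is a deliberate simplification:

```java
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class PartitionColumnProbe {
  // Returns true if the user behind `conn` may project partition column C
  // of DB_1.TAB_3; false when the statement is rejected.
  static boolean canSelectPartitionColumn(Connection conn) {
    try (Statement stmt = conn.createStatement()) {
      stmt.execute("USE DB_1");
      stmt.executeQuery("SELECT C FROM TAB_3");
      return true;
    } catch (SQLException authzDenied) {
      return false;
    }
  }
}
```
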
.addPermissionsToRole("select_tab1_AB", "server=server1->db=DB_1->table=TAB_1->column=B->action=select") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + + // test execution on user1 + Connection connection = context.createConnection(USER1_1); + Statement statement = context.createStatement(connection); + statement.execute("USE DB_1"); + + // test user can execute query count on column A on tab_1 + statement.executeQuery("SELECT A,B FROM TAB_1"); + + statement.close(); + connection.close(); + } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtDatabaseScope.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtDatabaseScope.java index 7c9a66dd7..b28b6f464 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtDatabaseScope.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtDatabaseScope.java @@ -30,9 +30,10 @@ import java.util.HashMap; import java.util.Map; -import junit.framework.Assert; +import org.junit.Assert; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import com.google.common.io.Resources; @@ -41,36 +42,39 @@ */ public class TestPrivilegesAtDatabaseScope extends AbstractTestWithStaticConfiguration { - private PolicyFile policyFile; Map testProperties; private static final String SINGLE_TYPE_DATA_FILE_NAME = "kv1.dat"; + @BeforeClass + public static void setupTestStaticConfiguration () throws Exception { + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + @Override @Before public void setup() throws Exception { + policyFile = super.setupPolicy(); + super.setup(); testProperties = new HashMap(); - - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); } // SENTRY-285 test @Test public void testAllOnDb() throws Exception { - - policyFile - .addRolesToGroup(USERGROUP1, "all_db1") - .addPermissionsToRole("all_db1", "server=server1->db=" + DB1 + "->action=all") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - // setup db objects needed by the test Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("create database " + DB1); statement.execute("create table " + DB1 + ".tab1(a int)"); + policyFile + .addRolesToGroup(USERGROUP1, "all_db1") + .addPermissionsToRole("all_db1", "server=server1->db=" + DB1 + "->action=all") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("use " + DB1); @@ -97,25 +101,23 @@ public void testAllPrivilege() throws Exception { Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile - .addRolesToGroup(USERGROUP1, "all_db1", "load_data") - .addRolesToGroup(USERGROUP2, "all_db2") - .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) - .addPermissionsToRole("all_db2", "server=server1->db=" + DB2) - .addPermissionsToRole("load_data", "server=server1->uri=file://" + dataFile.getPath()) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - // setup db objects needed by the test Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - 
statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); - statement.execute("DROP DATABASE IF EXISTS " + DB2 + " CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("CREATE DATABASE " + DB2); statement.close(); connection.close(); + policyFile + .addRolesToGroup(USERGROUP1, "all_db1", "load_data") + .addRolesToGroup(USERGROUP2, "all_db2") + .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) + .addPermissionsToRole("all_db2", "server=server1->db=" + DB2) + .addPermissionsToRole("load_data", "server=server1->uri=file://" + dataFile.getPath()) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + // test execution connection = context.createConnection(USER1_1); statement = context.createStatement(connection); @@ -182,14 +184,6 @@ public void testAllPrivilege() throws Exception { statement.close(); connection.close(); - - //test cleanup - connection = context.createConnection(ADMIN1); - statement = context.createStatement(connection); - statement.execute("DROP DATABASE " + DB2 + " CASCADE"); - statement.close(); - connection.close(); - context.close(); } /* Admin creates database DB_1, creates table TAB_1, loads data into it @@ -206,21 +200,9 @@ public void testAllPrivilegeOnObjectOwnedByAdmin() throws Exception { Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile - .addRolesToGroup(USERGROUP1, "all_db1", "load_data", "exttab") - .addRolesToGroup(USERGROUP2, "all_db2") - .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) - .addPermissionsToRole("all_db2", "server=server1->db=" + DB2) - .addPermissionsToRole("exttab", "server=server1->uri=file://" + dataDir.getPath()) - .addPermissionsToRole("load_data", "server=server1->uri=file://" + dataFile.getPath()) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - // setup db objects needed by the test Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); - statement.execute("DROP DATABASE IF EXISTS " + DB2 + " CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("CREATE DATABASE " + DB2); statement.execute("USE " + DB1); @@ -232,6 +214,16 @@ public void testAllPrivilegeOnObjectOwnedByAdmin() throws Exception { statement.close(); connection.close(); + policyFile + .addRolesToGroup(USERGROUP1, "all_db1", "load_data", "exttab") + .addRolesToGroup(USERGROUP2, "all_db2") + .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) + .addPermissionsToRole("all_db2", "server=server1->db=" + DB2) + .addPermissionsToRole("exttab", "server=server1->uri=file://" + dataDir.getPath()) + .addPermissionsToRole("load_data", "server=server1->uri=file://" + dataFile.getPath()) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + // test execution connection = context.createConnection(USER1_1); statement = context.createStatement(connection); @@ -289,15 +281,6 @@ public void testAllPrivilegeOnObjectOwnedByAdmin() throws Exception { statement.close(); connection.close(); - - //test cleanup - connection = context.createConnection(ADMIN1); - statement = context.createStatement(connection); - statement.execute("DROP DATABASE " + DB1 + " CASCADE"); - statement.execute("DROP DATABASE " + DB2 + " CASCADE"); - statement.close(); - connection.close(); - context.close(); } /** @@ -310,32 +293,27 @@ public void 
testAllPrivilegeOnObjectOwnedByAdmin() throws Exception { */ @Test public void testUseDbPrivilege() throws Exception { - - policyFile - .addRolesToGroup(USERGROUP1, "all_db1") - .addRolesToGroup(USERGROUP2, "select_db2") - .addRolesToGroup(USERGROUP3, "all_db3") - .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) - .addPermissionsToRole("select_db2", "server=server1->db=" + DB2 + "->table=tab_2->action=select") - .addPermissionsToRole("all_db3", "server=server1->db=DB_3") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - - - // setup db objects needed by the test Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("use " + DB1); statement.execute("CREATE TABLE TAB_1(A STRING)"); - statement.execute("DROP DATABASE IF EXISTS " + DB2 + " CASCADE"); statement.execute("CREATE DATABASE " + DB2); - statement.execute("use " + DB1); + statement.execute("use " + DB2); statement.execute("CREATE TABLE TAB_2(A STRING)"); context.close(); + policyFile + .addRolesToGroup(USERGROUP1, "all_db1") + .addRolesToGroup(USERGROUP2, "select_db2") + .addRolesToGroup(USERGROUP3, "all_db3") + .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) + .addPermissionsToRole("select_db2", "server=server1->db=" + DB2 + "->table=tab_2->action=select") + .addPermissionsToRole("all_db3", "server=server1->db=DB_3") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + // user1 should be able to connect db_1 connection = context.createConnection(USER1_1); statement = context.createStatement(connection); @@ -374,24 +352,28 @@ public void testUseDbPrivilege() throws Exception { */ @Test public void testDefaultDbPrivilege() throws Exception { - - policyFile - .addRolesToGroup(USERGROUP1, "all_db1") - .addRolesToGroup(USERGROUP2, "select_db2") - .addRolesToGroup(USERGROUP3, "all_default") - .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) - .addPermissionsToRole("select_db2", "server=server1->db=" + DB2 + "->table=tab_2->action=select") - .addPermissionsToRole("all_default", "server=server1->db=default") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - - Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("use default"); statement.execute("create table tab1(a int)"); + statement.execute("CREATE DATABASE " + DB1); + statement.execute("use " + DB1); + statement.execute("CREATE TABLE TAB_1(A STRING)"); + statement.execute("CREATE DATABASE " + DB2); + statement.execute("use " + DB2); + statement.execute("CREATE TABLE TAB_2(A STRING)"); context.close(); + policyFile + .addRolesToGroup(USERGROUP1, "all_db1") + .addRolesToGroup(USERGROUP2, "select_db2") + .addRolesToGroup(USERGROUP3, "all_default") + .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) + .addPermissionsToRole("select_db2", "server=server1->db=" + DB2 + "->table=tab_2->action=select") + .addPermissionsToRole("all_default", "server=server1->db=default") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("use default"); diff --git 
a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtFunctionScope.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtFunctionScope.java index 7bb199bd8..bb8d61d86 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtFunctionScope.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtFunctionScope.java @@ -18,11 +18,13 @@ package org.apache.sentry.tests.e2e.hive; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; import java.io.File; import java.io.FileOutputStream; import java.security.CodeSource; import java.sql.Connection; +import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; @@ -32,7 +34,13 @@ import com.google.common.io.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + public class TestPrivilegesAtFunctionScope extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestPrivilegesAtFunctionScope.class); + private final String SINGLE_TYPE_DATA_FILE_NAME = "kv1.dat"; private File dataDir; private File dataFile; @@ -92,10 +100,20 @@ public void testFuncPrivileges1() throws Exception { connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE " + DB1); - statement.execute( - "CREATE TEMPORARY FUNCTION printf_test AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFPrintf'"); - statement.execute("SELECT printf_test(value) FROM " + tableName1); - statement.execute("DROP TEMPORARY FUNCTION printf_test"); + + try { + statement.execute("CREATE TEMPORARY FUNCTION printf_test AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFPrintf'"); + LOGGER.info("Testing select from temp func printf_test"); + ResultSet res = statement.executeQuery("SELECT printf_test('%d', under_col) FROM " + tableName1); + while (res.next()) { + LOGGER.info(res.getString(1)); + } + res.close(); + statement.execute("DROP TEMPORARY FUNCTION printf_test"); + } catch (Exception ex) { + LOGGER.error("test temp func printf_test failed", ex); + fail("failed to test temp func printf_test"); + } statement.execute( "CREATE FUNCTION printf_test_perm AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFPrintf' "); @@ -179,6 +197,14 @@ public void testFuncPrivileges1() throws Exception { public void testUdfWhiteList () throws Exception { String tableName1 = "tab1"; + Connection connection = context.createConnection(ADMIN1); + Statement statement = connection.createStatement(); + statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); + statement.execute("CREATE DATABASE " + DB1); + statement.execute("USE " + DB1); + statement.execute("create table " + tableName1 + + " (under_col int comment 'the under column', value string)"); + policyFile .addRolesToGroup(USERGROUP1, "db1_all", "UDF_JAR") .addRolesToGroup(USERGROUP2, "db1_tab1", "UDF_JAR") @@ -188,13 +214,6 @@ public void testUdfWhiteList () throws Exception { .addPermissionsToRole("UDF_JAR", "server=server1->uri=file://${user.home}/.m2"); writePolicyFile(policyFile); - Connection connection = context.createConnection(ADMIN1); - Statement statement = connection.createStatement(); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); - statement.execute("CREATE DATABASE " + DB1); - statement.execute("USE " + DB1); - statement.execute("create table " + tableName1 - + " (under_col int comment 'the under column', value string)"); statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE " + DB1 + "." + tableName1); statement.execute("SELECT rand(), concat(value, '_foo') FROM " + tableName1); @@ -206,4 +225,38 @@ public void testUdfWhiteList () throws Exception { statement.close(); connection.close(); } + + /** + * A user with db-level access should be able to create/alter tables with a built-in SerDe. + */ + @Test + public void testSerdePrivileges() throws Exception { + String tableName1 = "tab1"; + String tableName2 = "tab2"; + + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); + statement.execute("CREATE DATABASE " + DB1); + + context.close(); + + policyFile + .addRolesToGroup(USERGROUP1, "db1_all") + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1); + writePolicyFile(policyFile); + + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + statement.execute("USE " + DB1); + statement.execute("create table " + DB1 + "." + tableName1 + + " (a string, b string) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' " + + " STORED AS TEXTFILE"); + + statement.execute("create table " + DB1 + "." + tableName2 + " (a string, b string)"); + statement.execute("alter table " + DB1 + "." + tableName2 + + " SET SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'"); + + context.close(); + } }
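The files above, like the table and column scope tests earlier in this patch, share one lifecycle refactor: data and policy preparation move out of the one-time @BeforeClass hook into the per-test @Before hook. A minimal sketch of the resulting shape (method names follow this patch; the comment on super.setup() is an assumption, consistent with the explicit DROP/cleanup statements deleted elsewhere in the diff):

    import org.apache.sentry.provider.file.PolicyFile;
    import org.junit.Before;
    import org.junit.BeforeClass;

    public class ExampleScopeTest extends AbstractTestWithStaticConfiguration {
      private static PolicyFile policyFile;

      @BeforeClass
      public static void setupTestStaticConfiguration() throws Exception {
        // One-time cluster/static wiring only; no test data here anymore.
        AbstractTestWithStaticConfiguration.setupTestStaticConfiguration();
      }

      @Before
      public void setup() throws Exception {
        policyFile = super.setupPolicy(); // fresh policy with the admin mapping
        super.setup();                    // assumed: clears state left by the previous test
        prepareDBDataForTest();           // recreate tables and reload data for this test
      }

      private static void prepareDBDataForTest() throws Exception {
        // per-test DDL and data loads go here (see the tests in this patch)
      }
    }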
statement.execute("create table " + tableName1 - + " (under_col int comment 'the under column', value string)"); statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE " + DB1 + "." + tableName1); statement.execute("SELECT rand(), concat(value, '_foo') FROM " + tableName1); @@ -206,4 +225,38 @@ public void testUdfWhiteList () throws Exception { statement.close(); connection.close(); } + + /** + * User with db level access should be able to create/alter tables with buildin Serde. + */ + @Test + public void testSerdePrivileges() throws Exception { + String tableName1 = "tab1"; + String tableName2 = "tab2"; + + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); + statement.execute("CREATE DATABASE " + DB1); + + context.close(); + + policyFile + .addRolesToGroup(USERGROUP1, "db1_all") + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1); + writePolicyFile(policyFile); + + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + statement.execute("USE " + DB1); + statement.execute("create table " + DB1 + "." + tableName1 + + " (a string, b string) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' " + + " STORED AS TEXTFILE"); + + statement.execute("create table " + DB1 + "." + tableName2 + " (a string, b string)"); + statement.execute("alter table " + DB1 + "." + tableName2 + + " SET SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'"); + + context.close(); + } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtTableScope.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtTableScope.java index 7abc684b6..4c1cd8e80 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtTableScope.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestPrivilegesAtTableScope.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import java.io.File; import java.io.FileOutputStream; @@ -27,7 +28,7 @@ import java.sql.SQLException; import java.sql.Statement; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.provider.file.PolicyFile; import org.junit.Before; @@ -44,19 +45,19 @@ public class TestPrivilegesAtTableScope extends AbstractTestWithStaticConfigurat private static PolicyFile policyFile; private final static String MULTI_TYPE_DATA_FILE_NAME = "emp.dat"; + @Before + public void setup() throws Exception { + policyFile = super.setupPolicy(); + super.setup(); + prepareDBDataForTest(); + } + @BeforeClass public static void setupTestStaticConfiguration() throws Exception { AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); - prepareDBDataForTest(); } protected static void prepareDBDataForTest() throws Exception { - clearDbAfterPerTest = false; - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP).setUserGroupMapping( - StaticUserGroup.getStaticMapping()); - // The setupAdmin is for TestDbPrivilegesAtTableScope to add role admin_role - setupAdmin(); - writePolicyFile(policyFile); // copy data file to test dir File dataDir = context.getDataDir(); File dataFile = new File(dataDir, MULTI_TYPE_DATA_FILE_NAME); @@ -72,37 +73,30 @@ protected static void 
prepareDBDataForTest() throws Exception { statement.execute("CREATE DATABASE DB_1"); statement.execute("USE DB_1"); - statement.execute("CREATE TABLE TAB_1(B INT, A STRING) " + statement.execute("CREATE TABLE " + TBL1 + "(B INT, A STRING) " + " row format delimited fields terminated by '|' stored as textfile"); - statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE TAB_1"); - statement.execute("CREATE TABLE TAB_2(B INT, A STRING) " + statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE " + TBL1); + statement.execute("CREATE TABLE " + TBL2 + "(B INT, A STRING) " + " row format delimited fields terminated by '|' stored as textfile"); - statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE TAB_2"); - statement.execute("CREATE VIEW VIEW_1 AS SELECT A, B FROM TAB_1"); + statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE " + TBL2); + statement.execute("CREATE VIEW VIEW_1 AS SELECT A, B FROM " + TBL1); statement.close(); connection.close(); } - @Before - public void setup() throws Exception { - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - } - /* - * Admin creates database DB_1, table TAB_1, TAB_2 in DB_1, loads data into - * TAB_1, TAB_2 Admin grants SELECT on TAB_1, TAB_2, INSERT on TAB_1 to + * Admin creates database DB_1, table TBL1, TBL2 in DB_1, loads data into + * TBL1, TBL2 Admin grants SELECT on TBL1, TBL2, INSERT on TBL1 to * USER_GROUP of which user1 is a member. */ @Test public void testInsertAndSelect() throws Exception { policyFile .addRolesToGroup(USERGROUP1, "select_tab1", "insert_tab1", "select_tab2") - .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=TAB_1->action=select") - .addPermissionsToRole("insert_tab1", "server=server1->db=DB_1->table=TAB_1->action=insert") - .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=TAB_2->action=select") + .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=select") + .addPermissionsToRole("insert_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=insert") + .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=" + TBL2 + "->action=select") .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); @@ -111,12 +105,12 @@ public void testInsertAndSelect() throws Exception { Statement statement = context.createStatement(connection); statement.execute("USE DB_1"); // test user can insert - statement.execute("INSERT INTO TABLE TAB_1 SELECT A, B FROM TAB_2"); + statement.execute("INSERT INTO TABLE " + TBL1 + " SELECT A, B FROM " + TBL2); // test user can query table - statement.executeQuery("SELECT A FROM TAB_2"); + statement.executeQuery("SELECT A FROM " + TBL2); // negative test: test user can't drop try { - statement.execute("DROP TABLE TAB_1"); + statement.execute("DROP TABLE " + TBL1); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -124,20 +118,20 @@ public void testInsertAndSelect() throws Exception { statement.close(); connection.close(); - // connect as admin and drop tab_1 + // connect as admin and drop TBL1 connection = context.createConnection(ADMIN1); statement = context.createStatement(connection); statement.execute("USE DB_1"); - statement.execute("DROP TABLE TAB_1"); + statement.execute("DROP TABLE " + TBL1); statement.close(); 
connection.close(); - // negative test: connect as user1 and try to recreate tab_1 + // negative test: connect as user1 and try to recreate TBL1 connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE DB_1"); try { - statement.execute("CREATE TABLE TAB_1(A STRING)"); + statement.execute("CREATE TABLE " + TBL1 + "(A STRING)"); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -146,29 +140,29 @@ public void testInsertAndSelect() throws Exception { statement.close(); connection.close(); - // connect as admin to restore the tab_1 + // connect as admin to restore the TBL1 connection = context.createConnection(ADMIN1); statement = context.createStatement(connection); statement.execute("USE DB_1"); - statement.execute("CREATE TABLE TAB_1(B INT, A STRING) " + statement.execute("CREATE TABLE " + TBL1 + "(B INT, A STRING) " + " row format delimited fields terminated by '|' stored as textfile"); - statement.execute("INSERT INTO TABLE TAB_1 SELECT A, B FROM TAB_2"); + statement.execute("INSERT INTO TABLE " + TBL1 + " SELECT A, B FROM " + TBL2); statement.close(); connection.close(); } /* - * Admin creates database DB_1, table TAB_1, TAB_2 in DB_1, loads data into - * TAB_1, TAB_2. Admin grants INSERT on TAB_1, SELECT on TAB_2 to USER_GROUP + * Admin creates database DB_1, table TBL1, TBL2 in DB_1, loads data into + * TBL1, TBL2. Admin grants INSERT on TBL1, SELECT on TBL2 to USER_GROUP * of which user1 is a member. */ @Test public void testInsert() throws Exception { policyFile .addRolesToGroup(USERGROUP1, "insert_tab1", "select_tab2") - .addPermissionsToRole("insert_tab1", "server=server1->db=DB_1->table=TAB_1->action=insert") - .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=TAB_2->action=select") + .addPermissionsToRole("insert_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=insert") + .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=" + TBL2 + "->action=select") .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); @@ -177,11 +171,11 @@ public void testInsert() throws Exception { Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); // test user can execute insert on table - statement.execute("INSERT INTO TABLE TAB_1 SELECT A, B FROM TAB_2"); + statement.execute("INSERT INTO TABLE " + TBL1 + " SELECT A, B FROM " + TBL2); // negative test: user can't query table try { - statement.executeQuery("SELECT A FROM TAB_1"); + statement.executeQuery("SELECT A FROM " + TBL1); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -206,7 +200,7 @@ public void testInsert() throws Exception { // negative test: test user can't create a new view try { - statement.executeQuery("CREATE VIEW VIEW_2(A) AS SELECT A FROM TAB_1"); + statement.executeQuery("CREATE VIEW VIEW_2(A) AS SELECT A FROM " + TBL1); Assert.fail("Expected SQL Exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -216,17 +210,17 @@ public void testInsert() throws Exception { } /* - * Admin creates database DB_1, table TAB_1, TAB_2 in DB_1, loads data into - * TAB_1, TAB_2. Admin grants SELECT on TAB_1, TAB_2 to USER_GROUP of which + * Admin creates database DB_1, table TBL1, TBL2 in DB_1, loads data into + * TBL1, TBL2. Admin grants SELECT on TBL1, TBL2 to USER_GROUP of which * user1 is a member. 
*/ @Test public void testSelect() throws Exception { policyFile .addRolesToGroup(USERGROUP1, "select_tab1", "select_tab2") - .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=TAB_1->action=select") - .addPermissionsToRole("insert_tab1", "server=server1->db=DB_1->table=TAB_1->action=insert") - .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=TAB_2->action=select") + .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=select") + .addPermissionsToRole("insert_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=insert") + .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=" + TBL2 + "->action=select") .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); @@ -235,11 +229,11 @@ public void testSelect() throws Exception { Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); // test user can execute query on table - statement.executeQuery("SELECT A FROM TAB_1"); + statement.executeQuery("SELECT A FROM " + TBL1); // negative test: test insert into table try { - statement.executeQuery("INSERT INTO TABLE TAB_1 SELECT A, B FROM TAB_2"); + statement.executeQuery("INSERT INTO TABLE " + TBL1 + " SELECT A, B FROM " + TBL2); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -255,7 +249,7 @@ public void testSelect() throws Exception { // negative test: test user can't create a new view try { - statement.executeQuery("CREATE VIEW VIEW_2(A) AS SELECT A FROM TAB_1"); + statement.executeQuery("CREATE VIEW VIEW_2(A) AS SELECT A FROM " + TBL1); Assert.fail("Expected SQL Exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -265,16 +259,16 @@ public void testSelect() throws Exception { } /* - * Admin creates database DB_1, table TAB_1, TAB_2 in DB_1, VIEW_1 on TAB_1 - * loads data into TAB_1, TAB_2. Admin grants SELECT on TAB_1,TAB_2 to + * Admin creates database DB_1, table TBL1, TBL2 in DB_1, VIEW_1 on TBL1 + * loads data into TBL1, TBL2. Admin grants SELECT on TBL1,TBL2 to * USER_GROUP of which user1 is a member. 
*/ @Test public void testTableViewJoin() throws Exception { policyFile .addRolesToGroup(USERGROUP1, "select_tab1", "select_tab2") - .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=TAB_1->action=select") - .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=TAB_2->action=select") + .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=select") + .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=" + TBL2 + "->action=select") .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); @@ -282,12 +276,12 @@ public void testTableViewJoin() throws Exception { Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); - // test user can execute query TAB_1 JOIN TAB_2 - statement.executeQuery("SELECT T1.B FROM TAB_1 T1 JOIN TAB_2 T2 ON (T1.B = T2.B)"); + // test user can execute query TBL1 JOIN TBL2 + statement.executeQuery("SELECT T1.B FROM " + TBL1 + " T1 JOIN " + TBL2 + " T2 ON (T1.B = T2.B)"); - // negative test: test user can't execute query VIEW_1 JOIN TAB_2 + // negative test: test user can't execute query VIEW_1 JOIN TBL2 try { - statement.executeQuery("SELECT V1.B FROM VIEW_1 V1 JOIN TAB_2 T2 ON (V1.B = T2.B)"); + statement.executeQuery("SELECT V1.B FROM VIEW_1 V1 JOIN " + TBL2 + " T2 ON (V1.B = T2.B)"); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -298,16 +292,16 @@ public void testTableViewJoin() throws Exception { } /* - * Admin creates database DB_1, table TAB_1, TAB_2 in DB_1, VIEW_1 on TAB_1 - * loads data into TAB_1, TAB_2. Admin grants SELECT on TAB_2 to USER_GROUP of + * Admin creates database DB_1, table TBL1, TBL2 in DB_1, VIEW_1 on TBL1 + * loads data into TBL1, TBL2. Admin grants SELECT on TBL2 to USER_GROUP of * which user1 is a member. 
*/ @Test public void testTableViewJoin2() throws Exception { policyFile .addRolesToGroup(USERGROUP1, "select_tab2") - .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=TAB_1->action=select") - .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=TAB_2->action=select") + .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=select") + .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=" + TBL2 + "->action=select") .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); @@ -315,20 +309,20 @@ public void testTableViewJoin2() throws Exception { Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); - // test user can execute query on TAB_2 - statement.executeQuery("SELECT A FROM TAB_2"); + // test user can execute query on TBL2 + statement.executeQuery("SELECT A FROM " + TBL2); - // negative test: test user can't execute query VIEW_1 JOIN TAB_2 + // negative test: test user can't execute query VIEW_1 JOIN TBL2 try { - statement.executeQuery("SELECT VIEW_1.B FROM VIEW_1 JOIN TAB_2 ON (VIEW_1.B = TAB_2.B)"); + statement.executeQuery("SELECT VIEW_1.B FROM VIEW_1 JOIN " + TBL2 + " ON (VIEW_1.B = " + TBL2 + ".B)"); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); } - // negative test: test user can't execute query TAB_1 JOIN TAB_2 + // negative test: test user can't execute query TBL1 JOIN TBL2 try { - statement.executeQuery("SELECT TAB_1.B FROM TAB_1 JOIN TAB_2 ON (TAB_1.B = TAB_2.B)"); + statement.executeQuery("SELECT " + TBL1 + ".B FROM " + TBL1 + " JOIN " + TBL2 + " ON (" + TBL1 + ".B = " + TBL2 + ".B)"); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -339,8 +333,8 @@ public void testTableViewJoin2() throws Exception { } /* - * Admin creates database DB_1, table TAB_1, TAB_2 in DB_1, VIEW_1 on TAB_1 - * loads data into TAB_1, TAB_2. Admin grants SELECT on TAB_2, VIEW_1 to + * Admin creates database DB_1, table TBL1, TBL2 in DB_1, VIEW_1 on TBL1 + * loads data into TBL1, TBL2. Admin grants SELECT on TBL2, VIEW_1 to * USER_GROUP of which user1 is a member. 
*/ @Test @@ -348,7 +342,7 @@ public void testTableViewJoin3() throws Exception { policyFile .addRolesToGroup(USERGROUP1, "select_tab2", "select_view1") .addPermissionsToRole("select_view1", "server=server1->db=DB_1->table=VIEW_1->action=select") - .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=TAB_2->action=select") + .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=" + TBL2 + "->action=select") .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); @@ -356,18 +350,18 @@ public void testTableViewJoin3() throws Exception { Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); - // test user can execute query on TAB_2 - statement.executeQuery("SELECT A FROM TAB_2"); + // test user can execute query on TBL2 + statement.executeQuery("SELECT A FROM " + TBL2); - // test user can execute query VIEW_1 JOIN TAB_2 - statement.executeQuery("SELECT V1.B FROM VIEW_1 V1 JOIN TAB_2 T2 ON (V1.B = T2.B)"); + // test user can execute query VIEW_1 JOIN TBL2 + statement.executeQuery("SELECT V1.B FROM VIEW_1 V1 JOIN " + TBL2 + " T2 ON (V1.B = T2.B)"); // test user can execute query on VIEW_1 statement.executeQuery("SELECT A FROM VIEW_1"); - // negative test: test user can't execute query TAB_1 JOIN TAB_2 + // negative test: test user can't execute query TBL1 JOIN TBL2 try { - statement.executeQuery("SELECT T1.B FROM TAB_1 T1 JOIN TAB_2 T2 ON (T1.B = T2.B)"); + statement.executeQuery("SELECT T1.B FROM " + TBL1 + " T1 JOIN " + TBL2 + " T2 ON (T1.B = T2.B)"); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -378,8 +372,8 @@ public void testTableViewJoin3() throws Exception { } /* - * Admin creates database DB_1, table TAB_1, TAB_2 in DB_1, VIEW_1 on TAB_1 - * loads data into TAB_1, TAB_2. Admin grants SELECT on TAB_1, VIEW_1 to + * Admin creates database DB_1, table TBL1, TBL2 in DB_1, VIEW_1 on TBL1 + * loads data into TBL1, TBL2. Admin grants SELECT on TBL1, VIEW_1 to * USER_GROUP of which user1 is a member. 
*/ @Test @@ -387,7 +381,7 @@ public void testTableViewJoin4() throws Exception { policyFile .addRolesToGroup(USERGROUP1, "select_tab1", "select_view1") .addPermissionsToRole("select_view1", "server=server1->db=DB_1->table=VIEW_1->action=select") - .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=TAB_1->action=select") + .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=select") .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); @@ -396,12 +390,12 @@ public void testTableViewJoin4() throws Exception { Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); - // test user can execute query VIEW_1 JOIN TAB_1 - statement.executeQuery("SELECT VIEW_1.B FROM VIEW_1 JOIN TAB_1 ON (VIEW_1.B = TAB_1.B)"); + // test user can execute query VIEW_1 JOIN TBL1 + statement.executeQuery("SELECT VIEW_1.B FROM VIEW_1 JOIN " + TBL1 + " ON (VIEW_1.B = " + TBL1 + ".B)"); - // negative test: test user can't execute query TAB_1 JOIN TAB_2 + // negative test: test user can't execute query TBL1 JOIN TBL2 try { - statement.executeQuery("SELECT TAB_1.B FROM TAB_1 JOIN TAB_2 ON (TAB_1.B = TAB_2.B)"); + statement.executeQuery("SELECT " + TBL1 + ".B FROM " + TBL1 + " JOIN " + TBL2 + " ON (" + TBL1 + ".B = " + TBL2 + ".B)"); Assert.fail("Expected SQL exception"); } catch (SQLException e) { context.verifyAuthzException(e); @@ -425,18 +419,7 @@ public void testTruncateTable() throws Exception { Resources.copy(Resources.getResource(MULTI_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile - .addRolesToGroup(USERGROUP1, "all_tab1") - .addPermissionsToRole("all_tab1", - "server=server1->db=" + DB1 + "->table=" + TBL2) - .addRolesToGroup(USERGROUP2, "drop_tab1") - .addPermissionsToRole("drop_tab1", - "server=server1->db=" + DB1 + "->table=" + TBL3 + "->action=drop", - "server=server1->db=" + DB1 + "->table=" + TBL3 + "->action=select") - .addRolesToGroup(USERGROUP3, "select_tab1") - .addPermissionsToRole("select_tab1", - "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=select") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); // setup db objects needed by the test @@ -444,6 +427,9 @@ public void testTruncateTable() throws Exception { Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); + statement.execute("DROP TABLE if exists " + TBL1); + statement.execute("DROP TABLE if exists " + TBL2); + statement.execute("DROP TABLE if exists " + TBL3); statement.execute("CREATE TABLE " + TBL1 + "(B INT, A STRING) " + " row format delimited fields terminated by '|' stored as textfile"); statement.execute("CREATE TABLE " + TBL2 + "(B INT, A STRING) " @@ -460,32 +446,104 @@ public void testTruncateTable() throws Exception { // verify admin can execute truncate table statement.execute("TRUNCATE TABLE " + TBL1); assertFalse(hasData(statement, TBL1)); + statement.close(); connection.close(); - connection = context.createConnection(USER1_1); - statement = context.createStatement(connection); + // add roles and grant permissions + updatePolicyFile(); + + // test truncate table without partitions + truncateTableTests(false); + }
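The privilege-to-operation mapping that truncateTableTests (defined later in this file) encodes: ALL or DROP on a table permits TRUNCATE, since truncation destroys data much like a drop, while SELECT alone does not. Distilled (a sketch; each statement runs as the indicated user against DB1):

    // As USER1_1 (ALL on TBL2): truncate succeeds.
    statement.execute("TRUNCATE TABLE " + TBL2);
    // As USER2_1 (DROP + SELECT on TBL3): truncate succeeds, partitions included.
    statement.execute("TRUNCATE TABLE " + TBL3 + " PARTITION (j=1)");
    // As USER3_1 (SELECT only on TBL1): truncate is rejected by authorization.
    context.assertAuthzException(statement, "TRUNCATE TABLE " + TBL1);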
+ + /** + * Verify truncate permissions on a partitioned table for users with + * different privileges + * @throws Exception + */ + @Test + public void testTruncatePartitionedTable() throws Exception { + File dataDir = context.getDataDir(); + // copy data file to test dir + File dataFile = new File(dataDir, MULTI_TYPE_DATA_FILE_NAME); + FileOutputStream to = new FileOutputStream(dataFile); + Resources.copy(Resources.getResource(MULTI_TYPE_DATA_FILE_NAME), to); + to.close(); + + policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + + // create partitioned tables + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); - // verify all on tab can truncate table - statement.execute("TRUNCATE TABLE " + TBL2); - assertFalse(hasData(statement, TBL2)); + statement.execute("DROP TABLE if exists " + TBL1); + statement.execute("CREATE TABLE " + TBL1 + " (i int) PARTITIONED BY (j int)"); + statement.execute("DROP TABLE if exists " + TBL2); + statement.execute("CREATE TABLE " + TBL2 + " (i int) PARTITIONED BY (j int)"); + statement.execute("DROP TABLE if exists " + TBL3); + statement.execute("CREATE TABLE " + TBL3 + " (i int) PARTITIONED BY (j int)"); + + // verify admin can truncate an empty partitioned table + statement.execute("TRUNCATE TABLE " + TBL1); + assertFalse(hasData(statement, TBL1)); statement.close(); connection.close(); - connection = context.createConnection(USER2_1); + // add roles and grant permissions + updatePolicyFile(); + + // test truncate empty partitioned tables + truncateTableTests(false); + + // add partitions to tables + connection = context.createConnection(ADMIN1); statement = context.createStatement(connection); statement.execute("USE " + DB1); - // verify drop on tab can truncate table - statement.execute("TRUNCATE TABLE " + TBL3); - assertFalse(hasData(statement, TBL3)); + statement.execute("ALTER TABLE " + TBL1 + " ADD PARTITION (j=1) PARTITION (j=2)"); + statement.execute("ALTER TABLE " + TBL2 + " ADD PARTITION (j=1) PARTITION (j=2)"); + statement.execute("ALTER TABLE " + TBL3 + " ADD PARTITION (j=1) PARTITION (j=2)"); + + // verify admin can truncate a non-empty partitioned table + statement.execute("TRUNCATE TABLE " + TBL1 + " partition (j=1)"); + statement.execute("TRUNCATE TABLE " + TBL1); + assertFalse(hasData(statement, TBL1)); statement.close(); connection.close(); - connection = context.createConnection(USER3_1); - statement = context.createStatement(connection); + // test truncate of non-empty partitioned tables + truncateTableTests(true); + } + + /** + * Test queries without a FROM clause. Hive rewrites such queries against internal + * dummy database/table entities, which should not trip the authorization check.
+ * @throws Exception + */ + @Test + public void testSelectWithoutFrom() throws Exception { + policyFile + .addRolesToGroup(USERGROUP1, "all_tab1") + .addPermissionsToRole("all_tab1", + "server=server1->db=" + DB1 + "->table=" + TBL1) + .addRolesToGroup(USERGROUP2, "select_tab1") + .addPermissionsToRole("select_tab1", + "server=server1->db=" + DB1 + "->table=" + TBL1) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + + Connection connection = context.createConnection(USER1_1); + Statement statement = context.createStatement(connection); + + // test with implicit default database + assertTrue(statement.executeQuery("SELECT 1 ").next()); + assertTrue(statement.executeQuery("SELECT current_database()").next()); + + // test after switching database statement.execute("USE " + DB1); - // verify select on tab can NOT truncate table - context.assertAuthzException(statement, "TRUNCATE TABLE " + TBL3); + assertTrue(statement.executeQuery("SELECT 1 ").next()); + assertTrue(statement.executeQuery("SELECT current_database() ").next()); statement.close(); connection.close(); } @@ -497,4 +555,108 @@ private boolean hasData(Statement stmt, String tableName) throws Exception { rs1.close(); return hasResults; } + + @Test + public void testDummyPartition() throws Exception { + + policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + + // setup db objects needed by the test + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + + statement.execute("USE " + DB1); + + statement.execute("DROP TABLE if exists " + TBL1); + statement.execute("CREATE table " + TBL1 + " (a int) PARTITIONED BY (b string, c string)"); + statement.execute("DROP TABLE if exists " + TBL3); + statement.execute("CREATE table " + TBL3 + " (a2 int) PARTITIONED BY (b2 string, c2 string)"); + statement.close(); + connection.close(); + + policyFile + .addRolesToGroup(USERGROUP1, "select_tab1", "select_tab2") + .addPermissionsToRole("select_tab1", "server=server1->db=DB_1->table=" + TBL1 + "->action=select") + .addPermissionsToRole("select_tab2", "server=server1->db=DB_1->table=" + TBL3 + "->action=insert"); + writePolicyFile(policyFile); + + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + + statement.execute("USE " + DB1); + statement.execute("INSERT OVERWRITE TABLE " + TBL3 + " PARTITION(b2='abc', c2) select a, b as c2 from " + TBL1); + statement.close(); + connection.close(); + + } + + /** + * update policy file for truncate table tests + */ + private void updatePolicyFile() throws Exception{ + policyFile + .addRolesToGroup(USERGROUP1, "all_tab1") + .addPermissionsToRole("all_tab1", + "server=server1->db=" + DB1 + "->table=" + TBL2) + .addRolesToGroup(USERGROUP2, "drop_tab1") + .addPermissionsToRole("drop_tab1", + "server=server1->db=" + DB1 + "->table=" + TBL3 + "->action=drop", + "server=server1->db=" + DB1 + "->table=" + TBL3 + "->action=select") + .addRolesToGroup(USERGROUP3, "select_tab1") + .addPermissionsToRole("select_tab1", + "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=select"); + writePolicyFile(policyFile); + } + + /** + * Test truncate table with or without partitions for users with different privileges. + * Only test truncate table partition if truncPartition is true. 
+ */ + private void truncateTableTests(boolean truncPartition) throws Exception{ + Connection connection = null; + Statement statement = null; + try { + connection = context.createConnection(USER1_1); + statement = context.createStatement(connection); + statement.execute("USE " + DB1); + // verify all privileges on table can truncate table + if (truncPartition) { + statement.execute("TRUNCATE TABLE " + TBL2 + " PARTITION (j=1)"); + } + statement.execute("TRUNCATE TABLE " + TBL2); + assertFalse(hasData(statement, TBL2)); + statement.close(); + connection.close(); + + connection = context.createConnection(USER2_1); + statement = context.createStatement(connection); + statement.execute("USE " + DB1); + // verify drop privilege on table can truncate table + if (truncPartition) { + statement.execute("TRUNCATE TABLE " + TBL3 + " partition (j=1)"); + } + statement.execute("TRUNCATE TABLE " + TBL3); + assertFalse(hasData(statement, TBL3)); + statement.close(); + connection.close(); + + connection = context.createConnection(USER3_1); + statement = context.createStatement(connection); + statement.execute("USE " + DB1); + // verify select privilege on table can NOT truncate table + if (truncPartition) { + context.assertAuthzException( + statement, "TRUNCATE TABLE " + TBL1 + " PARTITION (j=1)"); + } + context.assertAuthzException(statement, "TRUNCATE TABLE " + TBL1); + } finally { + if (statement != null) { + statement.close(); + } + if (connection != null) { + connection.close(); + } + } + } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestReloadPrivileges.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestReloadPrivileges.java new file mode 100644 index 000000000..6d4e8d303 --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestReloadPrivileges.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.sentry.tests.e2e.hive; + +import java.sql.Connection; +import java.sql.Statement; + +import org.apache.sentry.provider.file.PolicyFile; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestReloadPrivileges extends AbstractTestWithStaticConfiguration { + private PolicyFile policyFile; + + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception { + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + + @Before + public void setup() throws Exception { + policyFile = + PolicyFile.setAdminOnServer1(ADMINGROUP).setUserGroupMapping( + StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + } + + @Test + public void testReload() throws Exception { + Connection connection = context.createConnection(USER1_1); + Statement statement = context.createStatement(connection); + statement.execute("RELOAD"); + statement.close(); + connection.close(); + } + +} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestRuntimeMetadataRetrieval.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestRuntimeMetadataRetrieval.java index c47686bc9..efb588ee2 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestRuntimeMetadataRetrieval.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestRuntimeMetadataRetrieval.java @@ -29,6 +29,7 @@ import org.apache.sentry.provider.file.PolicyFile; import org.junit.Assert; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import com.google.common.io.Resources; @@ -44,16 +45,20 @@ public class TestRuntimeMetadataRetrieval extends AbstractTestWithStaticConfigur private File dataDir; private File dataFile; + @BeforeClass + public static void setupTestStaticConfiguration () throws Exception { + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + @Before public void setup() throws Exception { + policyFile = super.setupPolicy(); + super.setup(); dataDir = context.getDataDir(); dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); } /** @@ -67,22 +72,10 @@ public void testShowTables1() throws Exception { String tableNames[] = {"tb_1", "tb_2", "tb_3", "tb_4"}; List tableNamesValidation = new ArrayList(); - policyFile - .addRolesToGroup(USERGROUP1, "tab1_priv,tab2_priv,tab3_priv") - .addPermissionsToRole("tab1_priv", "server=server1->db=" + DB1 + "->table=" - + tableNames[0] + "->action=select") - .addPermissionsToRole("tab2_priv", "server=server1->db=" + DB1 + "->table=" - + tableNames[1] + "->action=insert") - .addPermissionsToRole("tab3_priv", "server=server1->db=" + DB1 + "->table=" - + tableNames[2] + "->action=select") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - String user1TableNames[] = {"tb_1", "tb_2", "tb_3"}; Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("USE " + DB1); createTabs(statement, DB1, tableNames); @@ 
-90,16 +83,27 @@ public void testShowTables1() throws Exception { ResultSet rs = statement.executeQuery("SHOW TABLES"); tableNamesValidation.addAll(Arrays.asList(tableNames)); - validateTables(rs, DB1, tableNamesValidation); + validateTables(rs, tableNamesValidation); statement.close(); + policyFile + .addRolesToGroup(USERGROUP1, "tab1_priv,tab2_priv,tab3_priv") + .addPermissionsToRole("tab1_priv", "server=server1->db=" + DB1 + "->table=" + + tableNames[0] + "->action=select") + .addPermissionsToRole("tab2_priv", "server=server1->db=" + DB1 + "->table=" + + tableNames[1] + "->action=insert") + .addPermissionsToRole("tab3_priv", "server=server1->db=" + DB1 + "->table=" + + tableNames[2] + "->action=select") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE " + DB1); // User1 should see tables with any level of access rs = statement.executeQuery("SHOW TABLES"); tableNamesValidation.addAll(Arrays.asList(user1TableNames)); - validateTables(rs, DB1, tableNamesValidation); + validateTables(rs, tableNamesValidation); statement.close(); } @@ -114,33 +118,32 @@ public void testShowTables2() throws Exception { String tableNames[] = {"tb_1", "tb_2", "tb_3", "tb_4"}; List tableNamesValidation = new ArrayList(); - policyFile - .addRolesToGroup(USERGROUP1, "db_priv") - .addPermissionsToRole("db_priv", "server=server1->db=" + DB1) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - String user1TableNames[] = {"tb_1", "tb_2", "tb_3", "tb_4"}; Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("USE " + DB1); createTabs(statement, DB1, tableNames); // Admin should see all tables ResultSet rs = statement.executeQuery("SHOW TABLES"); tableNamesValidation.addAll(Arrays.asList(tableNames)); - validateTables(rs, DB1, tableNamesValidation); + validateTables(rs, tableNamesValidation); statement.close(); + policyFile + .addRolesToGroup(USERGROUP1, "db_priv") + .addPermissionsToRole("db_priv", "server=server1->db=" + DB1) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE " + DB1); // User1 should see tables with any level of access rs = statement.executeQuery("SHOW TABLES"); tableNamesValidation.addAll(Arrays.asList(user1TableNames)); - validateTables(rs, DB1, tableNamesValidation); + validateTables(rs, tableNamesValidation); statement.close(); } @@ -155,35 +158,34 @@ public void testShowTables3() throws Exception { String tableNames[] = {"tb_1", "tb_2", "tb_3", "newtab_3"}; List tableNamesValidation = new ArrayList(); - policyFile - .addRolesToGroup(USERGROUP1, "tab_priv") - .addPermissionsToRole("tab_priv", "server=server1->db=" + DB1 + "->table=" - + tableNames[3] + "->action=insert") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - String adminTableNames[] = {"tb_3", "newtab_3", "tb_2", "tb_1"}; String user1TableNames[] = {"newtab_3"}; Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " 
CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("USE " + DB1); createTabs(statement, DB1, tableNames); // Admin should see all tables ResultSet rs = statement.executeQuery("SHOW TABLES"); tableNamesValidation.addAll(Arrays.asList(adminTableNames)); - validateTables(rs, DB1, tableNamesValidation); + validateTables(rs, tableNamesValidation); statement.close(); + policyFile + .addRolesToGroup(USERGROUP1, "tab_priv") + .addPermissionsToRole("tab_priv", "server=server1->db=" + DB1 + "->table=" + + tableNames[3] + "->action=insert") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE " + DB1); // User1 should see tables with any level of access rs = statement.executeQuery("SHOW TABLES"); tableNamesValidation.addAll(Arrays.asList(user1TableNames)); - validateTables(rs, DB1, tableNamesValidation); + validateTables(rs, tableNamesValidation); statement.close(); } @@ -197,34 +199,33 @@ public void testShowTables4() throws Exception { String tableNames[] = {"tb_1", "tb_2", "tb_3", "newtab_3"}; List tableNamesValidation = new ArrayList(); - policyFile - .addRolesToGroup(USERGROUP1, "tab_priv") - .addPermissionsToRole("tab_priv", "server=server1->db=" + DB1) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - String adminTableNames[] = {"tb_3", "newtab_3", "tb_1", "tb_2"}; String user1TableNames[] = {"tb_3", "newtab_3", "tb_1", "tb_2"}; Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("USE " + DB1); createTabs(statement, DB1, tableNames); // Admin should be able to see all tables ResultSet rs = statement.executeQuery("SHOW TABLES"); tableNamesValidation.addAll(Arrays.asList(adminTableNames)); - validateTables(rs, DB1, tableNamesValidation); + validateTables(rs, tableNamesValidation); statement.close(); + policyFile + .addRolesToGroup(USERGROUP1, "tab_priv") + .addPermissionsToRole("tab_priv", "server=server1->db=" + DB1) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE " + DB1); // User1 should see tables with any level of access rs = statement.executeQuery("SHOW TABLES"); tableNamesValidation.addAll(Arrays.asList(user1TableNames)); - validateTables(rs, DB1, tableNamesValidation); + validateTables(rs, tableNamesValidation); statement.close(); } @@ -236,13 +237,14 @@ public void testShowTables4() throws Exception { public void testShowTables5() throws Exception { String tableNames[] = {"tb_1", "tb_2", "tb_3", "tb_4"}; - policyFile - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); createTabs(statement, "default", tableNames); + policyFile + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); // User1 should see tables with any level of access @@ -263,6 +265,14 @@ public void testShowTablesExtended() throws Exception { String tableNames[] 
= {"tb_1", "tb_2", "tb_3", "tb_4", "table_5"}; List tableNamesValidation = new ArrayList(); + String user1TableNames[] = {"tb_1", "tb_2", "tb_3"}; + + Connection connection = context.createConnection(ADMIN1); + Statement statement = context.createStatement(connection); + statement.execute("CREATE DATABASE " + DB1); + statement.execute("USE " + DB1); + createTabs(statement, DB1, tableNames); + policyFile .addRolesToGroup(USERGROUP1, "tab1_priv,tab2_priv,tab3_priv") .addPermissionsToRole("tab1_priv", "server=server1->db=" + DB1 + "->table=" @@ -274,18 +284,10 @@ public void testShowTablesExtended() throws Exception { .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); - String user1TableNames[] = {"tb_1", "tb_2", "tb_3"}; - - Connection connection = context.createConnection(ADMIN1); - Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); - statement.execute("CREATE DATABASE " + DB1); - statement.execute("USE " + DB1); - createTabs(statement, DB1, tableNames); // Admin should see all tables except table_5, the one does not match the pattern ResultSet rs = statement.executeQuery("SHOW TABLE EXTENDED IN " + DB1 + " LIKE 'tb*'"); tableNamesValidation.addAll(Arrays.asList(tableNames).subList(0, 4)); - validateTablesInRs(rs, DB1, tableNamesValidation); + validateTablesInRs(rs, tableNamesValidation); statement.close(); connection = context.createConnection(USER1_1); @@ -294,7 +296,7 @@ public void testShowTablesExtended() throws Exception { // User1 should see tables with any level of access rs = statement.executeQuery("SHOW TABLE EXTENDED IN " + DB1 + " LIKE 'tb*'"); tableNamesValidation.addAll(Arrays.asList(user1TableNames)); - validateTablesInRs(rs, DB1, tableNamesValidation); + validateTablesInRs(rs, tableNamesValidation); statement.close(); } @@ -309,12 +311,6 @@ public void testShowDatabases1() throws Exception { String[] dbNames = {DB1, DB2, DB3}; String[] user1DbNames = {DB1}; - policyFile - .addRolesToGroup(USERGROUP1, "db1_all") - .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - createDb(ADMIN1, dbNames); dbNamesValidation.addAll(Arrays.asList(dbNames)); dbNamesValidation.add("default"); @@ -324,6 +320,12 @@ public void testShowDatabases1() throws Exception { validateDBs(rs, dbNamesValidation); // admin should see all dbs rs.close(); + policyFile + .addRolesToGroup(USERGROUP1, "db1_all") + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); rs = statement.executeQuery("SHOW DATABASES"); @@ -345,6 +347,7 @@ public void testShowDatabases2() throws Exception { String[] dbNames = {DB1, DB2, DB3}; List dbNamesValidation = new ArrayList(); String[] user1DbNames = {DB1, DB2}; + String tableNames[] = {"tb_1"}; // verify by SQL // 1, 2 @@ -353,6 +356,8 @@ public void testShowDatabases2() throws Exception { dbNamesValidation.add("default"); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); + createTabs(statement, DB1, tableNames); + createTabs(statement, DB2, tableNames); ResultSet rs = statement.executeQuery("SHOW DATABASES"); validateDBs(rs, dbNamesValidation); // admin should see all dbs rs.close(); @@ -398,8 +403,7 @@ 
private void createTabs(Statement statement, String dbName, } // compare the table resultset with given array of table names - private void validateTables(ResultSet rs, String dbName, - List tableNames) throws SQLException { + private void validateTables(ResultSet rs, List tableNames) throws SQLException { while (rs.next()) { String tableName = rs.getString(1); Assert.assertTrue(tableName, tableNames.remove(tableName.toLowerCase())); @@ -411,8 +415,7 @@ private void validateTables(ResultSet rs, String dbName, // compare the tables in resultset with given array of table names // for some hive query like 'show table extended ...', the resultset does // not only contains tableName (See HIVE-8109) - private void validateTablesInRs(ResultSet rs, String dbName, - List tableNames) throws SQLException { + private void validateTablesInRs(ResultSet rs, List tableNames) throws SQLException { while (rs.next()) { String tableName = rs.getString(1); if (tableName.startsWith("tableName:")) { diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSandboxOps.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSandboxOps.java index 626fd405c..da3b90fff 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSandboxOps.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSandboxOps.java @@ -43,13 +43,12 @@ public class TestSandboxOps extends AbstractTestWithStaticConfiguration { @Before public void setup() throws Exception { + policyFile = super.setupPolicy(); + super.setup(); dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); to.close(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); - policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); loadData = "server=server1->uri=file://" + dataFile.getPath(); } @@ -66,13 +65,14 @@ private PolicyFile addTwoUsersWithAllDb() throws Exception { */ @Test public void testDbPrivileges() throws Exception { - addTwoUsersWithAllDb(); - writePolicyFile(policyFile); String[] dbs = new String[] { DB1, DB2 }; for (String dbName : dbs) { - dropDb(ADMIN1, dbName); createDb(ADMIN1, dbName); } + + addTwoUsersWithAllDb(); + writePolicyFile(policyFile); + for (String user : new String[] { USER1_1, USER1_2 }) { for (String dbName : dbs) { Connection userConn = context.createConnection(user); @@ -90,12 +90,8 @@ public void testDbPrivileges() throws Exception { userConn.close(); } } - - for (String dbName : dbs) { - dropDb(ADMIN1, dbName); - } - } + /** * Test Case 2.11 admin user create a new database DB_1 and grant ALL to * himself on DB_1 should work @@ -105,7 +101,6 @@ public void testAdminDbPrivileges() throws Exception { Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); // access the new databases @@ -115,12 +110,6 @@ public void testAdminDbPrivileges() throws Exception { adminStmt.execute("load data local inpath '" + dataFile.getPath() + "' into table " + tabName); adminStmt.execute("select * from " + tabName); - - // cleanup - adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE " + DB1 + " CASCADE"); - adminStmt.close(); 
- adminCon.close(); } /** @@ -131,20 +120,21 @@ public void testAdminDbPrivileges() throws Exception { */ @Test public void testNegativeUserDMLPrivileges() throws Exception { - policyFile - .addPermissionsToRole("db1_tab2_all", "server=server1->db=" + DB1 + "->table=table_2") - .addRolesToGroup(USERGROUP1, "db1_tab2_all"); - writePolicyFile(policyFile); Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); adminStmt.execute("use " + DB1); adminStmt.execute("create table table_1 (id int)"); adminStmt.execute("create table table_2 (id int)"); adminStmt.close(); adminCon.close(); + + policyFile + .addPermissionsToRole("db1_tab2_all", "server=server1->db=" + DB1 + "->table=table_2") + .addRolesToGroup(USERGROUP1, "db1_tab2_all"); + writePolicyFile(policyFile); + Connection userConn = context.createConnection(USER1_1); Statement userStmt = context.createStatement(userConn); userStmt.execute("use " + DB1); @@ -173,17 +163,10 @@ public void testNegativeUserDMLPrivileges() throws Exception { */ @Test public void testNegUserPrivilegesAll() throws Exception { - policyFile - .addRolesToGroup(USERGROUP1, "db1_all") - .addRolesToGroup(USERGROUP2, "db1_tab1_select") - .addPermissionsToRole("db1_tab1_select", "server=server1->db="+ DB1 + "->table=table_1->action=select") - .addPermissionsToRole("db1_all", "server=server1->db=" + DB1); - writePolicyFile(policyFile); // create dbs Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); adminStmt.execute("use " + DB1); adminStmt.execute("create table table_1 (name string)"); @@ -197,6 +180,13 @@ public void testNegUserPrivilegesAll() throws Exception { adminStmt.close(); adminCon.close(); + policyFile + .addRolesToGroup(USERGROUP1, "db1_all") + .addRolesToGroup(USERGROUP2, "db1_tab1_select") + .addPermissionsToRole("db1_tab1_select", "server=server1->db="+ DB1 + "->table=table_1->action=select") + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1); + writePolicyFile(policyFile); + Connection userConn = context.createConnection(USER2_1); Statement userStmt = context.createStatement(userConn); userStmt.execute("use " + DB1); @@ -247,15 +237,13 @@ public void testNegUserPrivilegesAll() throws Exception { */ @Test public void testSandboxOpt9() throws Exception { + createDb(ADMIN1, DB1, DB2); policyFile - .addPermissionsToRole(GROUP1_ROLE, ALL_DB1, ALL_DB2, loadData) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE); + .addPermissionsToRole(GROUP1_ROLE, ALL_DB1, ALL_DB2, loadData) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE); writePolicyFile(policyFile); - dropDb(ADMIN1, DB1, DB2); - createDb(ADMIN1, DB1, DB2); - Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); @@ -295,7 +283,6 @@ public void testSandboxOpt9() throws Exception { + " (value) AS SELECT value from " + DB2 + "." 
+ TBL3 + " LIMIT 10"); statement.close(); connection.close(); - dropDb(ADMIN1, DB1, DB2); } /** @@ -316,14 +303,9 @@ public void testSandboxOpt9() throws Exception { */ @Test public void testSandboxOpt13() throws Exception { - // unrelated permission to allow user1 to connect to db1 - policyFile - .addPermissionsToRole(GROUP1_ROLE, SELECT_DB1_TBL2) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE); - writePolicyFile(policyFile); - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); createTable(ADMIN1, DB1, dataFile, TBL1); + createTable(ADMIN1, DB1, dataFile, TBL2); Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("USE " + DB1); @@ -332,6 +314,13 @@ public void testSandboxOpt13() throws Exception { + " (under_col) as 'COMPACT' WITH DEFERRED REBUILD"); statement.close(); connection.close(); + + // unrelated permission to allow user1 to connect to db1 + policyFile + .addPermissionsToRole(GROUP1_ROLE, SELECT_DB1_TBL2) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE); + writePolicyFile(policyFile); + connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("USE " + DB1); @@ -342,7 +331,6 @@ public void testSandboxOpt13() throws Exception { statement.execute("USE " + DB1); assertTrue(statement.execute("SELECT * FROM " + TBL1 + " WHERE under_col == 5")); assertTrue(statement.execute("SHOW INDEXES ON " + TBL1)); - dropDb(ADMIN1, DB1, DB2); } /** @@ -372,19 +360,17 @@ public void testSandboxOpt13() throws Exception { */ @Test public void testSandboxOpt17() throws Exception { - - dropDb(ADMIN1, DB1); createDb(ADMIN1, DB1); + createTable(ADMIN1, DB1, dataFile, TBL1, TBL2); policyFile .addRolesToGroup(USERGROUP1, "all_db1", "load_data") .addRolesToGroup(USERGROUP2, "select_tb1") - .addPermissionsToRole("select_tb1", "server=server1->db=" + DB1 + "->table=tbl_1->action=select") + .addPermissionsToRole("select_tb1", "server=server1->db=" + DB1 + "->table=" + TBL1 + "->action=select") .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) .addPermissionsToRole("load_data", "server=server1->uri=file://" + dataFile.toString()); writePolicyFile(policyFile); - createTable(USER1_1, DB1, dataFile, TBL1, TBL2); Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); // c @@ -436,17 +422,17 @@ public void testInsertOverwriteAndLoadData() throws Exception { //Hive needs write permissions on this local directory baseDir.setWritable(true, false); + createDb(ADMIN1, DB1); + createTable(ADMIN1, DB1, dataFile, TBL1); + policyFile - .addRolesToGroup(USERGROUP1, "all_db1", "load_data") - .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) - .addPermissionsToRole("load_data", "server=server1->uri=file://" + allowedDir.getPath() + - ", server=server1->uri=file://" + allowedDir.getPath() + - ", server=server1->uri=" + allowedDfsDir.toString()); + .addRolesToGroup(USERGROUP1, "all_db1", "load_data") + .addPermissionsToRole("all_db1", "server=server1->db=" + DB1) + .addPermissionsToRole("load_data", "server=server1->uri=file://" + allowedDir.getPath() + + ", server=server1->uri=file://" + allowedDir.getPath() + + ", server=server1->uri=" + allowedDfsDir.toString()); writePolicyFile(policyFile); - dropDb(ADMIN1, DB1); - createDb(ADMIN1, DB1); - createTable(ADMIN1, DB1, dataFile, TBL1); Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); statement.execute("USE 
" + DB1); @@ -468,20 +454,18 @@ public void testInsertOverwriteAndLoadData() throws Exception { */ @Test public void testSandboxOpt10() throws Exception { - String rTab1 = "rtab_1"; String rTab2 = "rtab_2"; - policyFile - .addPermissionsToRole(GROUP1_ROLE, ALL_DB1, SELECT_DB2_TBL2, loadData) - .addRolesToGroup(USERGROUP1, GROUP1_ROLE); - writePolicyFile(policyFile); - - dropDb(ADMIN1, DB1, DB2); createDb(ADMIN1, DB1, DB2); createTable(ADMIN1, DB1, dataFile, TBL1); createTable(ADMIN1, DB2, dataFile, TBL2, TBL3); + policyFile + .addPermissionsToRole(GROUP1_ROLE, ALL_DB1, SELECT_DB2_TBL2, loadData) + .addRolesToGroup(USERGROUP1, GROUP1_ROLE); + writePolicyFile(policyFile); + // a Connection connection = context.createConnection(USER1_1); Statement statement = context.createStatement(connection); @@ -492,20 +476,11 @@ public void testSandboxOpt10() throws Exception { statement.close(); connection.close(); - dropDb(ADMIN1, DB1, DB2); } // Create per-db policy file on hdfs and global policy on local. @Test public void testPerDbPolicyOnDFS() throws Exception { - - policyFile - .addRolesToGroup(USERGROUP1, "select_tbl1") - .addRolesToGroup(USERGROUP2, "select_tbl2") - .addPermissionsToRole("select_tbl1", "server=server1->db=" + DB1 + "->table=tbl1->action=select") - .addDatabase(DB2, dfs.getBaseDir().toUri().toString() + "/" + DB2_POLICY_FILE); - writePolicyFile(policyFile); - File db2PolicyFileHandle = new File(baseDir.getPath(), DB2_POLICY_FILE); PolicyFile db2PolicyFile = new PolicyFile(); @@ -519,14 +494,11 @@ public void testPerDbPolicyOnDFS() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); - statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); - statement.execute("DROP DATABASE IF EXISTS " + DB2 + " CASCADE"); statement.execute("CREATE DATABASE " + DB1); statement.execute("USE " + DB1); statement.execute("CREATE TABLE tbl1(B INT, A STRING) " + " row format delimited fields terminated by '|' stored as textfile"); statement.execute("LOAD DATA LOCAL INPATH '" + dataFile.getPath() + "' INTO TABLE tbl1"); - statement.execute("DROP DATABASE IF EXISTS " + DB2 + " CASCADE"); statement.execute("CREATE DATABASE " + DB2); statement.execute("USE " + DB2); statement.execute("CREATE TABLE tbl2(B INT, A STRING) " + @@ -535,6 +507,13 @@ public void testPerDbPolicyOnDFS() throws Exception { statement.close(); connection.close(); + policyFile + .addRolesToGroup(USERGROUP1, "select_tbl1") + .addRolesToGroup(USERGROUP2, "select_tbl2") + .addPermissionsToRole("select_tbl1", "server=server1->db=" + DB1 + "->table=tbl1->action=select") + .addDatabase(DB2, dfs.getBaseDir().toUri().toString() + "/" + DB2_POLICY_FILE); + writePolicyFile(policyFile); + // test per-db file for db2 connection = context.createConnection(USER2_1); @@ -545,15 +524,6 @@ public void testPerDbPolicyOnDFS() throws Exception { statement.close(); connection.close(); - - //test cleanup - connection = context.createConnection(ADMIN1); - statement = context.createStatement(connection); - statement.execute("DROP DATABASE " + DB1 + " CASCADE"); - - statement.execute("DROP DATABASE " + DB2 + " CASCADE"); - statement.close(); - connection.close(); } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java index ad27238b2..4a64072f9 100644 --- 
a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java @@ -28,7 +28,7 @@ import java.util.HashMap; import java.util.Map; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.binding.hive.conf.HiveAuthzConf; import org.apache.sentry.provider.file.PolicyFile; diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestServerConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestServerConfiguration.java index d8ebea6c0..56e0e009d 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestServerConfiguration.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestServerConfiguration.java @@ -45,15 +45,16 @@ public class TestServerConfiguration extends AbstractTestWithHiveServer { - private Context context; - private Map properties; + // Context is created inside individual test cases, because the + // test cases for server configuration are properties based. + private static Context context; + private static Map properties; private PolicyFile policyFile; @Before - public void setup() throws Exception { + public void setupPolicyFile() throws Exception { properties = Maps.newHashMap(); policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); - } @After @@ -61,6 +62,11 @@ public void tearDown() throws Exception { if(context != null) { context.close(); } + + if(hiveServer != null) { + hiveServer.shutdown(); + hiveServer = null; + } } /** @@ -70,7 +76,7 @@ public void tearDown() throws Exception { public void testImpersonationIsDisabled() throws Exception { properties.put(HiveServerFactory.ACCESS_TESTING_MODE, "false"); properties.put("hive.server2.enable.impersonation", "true"); - verifyInvalidConfigurationException(); + verifyInvalidConfigurationException(properties); } /** @@ -80,10 +86,10 @@ public void testImpersonationIsDisabled() throws Exception { public void testAuthenticationIsStrong() throws Exception { properties.put(HiveServerFactory.ACCESS_TESTING_MODE, "false"); properties.put("hive.server2.authentication", "NONE"); - verifyInvalidConfigurationException(); + verifyInvalidConfigurationException(properties); } - private void verifyInvalidConfigurationException() throws Exception{ + private void verifyInvalidConfigurationException(Map properties) throws Exception{ context = createContext(properties); policyFile .setUserGroupMapping(StaticUserGroup.getStaticMapping()) @@ -95,6 +101,10 @@ private void verifyInvalidConfigurationException() throws Exception{ Assert.fail("Expected SQLException"); } catch (SQLException e) { context.verifyInvalidConfigurationException(e); + } finally { + if (context != null) { + context.close(); + } } } @@ -107,6 +117,7 @@ public void testRemovalOfPolicyFile() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); try { + statement.execute("DROP TABLE IF EXISTS test CASCADE"); statement.execute("create table test (a string)"); Assert.fail("Expected SQLException"); } catch (SQLException e) { @@ -127,6 +138,7 @@ public void testCorruptionOfPolicyFile() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); try { + statement.execute("DROP TABLE 
IF EXISTS test CASCADE"); statement.execute("create table test (a string)"); Assert.fail("Expected SQLException"); } catch (SQLException e) { @@ -137,7 +149,6 @@ public void testCorruptionOfPolicyFile() throws Exception { @Test public void testAddDeleteDFSRestriction() throws Exception { context = createContext(properties); - policyFile .addRolesToGroup(USERGROUP1, "all_db1") .addRolesToGroup(USERGROUP2, "select_tb1") @@ -224,17 +235,14 @@ public void testDefaultDbRestrictivePrivilege() throws Exception { Connection connection = context.createConnection(ADMIN1); Statement statement = context.createStatement(connection); statement.execute("use default"); - context.close(); connection = context.createConnection(USER1_1); statement = context.createStatement(connection); statement.execute("use default"); - context.close(); connection = context.createConnection(USER2_1); statement = context.createStatement(connection); statement.execute("use default"); - context.close(); connection = context.createConnection(USER3_1); statement = context.createStatement(connection); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestUriPermissions.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestUriPermissions.java index 7c7c63e78..a1b89ae1f 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestUriPermissions.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestUriPermissions.java @@ -21,14 +21,14 @@ import java.sql.Connection; import java.sql.ResultSet; import java.sql.Statement; -import java.util.Random; import com.google.common.io.Resources; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.provider.file.PolicyFile; import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; public class TestUriPermissions extends AbstractTestWithStaticConfiguration { @@ -36,12 +36,15 @@ public class TestUriPermissions extends AbstractTestWithStaticConfiguration { private File dataFile; private String loadData; + @BeforeClass + public static void setupTestStaticConfiguration () throws Exception { + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + @Before public void setup() throws Exception { - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); - policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping()); - writePolicyFile(policyFile); - + policyFile = super.setupPolicy(); + super.setup(); } // test load data into table @@ -57,24 +60,23 @@ public void testLoadPrivileges() throws Exception { Connection userConn = null; Statement userStmt = null; - policyFile - .addRolesToGroup(USERGROUP1, "db1_read", "db1_write", "data_read") - .addRolesToGroup(USERGROUP2, "db1_write") - .addPermissionsToRole("db1_write", "server=server1->db=" + DB1 + "->table=" + tabName + "->action=INSERT") - .addPermissionsToRole("db1_read", "server=server1->db=" + DB1 + "->table=" + tabName + "->action=SELECT") - .addPermissionsToRole("data_read", loadData); - writePolicyFile(policyFile); - // create dbs Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); adminStmt.execute("use " + DB1); adminStmt.execute("CREATE TABLE " + tabName + 
"(id int)"); context.close(); + policyFile + .addRolesToGroup(USERGROUP1, "db1_read", "db1_write", "data_read") + .addRolesToGroup(USERGROUP2, "db1_write") + .addPermissionsToRole("db1_write", "server=server1->db=" + DB1 + "->table=" + tabName + "->action=INSERT") + .addPermissionsToRole("db1_read", "server=server1->db=" + DB1 + "->table=" + tabName + "->action=SELECT") + .addPermissionsToRole("data_read", loadData); + writePolicyFile(policyFile); + // positive test, user1 has access to file being loaded userConn = context.createConnection(USER1_1); userStmt = context.createStatement(userConn); @@ -111,7 +113,6 @@ public void testAlterPartitionLocationPrivileges() throws Exception { Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); adminStmt.execute("use " + DB1); adminStmt.execute("CREATE TABLE " + tabName + " (id int) PARTITIONED BY (dt string)"); @@ -176,24 +177,23 @@ public void testAlterTableLocationPrivileges() throws Exception { Connection userConn = null; Statement userStmt = null; - policyFile - .addRolesToGroup(USERGROUP1, "server1_all") - .addRolesToGroup(USERGROUP2, "db1_all, data_read") - .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) - .addPermissionsToRole("data_read", "server=server1->URI=" + tabDir) - .addPermissionsToRole("server1_all", "server=server1"); - writePolicyFile(policyFile); - // create dbs Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); adminStmt.execute("use " + DB1); adminStmt.execute("CREATE TABLE " + tabName + " (id int) PARTITIONED BY (dt string)"); adminCon.close(); + policyFile + .addRolesToGroup(USERGROUP1, "server1_all") + .addRolesToGroup(USERGROUP2, "db1_all, data_read") + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) + .addPermissionsToRole("data_read", "server=server1->URI=" + tabDir) + .addPermissionsToRole("server1_all", "server=server1"); + writePolicyFile(policyFile); + // positive test: user2 has privilege to alter table set partition userConn = context.createConnection(USER2_1); userStmt = context.createStatement(userConn); @@ -223,22 +223,21 @@ public void testExternalTablePrivileges() throws Exception { baseDir.setWritable(true, false); dataDir.setWritable(true, false); - policyFile - .addRolesToGroup(USERGROUP1, "db1_all", "data_read") - .addRolesToGroup(USERGROUP2, "db1_all") - .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) - .addPermissionsToRole("data_read", "server=server1->URI=" + dataDirPath); - writePolicyFile(policyFile); - // create dbs Connection adminCon = context.createConnection(ADMIN1); Statement adminStmt = context.createStatement(adminCon); adminStmt.execute("use default"); - adminStmt.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE"); adminStmt.execute("CREATE DATABASE " + DB1); adminStmt.close(); adminCon.close(); + policyFile + .addRolesToGroup(USERGROUP1, "db1_all", "data_read") + .addRolesToGroup(USERGROUP2, "db1_all") + .addPermissionsToRole("db1_all", "server=server1->db=" + DB1) + .addPermissionsToRole("data_read", "server=server1->URI=" + dataDirPath); + writePolicyFile(policyFile); + // negative test: user2 doesn't have privilege to create external 
table in given path userConn = context.createConnection(USER2_1); userStmt = context.createStatement(userConn); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestUserManagement.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestUserManagement.java index fa34c339c..02ac51454 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestUserManagement.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestUserManagement.java @@ -17,10 +17,9 @@ package org.apache.sentry.tests.e2e.hive; -import org.apache.sentry.provider.file.PolicyFile; -import org.junit.After; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.FileOutputStream; @@ -29,12 +28,16 @@ import java.sql.Statement; import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.sentry.provider.file.PolicyFile; +import org.junit.After; import org.junit.Before; import org.junit.Test; import com.google.common.io.Resources; public class TestUserManagement extends AbstractTestWithStaticConfiguration { + private static final String SINGLE_TYPE_DATA_FILE_NAME = "kv1.dat"; private static final String dbName = "db1"; private static final String tableName = "t1"; @@ -42,8 +45,11 @@ public class TestUserManagement extends AbstractTestWithStaticConfiguration { private File dataFile; private PolicyFile policyFile; + @Override @Before - public void setUp() throws Exception { + public void setup() throws Exception { + policyFile = super.setupPolicy(); + super.setup(); dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); FileOutputStream to = new FileOutputStream(dataFile); Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to); @@ -51,7 +57,7 @@ public void setUp() throws Exception { } @Override @After - public void clearDB() throws Exception { + public void clearAfterPerTest() throws Exception { if (context != null) { context.close(); } @@ -340,6 +346,45 @@ public void testGroup8() throws Exception { } } + /** + * Tests that users without group information cause a SentryGroupNotFoundException + **/ + @Test + public void testGroup9() throws Exception { + policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); + policyFile.addGroupsToUser("admin1", ADMINGROUP); + writePolicyFile(policyFile); + + Connection connection = context.createConnection("admin1"); + Statement statement = connection.createStatement(); + statement.execute("DROP DATABASE IF EXISTS db1 CASCADE"); + statement.execute("CREATE DATABASE db1"); + statement.execute("USE db1"); + statement.execute("CREATE TABLE t1 (under_col int)"); + statement.close(); + connection.close(); + + // user1 does not belong to any group + connection = context.createConnection("user1"); + statement = context.createStatement(connection); + // for any SQL statement that needs to be authorized, an exception will be thrown if the + // user has no group information + try { + statement.execute("CREATE TABLE db1.t1 (under_col int, value string)"); + fail("SentryGroupNotFoundException should be thrown for a user without group configuration"); + } catch (HiveSQLException hse) { + assertTrue(hse.getMessage().indexOf("SentryGroupNotFoundException") >= 0); + } + try { + statement.execute("SELECT under_col from db1.t1"); + fail("SentryGroupNotFoundException should be thrown for a user without group configuration"); + }
catch (HiveSQLException hse) { + assertTrue(hse.getMessage().indexOf("SentryGroupNotFoundException") >= 0); + } + statement.close(); + connection.close(); + } + @Test public void testMrAclsSetting() throws Exception { Connection connection = context.createConnection("admin1"); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestViewPrivileges.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestViewPrivileges.java index c8a054fdf..79e9548f4 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestViewPrivileges.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestViewPrivileges.java @@ -28,12 +28,13 @@ import java.sql.Statement; import java.util.Map; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.sentry.provider.file.PolicyFile; -import org.junit.After; +import org.junit.AfterClass; import org.junit.Assume; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import com.google.common.collect.Maps; @@ -42,25 +43,29 @@ public class TestViewPrivileges extends AbstractTestWithHiveServer { protected static final String SERVER_HOST = "localhost"; - private Context context; - private Map properties; + private static Context context; + private static Map properties; private PolicyFile policyFile; private final String SINGLE_TYPE_DATA_FILE_NAME = "kv1.dat"; - @Before - public void setUp() throws Exception { + @BeforeClass + public static void setUp() throws Exception { properties = Maps.newHashMap(); - policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); context = createContext(properties); } - @After - public void tearDown() throws Exception { + @AfterClass + public static void tearDown() throws Exception { if(context != null) { context.close(); } } + + @Before + public void setupPolicyFile() throws Exception { + policyFile = PolicyFile.setAdminOnServer1(ADMINGROUP); + } @Test public void testPartitioned() throws Exception { @@ -91,13 +96,13 @@ public void testPartitioned() throws Exception { stmt.execute("load data local inpath '" + dataFile + "' into table " + tabName + " PARTITION (part=\"a\")"); stmt.execute("load data local inpath '" + dataFile + "' into table " + tabName + " PARTITION (part=\"b\")"); ResultSet res = stmt.executeQuery("select count(*) from " + tabName); - org.junit.Assert.assertThat(res, notNullValue()); + Assert.assertThat(res, notNullValue()); while(res.next()) { - Assume.assumeTrue(res.getInt(1) == new Integer(1000)); + Assume.assumeTrue(res.getInt(1) == Integer.valueOf(1000)); } stmt.execute("create view " + viewName + " as select * from " + tabName + " where id<100"); res = stmt.executeQuery("select count(*) from " + viewName); - org.junit.Assert.assertThat(res, notNullValue()); + Assert.assertThat(res, notNullValue()); int rowsInView = 0; while(res.next()) { rowsInView = res.getInt(1); @@ -109,9 +114,9 @@ public void testPartitioned() throws Exception { Statement userStmt = context.createStatement(userConn); userStmt.execute("use " + db); res = userStmt.executeQuery("select count(*) from " + viewName); - org.junit.Assert.assertThat(res, notNullValue()); + Assert.assertThat(res, notNullValue()); while(res.next()) { - org.junit.Assert.assertThat(res.getInt(1), is(rowsInView)); + Assert.assertThat(res.getInt(1), is(rowsInView)); } userStmt.close(); userConn.close(); diff --git 
a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/AbstractDFS.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/AbstractDFS.java index 8b1345d80..32aabb41a 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/AbstractDFS.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/AbstractDFS.java @@ -16,7 +16,7 @@ */ package org.apache.sentry.tests.e2e.hive.fs; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.slf4j.Logger; diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFS.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFS.java index 872a0849f..67ba3386d 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFS.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFS.java @@ -23,10 +23,10 @@ import java.io.IOException; public interface DFS { - public FileSystem getFileSystem(); - public void tearDown() throws Exception; - public Path assertCreateDir(String dir) throws Exception; - public Path getBaseDir(); - public void createBaseDir() throws Exception; - public void writePolicyFile(File srcFile) throws IOException; + FileSystem getFileSystem(); + void tearDown() throws Exception; + Path assertCreateDir(String dir) throws Exception; + Path getBaseDir(); + void createBaseDir() throws Exception; + void writePolicyFile(File srcFile) throws IOException; } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java index 3f03ef000..e1881b4bf 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java @@ -18,8 +18,6 @@ import java.io.File; -import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory.HiveServer2Type; - import com.google.common.annotations.VisibleForTesting; public class DFSFactory { diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java index 3ba14f112..77af43296 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.List; -import junit.framework.Assert; +import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServer.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServer.java index ee6155bdc..175e84cea 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServer.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServer.java @@ -21,14 +21,14 @@ public interface HiveServer { - public void start() throws Exception; + 
void start() throws Exception; - public void shutdown() throws Exception; + void shutdown() throws Exception; - public String getURL(); + String getURL(); - public String getProperty(String key); + String getProperty(String key); - public Connection createConnection(String user, String password) throws Exception; + Connection createConnection(String user, String password) throws Exception; } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java index 101436137..847da45dc 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java @@ -22,6 +22,7 @@ import java.io.OutputStream; import java.net.ServerSocket; import java.net.URL; +import java.nio.file.FileSystems; import java.util.Map; import org.apache.hadoop.fs.FileSystem; @@ -39,7 +40,6 @@ import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; -import com.google.common.io.Files; import com.google.common.io.Resources; public class HiveServerFactory { @@ -112,7 +112,7 @@ public static HiveServer create(HiveServer2Type type, fileSystem.mkdirs(new Path("/", "warehouse"), new FsPermission((short) 0777)); } } - Boolean policyOnHDFS = new Boolean(System.getProperty("sentry.e2etest.policyonhdfs", "false")); + Boolean policyOnHDFS = Boolean.valueOf(System.getProperty("sentry.e2etest.policyonhdfs", "false")); if (policyOnHDFS) { // Initialize "hive.exec.scratchdir", according to the description of // "hive.exec.scratchdir", the permission should be (733). @@ -151,23 +151,28 @@ public static HiveServer create(HiveServer2Type type, properties.put(SUPPORT_CONCURRENCY, "false"); } if(!properties.containsKey(HADOOPBIN)) { - properties.put(HADOOPBIN, "./target/hadoop"); + properties.put(HADOOPBIN, "./target/test-classes/hadoop"); } + + // Modify the test resource to have executable permission + java.nio.file.Path hadoopPath = FileSystems.getDefault().getPath("target/test-classes", "hadoop"); + if (hadoopPath != null) { + hadoopPath.toFile().setExecutable(true); + } + properties.put(METASTORE_RAW_STORE_IMPL, "org.apache.sentry.binding.metastore.AuthorizingObjectStore"); - if (!properties.containsKey(METASTORE_URI)) { - if (HiveServer2Type.InternalMetastore.equals(type)) { - // The configuration sentry.metastore.service.users is for the user who - // has all access to get the metadata. - properties.put(METASTORE_BYPASS, "accessAllMetaUser"); - properties.put(METASTORE_URI, - "thrift://localhost:" + String.valueOf(findPort())); - if (!properties.containsKey(METASTORE_HOOK)) { - properties.put(METASTORE_HOOK, - "org.apache.sentry.binding.metastore.MetastoreAuthzBinding"); - } - properties.put(ConfVars.METASTORESERVERMINTHREADS.varname, "5"); + if (!properties.containsKey(METASTORE_URI) && HiveServer2Type.InternalMetastore.equals(type)) { + // The configuration sentry.metastore.service.users is for the user who + // has all access to get the metadata.
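For context on the hadoopPath block above: this patch moves the hadoop wrapper script from testutil/hadoop to src/test/resources/hadoop (see the rename near the end of this diff), and files copied into target/test-classes by the build do not keep their executable bit, so the factory has to mark the copy executable again. A self-contained sketch of the same fixup, assuming the resource lands at target/test-classes/hadoop; HadoopBinFixup is a hypothetical class name:

    import java.io.File;
    import java.nio.file.FileSystems;

    public final class HadoopBinFixup {
      public static void main(String[] args) {
        // Resources copied under target/test-classes lose their permissions,
        // so mark the wrapper executable before HiveServer2 shells out to it.
        File hadoopBin = FileSystems.getDefault()
            .getPath("target/test-classes", "hadoop").toFile();
        if (hadoopBin.exists()) {
          hadoopBin.setExecutable(true);
        }
        System.out.println(hadoopBin + " executable=" + hadoopBin.canExecute());
      }
    }

As a design note, FileSystems.getDefault().getPath(...) never returns null, so the null check in the hunk above is redundant; testing toFile().exists() would be the guard that actually matters.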
+ properties.put(METASTORE_BYPASS, "accessAllMetaUser"); + properties.put(METASTORE_URI, + "thrift://localhost:" + String.valueOf(findPort())); + if (!properties.containsKey(METASTORE_HOOK)) { + properties.put(METASTORE_HOOK, + "org.apache.sentry.binding.metastore.MetastoreAuthzBinding"); } + properties.put(ConfVars.METASTORESERVERMINTHREADS.varname, "5"); } // set the SentryMetaStoreFilterHook for HiveServer2 only, not for metastore @@ -188,7 +193,6 @@ public static HiveServer create(HiveServer2Type type, properties.put(METASTORE_CLIENT_TIMEOUT, "100"); properties.put(ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "true"); - properties.put(ConfVars.HIVE_SERVER2_BUILTIN_UDF_BLACKLIST.varname, "reflect,reflect2,java_method"); properties.put(ConfVars.HIVESTATSAUTOGATHER.varname, "false"); properties.put(ConfVars.HIVE_STATS_COLLECT_SCANCOLS.varname, "true"); String hadoopBinPath = properties.get(HADOOPBIN); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalHiveServer.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalHiveServer.java index 0e53d3d1d..45f0ef28b 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalHiveServer.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalHiveServer.java @@ -18,9 +18,7 @@ package org.apache.sentry.tests.e2e.hive.hiveserver; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hive.service.server.HiveServer2; -import org.fest.reflect.core.Reflection; public class InternalHiveServer extends AbstractHiveServer { diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalMetastoreServer.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalMetastoreServer.java index 4f739540e..bf4379813 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalMetastoreServer.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalMetastoreServer.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.shims.ShimLoader; -import org.fest.reflect.core.Reflection; public class InternalMetastoreServer extends AbstractHiveServer { private final HiveConf conf; diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/UnmanagedHiveServer.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/UnmanagedHiveServer.java index e8b3a2a34..beae8e83b 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/UnmanagedHiveServer.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/UnmanagedHiveServer.java @@ -60,7 +60,7 @@ private String getSystemAndConfigProperties(String hiveVar, String defaultVal){ }else { val = System.getProperty(hiveVar, defaultVal); } - Preconditions.checkNotNull(val, "Required system property missing: Provide it using -D"+ hiveVar); + Preconditions.checkNotNull(val, "Required system property is missing: Provide it using -D"+ hiveVar); LOGGER.info("Using from system property" + hiveVar + " = " + val ); }else {
LOGGER.info("Using from hive-site.xml" + hiveVar + " = " + val ); diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java index 23027d1b6..f1e6d75b2 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java @@ -43,27 +43,19 @@ import org.apache.sentry.provider.file.PolicyFile; import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory.HiveServer2Type; -import org.junit.After; import org.junit.BeforeClass; -import com.google.common.collect.Maps; - public abstract class AbstractMetastoreTestWithStaticConfiguration extends AbstractTestWithStaticConfiguration { @BeforeClass public static void setupTestStaticConfiguration() throws Exception { useSentryService = true; + clearDbPerTest = false; testServerType = HiveServer2Type.InternalMetastore.name(); AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); } - @Override - @After - public void clearDB() throws Exception { - - } - protected static void writePolicyFile(PolicyFile policyFile) throws Exception { policyFile.write(context.getPolicyFile()); } @@ -112,7 +104,7 @@ public Table createMetastoreTableWithPartition(HiveMetaStoreClient client, public void addPartition(HiveMetaStoreClient client, String dbName, String tblName, List ptnVals, Table tbl) throws Exception { Partition part = makeMetastorePartitionObject(dbName, tblName, ptnVals, tbl); - Partition retp = client.add_partition(part); + client.add_partition(part); } public void addPartitionWithLocation(HiveMetaStoreClient client, diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/SentryPolicyProviderForDb.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/SentryPolicyProviderForDb.java index d0994b667..2507f83ad 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/SentryPolicyProviderForDb.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/SentryPolicyProviderForDb.java @@ -16,11 +16,9 @@ */ package org.apache.sentry.tests.e2e.metastore; -import static org.apache.sentry.provider.common.ProviderConstants.AUTHORIZABLE_SPLITTER; -import static org.apache.sentry.provider.common.ProviderConstants.PRIVILEGE_PREFIX; -import static org.apache.sentry.provider.common.ProviderConstants.ROLE_SPLITTER; -import static org.apache.sentry.tests.e2e.hive.StaticUserGroup.ADMIN1; -import static org.apache.sentry.tests.e2e.hive.StaticUserGroup.ADMINGROUP; +import static org.apache.sentry.policy.common.PolicyConstants.AUTHORIZABLE_SPLITTER; +import static org.apache.sentry.policy.common.PolicyConstants.PRIVILEGE_PREFIX; +import static org.apache.sentry.policy.common.PolicyConstants.ROLE_SPLITTER; import java.io.File; import java.io.IOException; @@ -37,6 +35,7 @@ import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; import org.apache.sentry.provider.db.service.thrift.TSentryRole; import org.apache.sentry.provider.file.PolicyFile; +import 
org.apache.sentry.tests.e2e.hive.StaticUserGroup; import org.apache.tools.ant.util.StringUtils; import org.mortbay.log.Log; @@ -44,7 +43,7 @@ public class SentryPolicyProviderForDb extends PolicyFile { protected static final Set ADMIN_GROUP_SET = Sets - .newHashSet(ADMINGROUP); + .newHashSet(StaticUserGroup.ADMINGROUP); private SentryPolicyServiceClient sentryClient; protected SentryPolicyServiceClient getSentryClient() { @@ -72,14 +71,14 @@ public void write(File file) throws Exception { } // remove existing metadata - for (TSentryRole tRole : sentryClient.listRoles(ADMIN1)) { - sentryClient.dropRole(ADMIN1, tRole.getRoleName()); + for (TSentryRole tRole : sentryClient.listRoles(StaticUserGroup.ADMIN1)) { + sentryClient.dropRole(StaticUserGroup.ADMIN1, tRole.getRoleName()); } // create roles and add privileges for (Entry> roleEntry : rolesToPermissions .asMap().entrySet()) { - sentryClient.createRole(ADMIN1, roleEntry.getKey()); + sentryClient.createRole(StaticUserGroup.ADMIN1, roleEntry.getKey()); for (String privilege : roleEntry.getValue()) { addPrivilege(roleEntry.getKey(), privilege); } @@ -92,7 +91,7 @@ public void write(File file) throws Exception { for (String roleName : roleNames.split(",")) { try { sentryClient - .grantRoleToGroup(ADMIN1, groupEntry.getKey(), roleName); + .grantRoleToGroup(StaticUserGroup.ADMIN1, groupEntry.getKey(), roleName); } catch (SentryUserException e) { Log.warn("Error granting role " + roleName + " to group " + groupEntry.getKey()); @@ -141,18 +140,18 @@ private void addPrivilege(String roleName, String privileges) } if (columnName != null) { - sentryClient.grantColumnPrivilege(ADMIN1, roleName, serverName, dbName, + sentryClient.grantColumnPrivilege(StaticUserGroup.ADMIN1, roleName, serverName, dbName, tableName, columnName, action); } else if (tableName != null) { - sentryClient.grantTablePrivilege(ADMIN1, roleName, serverName, dbName, + sentryClient.grantTablePrivilege(StaticUserGroup.ADMIN1, roleName, serverName, dbName, tableName, action); } else if (dbName != null) { - sentryClient.grantDatabasePrivilege(ADMIN1, roleName, serverName, + sentryClient.grantDatabasePrivilege(StaticUserGroup.ADMIN1, roleName, serverName, dbName, action); } else if (uriPath != null) { - sentryClient.grantURIPrivilege(ADMIN1, roleName, serverName, uriPath); + sentryClient.grantURIPrivilege(StaticUserGroup.ADMIN1, roleName, serverName, uriPath); } else if (serverName != null) { - sentryClient.grantServerPrivilege(ADMIN1, roleName, serverName, action); + sentryClient.grantServerPrivilege(StaticUserGroup.ADMIN1, roleName, serverName, action); } } @@ -161,4 +160,4 @@ private void addPrivilege(String roleName, String privileges) private boolean usingSentryService() { return sentryClient != null; } -} \ No newline at end of file +} diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestAuthorizingObjectStore.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestAuthorizingObjectStore.java index 30041c582..3c28fd088 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestAuthorizingObjectStore.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestAuthorizingObjectStore.java @@ -33,6 +33,7 @@ import org.apache.sentry.tests.e2e.hive.StaticUserGroup; import org.apache.thrift.TException; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import com.google.common.collect.Lists; @@ 
-53,42 +54,21 @@ public class TestAuthorizingObjectStore extends // this user is configured for sentry.metastore.service.users, // for this test, the value is set when creating the HiveServer. private static final String userWithoutAccess = "accessAllMetaUser"; - private boolean isSetup = false; + @BeforeClass + public static void setupTestStaticConfiguration () throws Exception { + AbstractMetastoreTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + + @Override @Before public void setup() throws Exception { - if (isSetup) { - return; - } - isSetup = true; policyFile = setAdminOnServer1(ADMINGROUP); - policyFile - .addRolesToGroup(USERGROUP1, all_role) - .addRolesToGroup(USERGROUP2, db1_t1_role) - .addPermissionsToRole(all_role, "server=server1->db=" + dbName1) - .addPermissionsToRole(all_role, "server=server1->db=" + dbName2) - .addPermissionsToRole( - all_role, - "server=server1->db=" + dbName1 + "->table=" + tabName1 - + "->action=SELECT") - .addPermissionsToRole( - all_role, - "server=server1->db=" + dbName1 + "->table=" + tabName2 - + "->action=SELECT") - .addPermissionsToRole( - all_role, - "server=server1->db=" + dbName2 + "->table=" + tabName3 - + "->action=SELECT") - .addPermissionsToRole( - all_role, - "server=server1->db=" + dbName2 + "->table=" + tabName4 - + "->action=SELECT") - .addPermissionsToRole( - db1_t1_role, - "server=server1->db=" + dbName1 + "->table=" + tabName1 - + "->action=SELECT") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + // add user ACCESSAllMETAUSER for the test case testPrivilegesForUserNameCaseSensitive + policyFile.addGroupsToUser(userWithoutAccess.toUpperCase(), "tempGroup").setUserGroupMapping( + StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); + super.setup(); HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1); client.dropDatabase(dbName1, true, true, true); @@ -117,6 +97,24 @@ public void setup() throws Exception { addPartition(client, dbName2, tabName4, Lists.newArrayList(partitionVal), tbl4); client.close(); + + policyFile + .addRolesToGroup(USERGROUP1, all_role) + .addRolesToGroup(USERGROUP2, db1_t1_role) + .addPermissionsToRole(all_role, "server=server1->db=" + dbName1) + .addPermissionsToRole(all_role, "server=server1->db=" + dbName2) + .addPermissionsToRole(all_role, + "server=server1->db=" + dbName1 + "->table=" + tabName1 + "->action=SELECT") + .addPermissionsToRole(all_role, + "server=server1->db=" + dbName1 + "->table=" + tabName2 + "->action=SELECT") + .addPermissionsToRole(all_role, + "server=server1->db=" + dbName2 + "->table=" + tabName3 + "->action=SELECT") + .addPermissionsToRole(all_role, + "server=server1->db=" + dbName2 + "->table=" + tabName4 + "->action=SELECT") + .addPermissionsToRole(db1_t1_role, + "server=server1->db=" + dbName1 + "->table=" + tabName1 + "->action=SELECT") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); } /** diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetaStoreWithPigHCat.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetaStoreWithPigHCat.java index 9aa140c30..f406fd7fc 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetaStoreWithPigHCat.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetaStoreWithPigHCat.java @@ -19,11 +19,9 @@ package org.apache.sentry.tests.e2e.metastore; import static 
org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; import java.io.File; import java.io.FileOutputStream; -import java.io.IOException; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hive.hcatalog.pig.HCatStorer; @@ -43,8 +41,9 @@ public class TestMetaStoreWithPigHCat extends private PolicyFile policyFile; private File dataFile; private static final String dbName = "db_1"; + private static final String tabName1 = "tab1"; + private static final String tabName2 = "tab2"; private static final String db_all_role = "all_db1"; - private static final String uri_role = "uri_role"; @BeforeClass public static void beforeClass() { @@ -64,7 +63,7 @@ public void setup() throws Exception { .addRolesToGroup(USERGROUP2, "read_db_role") .addPermissionsToRole(db_all_role, "server=server1->db=" + dbName) .addPermissionsToRole("read_db_role", - "server=server1->db=" + dbName + "->table=*->action=SELECT") + "server=server1->db=" + dbName + "->table=" + tabName2 + "->action=SELECT") .setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); @@ -82,34 +81,33 @@ public void setup() throws Exception { @Ignore @Test public void testPartionLoad() throws Exception { - String tabName = "tab1"; - execHiveSQL("CREATE TABLE " + dbName + "." + tabName - + " (id int) PARTITIONED BY (part_col STRING)", USER1_1); + execHiveSQL("CREATE TABLE " + dbName + "." + tabName1 + + " (id int) PARTITIONED BY (part_col STRING)", ADMIN1); + execHiveSQL("CREATE TABLE " + dbName + "." + tabName2 + + " (id int) PARTITIONED BY (part_col STRING)", ADMIN1); // user with ALL on DB should be able to add partition using Pig/HCatStore PigServer pigServer = context.getPigServer(USER1_1, ExecType.LOCAL); execPigLatin(USER1_1, pigServer, "A = load '" + dataFile.getPath() + "' as (id:int);"); - execPigLatin(USER1_1, pigServer, "store A into '" + dbName + "." + tabName + execPigLatin(USER1_1, pigServer, "store A into '" + dbName + "." + tabName1 + "' using " + HCatStorer.class.getName() + " ('part_col=part1');"); HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1); - assertEquals(1, client.listPartitionNames(dbName, tabName, (short) 10) + assertEquals(1, client.listPartitionNames(dbName, tabName1, (short) 10) .size()); - client.close(); - // user without ALL on DB should NOT be able to add partition with - // Pig/HCatStore + // user with only SELECT on the table should NOT be able to add a partition with Pig/HCatStore pigServer = context.getPigServer(USER2_1, ExecType.LOCAL); execPigLatin(USER2_1, pigServer, "A = load '" + dataFile.getPath() + "' as (id:int);"); - try { - execPigLatin(USER2_1, pigServer, "store A into '" + dbName + "." + tabName + "' using " - + HCatStorer.class.getName() + " ('part_col=part2');"); - fail("USER2_1 has no access to the metadata, exception will be thrown."); - } catch (IOException e) { - // ignore the exception - } - + // This action will not succeed because the permission is missing, but no exception will + // be thrown in this thread. The detailed exception can be found in + // sentry-tests/sentry-tests-hive/target/surefire-reports/org.apache.sentry.tests.e2e.metastore.TestMetaStoreWithPigHCat-output.txt. + execPigLatin(USER2_1, pigServer, "store A into '" + dbName + "." + tabName2 + "' using " + + HCatStorer.class.getName() + " ('part_col=part2');"); + // The previous action failed, so there will be no data.
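Because the unauthorized HCatStorer call fails inside a Pig-managed thread and no exception reaches the test thread (as the comment above explains), the negative case has to be verified through its side effect: no partition may appear on tab2. A minimal sketch of that check, mirroring the assertion that follows but adding a try/finally guard so the metastore client is closed even if the assertion fails:

    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
    try {
      // The unauthorized store attempt must not have created any partition.
      assertEquals(0, client.listPartitionNames(dbName, tabName2, (short) 10).size());
    } finally {
      client.close();
    }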
+ assertEquals(0, client.listPartitionNames(dbName, tabName2, (short) 10).size()); + client.close(); } } diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java index 09433fd10..98ec814a6 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java @@ -23,12 +23,12 @@ import java.io.File; import java.io.FileOutputStream; +import java.io.IOException; import java.util.ArrayList; import java.util.Map; -import junit.framework.Assert; +import org.junit.Assert; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -41,6 +41,7 @@ import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory; import org.junit.After; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import com.google.common.collect.Lists; @@ -63,29 +64,19 @@ public class TestMetastoreEndToEnd extends private static final String tabName2 = "tab2"; private static final String tabName3 = "tab3"; + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception { + setMetastoreListener = false; + AbstractMetastoreTestWithStaticConfiguration.setupTestStaticConfiguration(); + } + + @Override @Before public void setup() throws Exception { - policyFile = setAdminOnServer1(ADMINGROUP); // PolicyFile.setAdminOnServer1(ADMINGROUP); - policyFile - .addRolesToGroup(USERGROUP1, db_all_role) - .addRolesToGroup(USERGROUP2, "read_db_role") - .addRolesToGroup(USERGROUP2, tab1_all_role) - .addRolesToGroup(USERGROUP2, tab2_all_role) - .addRolesToGroup(USERGROUP3, tab1_read_role) - .addRolesToGroup(USERGROUP3, tab2_read_role) - .addPermissionsToRole(db_all_role, "server=server1->db=" + dbName) - .addPermissionsToRole("read_db_role", - "server=server1->db=" + dbName + "->action=SELECT") - .addPermissionsToRole(tab1_all_role, - "server=server1->db=" + dbName + "->table=" + tabName1) - .addPermissionsToRole(tab2_all_role, - "server=server1->db=" + dbName + "->table=" + tabName2) - .addPermissionsToRole(tab1_read_role, - "server=server1->db=" + dbName + "->table=" + tabName1 + "->action=SELECT") - .addPermissionsToRole(tab2_read_role, - "server=server1->db=" + dbName + "->table=" + tabName2 + "->action=SELECT") - .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + policyFile = setAdminOnServer1(ADMINGROUP); + policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping()); writePolicyFile(policyFile); + super.setup(); dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME); FileOutputStream to = new FileOutputStream(dataFile); @@ -97,6 +88,26 @@ public void setup() throws Exception { createMetastoreDB(client, dbName); client.close(); + policyFile + .addRolesToGroup(USERGROUP1, db_all_role) + .addRolesToGroup(USERGROUP2, "read_db_role") + .addRolesToGroup(USERGROUP2, tab1_all_role) + .addRolesToGroup(USERGROUP2, tab2_all_role) + .addRolesToGroup(USERGROUP3, tab1_read_role) + .addRolesToGroup(USERGROUP3, tab2_read_role) + .addPermissionsToRole(db_all_role, "server=server1->db=" + dbName) + .addPermissionsToRole("read_db_role", + "server=server1->db=" + dbName + "->action=SELECT") 
+ .addPermissionsToRole(tab1_all_role, + "server=server1->db=" + dbName + "->table=" + tabName1) + .addPermissionsToRole(tab2_all_role, + "server=server1->db=" + dbName + "->table=" + tabName2) + .addPermissionsToRole(tab1_read_role, + "server=server1->db=" + dbName + "->table=" + tabName1 + "->action=SELECT") + .addPermissionsToRole(tab2_read_role, + "server=server1->db=" + dbName + "->table=" + tabName2 + "->action=SELECT") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + writePolicyFile(policyFile); } @After @@ -505,7 +516,7 @@ public void testAlterSetLocationPrivileges() throws Exception { */ @Test public void testPartionInsert() throws Exception { - String partVal1 = "part1", partVal2 = "part2", partVal3 = "part5"; + String partVal1 = "part1", partVal2 = "part2"; policyFile.addRolesToGroup(USERGROUP1, uri_role).addPermissionsToRole( uri_role, "server=server1->uri=file://" + dataFile.getPath()); @@ -543,6 +554,60 @@ public void testPartionInsert() throws Exception { + dbName + "." + tabName1, USER1_1, dynamicInsertProperties); } + @Test + public void testAddPartion() throws Exception { + String partVal1 = "part1", partVal2 = "part2"; + String newPath1 = "fooTab1"; + String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR) + + File.separator + newPath1; + + // URI privilege is required when using "location" + policyFile.addRolesToGroup(USERGROUP1, uri_role).addPermissionsToRole( + uri_role, "server=server1->URI=" + tabDir1); + writePolicyFile(policyFile); + + execHiveSQL("DROP TABLE IF EXISTS " + dbName + "." + tabName1, USER1_1); + execHiveSQL("CREATE TABLE " + dbName + "." + tabName1 + + " (id int) PARTITIONED BY (part_col string)", USER1_1); + + // User with ALL on the table + execHiveSQL("ALTER TABLE " + dbName + "." + tabName1 + + " ADD PARTITION (part_col ='" + partVal1 + "')", USER1_1); + verifyPartitionExists(dbName, tabName1, partVal1); + + execHiveSQL("ALTER TABLE " + dbName + "." + tabName1 + + " ADD PARTITION (part_col ='" + partVal2 + "') location '" + + tabDir1 + "'", USER1_1); + verifyPartitionExists(dbName, tabName1, partVal2); + + try { + execHiveSQL("ALTER TABLE " + dbName + "." + tabName1 + + " ADD PARTITION (part_col ='" + partVal2 + "') location '" + + tabDir1 + "'", USER2_1); + fail("alter table should have failed due to missing URI privilege"); + } catch (IOException e) { + // Expected error + } + + } + + + @Test + public void testInsertInto() throws Exception { + String partVal1 = "part1"; + + writePolicyFile(policyFile); + + execHiveSQL("DROP TABLE IF EXISTS " + dbName + "." + tabName1, USER1_1); + execHiveSQL("CREATE TABLE " + dbName + "." + tabName1 + + " (id int) PARTITIONED BY (part_col string)", USER1_1); + + execHiveSQL("INSERT INTO " + dbName + "." + tabName1 + + " PARTITION(part_col ='" + partVal1 + "') select 1 from " + dbName + "."
+        + tabName1, USER2_1);
+    verifyPartitionExists(dbName, tabName1, partVal1);
+  }
+
   private void verifyPartitionExists(String dbName, String tabName,
       String partVal) throws Exception {
     HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/minisentry/InternalSentrySrv.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/minisentry/InternalSentrySrv.java
index 603aa38a6..054b19359 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/minisentry/InternalSentrySrv.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/minisentry/InternalSentrySrv.java
@@ -24,7 +24,6 @@
 import org.apache.curator.test.TestingServer;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.sentry.provider.db.service.thrift.SentryProcessorWrapper;
 import org.apache.sentry.service.thrift.SentryService;
 import org.apache.sentry.service.thrift.SentryServiceFactory;
 import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
@@ -32,7 +31,6 @@
 import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.server.ServerContext;
 import org.apache.thrift.server.TServerEventHandler;
-import org.apache.thrift.transport.TSocket;
 import org.apache.thrift.transport.TTransport;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -193,7 +191,6 @@ public void stop(int serverNum) throws Exception {
     }
     SentryService sentryServer = sentryServers.get(serverNum);
     sentryServer.stop();
-    sentryServer.waitForShutDown();
   }
 
   @Override
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrv.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrv.java
index b8cf894e5..dac11517f 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrv.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrv.java
@@ -25,7 +25,7 @@ public interface SentrySrv {
    * Start all the sentry services
    * @throws Exception
    */
-  public void startAll() throws Exception;
+  void startAll() throws Exception;
 
   /**
    * Start the given server
@@ -33,13 +33,13 @@ public interface SentrySrv {
    * - Server number (0 to N-1)
    * @throws Exception
    */
-  public void start(int serverNum) throws Exception ;
+  void start(int serverNum) throws Exception;
 
   /**
    * Stop all the Sentry servers
    * @throws Exception
    */
-  public void stopAll() throws Exception;
+  void stopAll() throws Exception;
 
   /**
    * Stop the specified Sentry server
@@ -47,7 +47,7 @@ public interface SentrySrv {
    * - Server number (0 to N-1)
    * @throws Exception
    */
-  public void stop(int serverNum) throws Exception ;
+  void stop(int serverNum) throws Exception;
 
   /**
    * Get the underlying Sentry service object
@@ -55,7 +55,7 @@ public interface SentrySrv {
    * - Server number (0 to N-1)
    * @return
    */
-  public SentryService get(int serverNum);
+  SentryService get(int serverNum);
 
   /**
    * Get the ZK connection string
@@ -63,38 +63,38 @@ public interface SentrySrv {
    * @throws Exception
    *           - If HA is not enabled
    */
-  public String getZKQuorum() throws Exception;
+  String getZKQuorum() throws Exception;
 
   /**
    * Stop all the nodes and ZK if started. The SentrySrv can't be reused once
    * closed.
    */
-  public void close();
+  void close();
 
   /**
    * Check if the sentry server is created with HA enabled.
   * @return True - HA is enabled False - HA is not enabled
    */
-  public boolean isHaEnabled();
+  boolean isHaEnabled();
 
   /**
    * Get the number of active clients connections across servers
    */
-  public long getNumActiveClients();
+  long getNumActiveClients();
 
   /**
    * Get the number of active clients connections for the given server
    */
-  public long getNumActiveClients(int serverNum);
+  long getNumActiveClients(int serverNum);
 
   /**
    * Get the total number of clients connected so far
    */
-  public long getTotalClients();
+  long getTotalClients();
 
   /**
    * Get the total number of clients connected so far
    */
-  public long getTotalClients(int serverNum);
+  long getTotalClients(int serverNum);
 
 }
diff --git a/sentry-tests/sentry-tests-hive/testutil/hadoop b/sentry-tests/sentry-tests-hive/src/test/resources/hadoop
similarity index 100%
rename from sentry-tests/sentry-tests-hive/testutil/hadoop
rename to sentry-tests/sentry-tests-hive/src/test/resources/hadoop
diff --git a/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImport.ini b/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImport.ini
new file mode 100644
index 000000000..15fc5bf3b
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImport.ini
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[groups]
+group1=roleImport1,roleImport2
+group2=roleImport1,roleImport2,roleImport3
+group3=roleImport2,roleImport3
+[roles]
+roleImport1=server=server1,server=server1->action=select->grantoption=false,server=server1->db=db2->action=insert->grantoption=true,server=server1->db=db1->table=tbl1->action=insert
+roleImport2=server=server1->db=db2->action=insert->grantoption=true,server=server1->db=db1->table=tbl1->action=insert,server=server1->db=db1->table=tbl2->column=col1->action=insert,server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true
+roleImport3=server=server1->db=db1->table=tbl2->column=col1->action=insert,server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true,server=server1->db=db1->table=tbl4->column=col1->action=all->grantoption=true,server=server1->uri=hdfs://testserver:9999/path2->action=insert
diff --git a/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImportAdmin.ini b/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImportAdmin.ini
new file mode 100644
index 000000000..c778d052d
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImportAdmin.ini
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[groups]
+admin=adminRole
+
+[roles]
+adminRole=server=server1
diff --git a/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImportError.ini b/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImportError.ini
new file mode 100644
index 000000000..4d53f2b58
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/resources/testPolicyImportError.ini
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[groups]
+group1=roleImport1
+[roles]
+roleImport1=server->db=db_1
diff --git a/sentry-tests/sentry-tests-hive/src/test/scripts/scale-test/create-many-dbs-tables.sh b/sentry-tests/sentry-tests-hive/src/test/scripts/scale-test/create-many-dbs-tables.sh
new file mode 100755
index 000000000..dcdddeb95
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/scripts/scale-test/create-many-dbs-tables.sh
@@ -0,0 +1,277 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This script is meant to create many test objects (databases, tables,
+# partitions, and a wide table with many partitions). The way to run it:
+# export HS2="HOSTNAME"
+# export REALM="REALM.NAME"
+# bash /root/tests/create-many-dbs-tables.sh &
+# bash /root/tests/create-many-dbs-tables.sh &
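+#
+# The default scale settings below can also be overridden via the getopts
+# flags parsed further down, e.g. (hostname, realm and sizes here are
+# illustrative only):
+#   export HS2="hs2.example.com"; export REALM="EXAMPLE.COM"
+#   bash create-many-dbs-tables.sh -d 100 -t 50 -g 100 -b /data -l /tmp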
+
+if [[ ${HS2} == "" ]]; then
+  echo "error: need to export HS2=hostname"
+  exit 1
+fi
+
+if [[ ${REALM} == "" ]]; then
+  echo "error: need to export REALM"
+  exit 1
+fi
+
+# Define default test scale
+NUM_OF_DATABASES=60
+NUM_OF_TABLES_PER_DATABASE=20
+NUM_OF_ROLES_FOR_DATABASES=60 # <= NUM_OF_DATABASES
+NUM_OF_ROLES_FOR_TABLES_PER_DATABASE=5 # <= NUM_OF_TABLES_PER_DATABASE
+NUM_OF_GROUPS=60 # >= NUM_OF_DATABASES
+
+# Number of partitions varies between max and min
+MAX_NUM_OF_PARTITIONS_PER_TABLE=10
+MIN_NUM_OF_PARTITIONS_PER_TABLE=2
+
+BASE_EXTERNAL_DIR="/data"
+LOCAL_OUTPUT_DIR="/tmp"
+BL="beeline -n hive -p hive --silent=true -u 'jdbc:hive2://${HS2}:10000/default;principal=hive/_HOST@${REALM}'"
+
+# Number of external partitions wide tables have
+declare -a NUM_OF_WIDE_TABLE_PARTITIONS=(10 100 1000)
+wLen=${#NUM_OF_WIDE_TABLE_PARTITIONS[@]}
+
+process_id=$$
+
+while getopts "d:t:g:b:l:" OPTION
+do case "${OPTION}" in
+    b) BASE_EXTERNAL_DIR="$OPTARG";;
+    d) NUM_OF_DATABASES="$OPTARG";;
+    l) LOCAL_OUTPUT_DIR="$OPTARG";;
+    t) NUM_OF_TABLES_PER_DATABASE="$OPTARG";;
+    g) NUM_OF_GROUPS="$OPTARG";;
+    [?]) echo >&2 "Usage: $0 [-b BASE_EXTERNAL_DIR] [-d NUM_OF_DATABASES] [-l LOCAL_OUTPUT_DIR] [-t NUM_OF_TABLES_PER_DATABASE] [-g NUM_OF_GROUPS]"
+         exit 1;;
+  esac
+done
+
+NUM_OF_PERMISSIONS=$(( NUM_OF_ROLES_FOR_DATABASES + NUM_OF_ROLES_FOR_TABLES_PER_DATABASE * NUM_OF_DATABASES ))
+AVG_NUM_OF_PARTITIONS_PER_TABLE=$((( MAX_NUM_OF_PARTITIONS_PER_TABLE + MIN_NUM_OF_PARTITIONS_PER_TABLE) / 2 ))
+
+echo "[${process_id}] Scale numbers:"
+echo "[${process_id}] number of databases: ${NUM_OF_DATABASES}"
+echo "[${process_id}] number of tables: $((NUM_OF_TABLES_PER_DATABASE * NUM_OF_DATABASES))"
+echo "[${process_id}] number of wide tables: ${wLen}"
+echo "[${process_id}] number of partitions per table: ${AVG_NUM_OF_PARTITIONS_PER_TABLE}"
+echo "[${process_id}] number of min partitions per wide table: ${NUM_OF_WIDE_TABLE_PARTITIONS[0]}"
+echo "[${process_id}] number of max partitions per wide table: ${NUM_OF_WIDE_TABLE_PARTITIONS[${wLen}-1]}"
+echo "[${process_id}] number of permissions: ${NUM_OF_PERMISSIONS}"
+echo "[${process_id}] number of groups: ${NUM_OF_GROUPS}"
+
+# Random string as prefix for test databases and tables
+prefix_string=$(cat /dev/urandom | tr -dc 'a-z' | fold -w 4 | head -n 1)
+prefix_string=${prefix_string}$(date +%s | cut -c1-4)
+
+DB_NAME=${prefix_string}_db
+
+function validate_ret () {
+  ret=$1
+  if [[ ${ret} != "" && ${ret} -ne 0 ]]; then
+    echo "ERROR!! when running query in bulk mode"
+    exit $ret
+  fi
+}
+
+function get_group () {
+  count=$1
+  group_name=group_$((count % NUM_OF_GROUPS))
+  echo "$group_name"
+}
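+
+# For example, with the default NUM_OF_GROUPS=60, `get_group 7` prints
+# "group_7" and `get_group 60` wraps around to "group_0".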
+
+# Create groups
+function create_groups () {
+  for g in $(seq ${NUM_OF_GROUPS}); do
+    group_name=$(get_group $g)
+    getent passwd ${group_name} | grep "${group_name}" 1>&2>/dev/null
+    if [[ $? -ne 0 ]]; then
+      sudo groupadd ${group_name}
+      sudo useradd -g ${group_name} ${group_name}
+    fi
+  done
+}
+
+# Convenience function to create one table with many external partitions
+function create_wide_table () {
+  db_name=$1
+  tbl_name=$2
+  num_of_pars=$3
+  file_name=$4
+  dir_file_name=$5
+  echo "-- [${process_id}] Create ${tbl_name} in ${db_name} with ${num_of_pars} external partitions; " >> ${file_name}
+  echo "CREATE DATABASE IF NOT EXISTS ${db_name}; " >> ${file_name}
+  echo "USE ${db_name};" >> ${file_name}
+  table_dir=${BASE_EXTERNAL_DIR}/${db_name}/${tbl_name}
+  echo "sudo -u hdfs hdfs dfs -rm -R -skipTrash ${table_dir} 2>/dev/null" >> ${dir_file_name}
+  echo "DROP TABLE IF EXISTS ${tbl_name}; " >> ${file_name}
+  echo "CREATE TABLE ${tbl_name} (s STRING, i INT) PARTITIONED BY (par INT);" >> ${file_name}
+  echo "-- create ${num_of_pars} partitions on table ${tbl_name}" >> ${file_name}
+  for p in $(seq ${num_of_pars}); do
+    dir=${table_dir}/$p
+    echo "sudo -u hdfs hdfs dfs -mkdir -p ${dir}" >> ${dir_file_name}
+    echo "ALTER TABLE ${tbl_name} ADD PARTITION (par=$p) LOCATION '${dir}';" >> ${file_name}
+  done
+}
+
+# Convenience function to create wide tables with many external partitions
+function create_external_par_dirs_bulk_file () {
+  file_name=$1
+  dir_file_name=$2
+  echo "-- [${process_id}] Start bulk process to create wide tables" > ${file_name}
+  echo "# [${process_id}] Start to create external dirs for partitions" > ${dir_file_name}
+  db_id=$(awk -v n="${NUM_OF_DATABASES}" 'BEGIN{srand();print int(rand()*n+1)}')
+  db_name=${DB_NAME}_${db_id}
+  for p in "${!NUM_OF_WIDE_TABLE_PARTITIONS[@]}"; do
+    tbl_name=${db_name}_wide_tbl_$p
+    create_wide_table ${db_name} ${tbl_name} ${NUM_OF_WIDE_TABLE_PARTITIONS[p]} ${file_name} ${dir_file_name}
+  done
+  chmod a+x ${file_name}
+  chmod a+x ${dir_file_name}
+}
+
+# Create internal databases and their tables in one bulk file
+function create_dbs_tbls_bulk_file () {
+  file_name=$1
+  echo "-- [${process_id}] start bulk load " > ${file_name}
+  for d in $(seq ${NUM_OF_DATABASES}); do
+    db_name=${DB_NAME}_${d}
+    echo "drop database if exists ${db_name}; " >> ${file_name}
+    echo "create database ${db_name}; " >> ${file_name}
+    echo "use ${db_name};" >> ${file_name}
+    NUM_OF_COLS=$(awk -v mn="${MIN_NUM_OF_PARTITIONS_PER_TABLE}" -v mx="${MAX_NUM_OF_PARTITIONS_PER_TABLE}" 'BEGIN{srand();print int(rand()*(mx-mn)+1)}')
+    NUM_OF_PARS=$(awk -v mn="${MIN_NUM_OF_PARTITIONS_PER_TABLE}" -v mx="${MAX_NUM_OF_PARTITIONS_PER_TABLE}" 'BEGIN{srand();print int(rand()*(mx-mn)+1)}')
+
+    for t in $(seq ${NUM_OF_TABLES_PER_DATABASE}); do
+      tbl_name=${db_name}_tbl_${t}
+      # create table
+      echo "create table ${tbl_name} (col_start INT, " >> ${file_name}
+      for c in $(seq ${NUM_OF_COLS}); do
+        echo "col_${c} STRING, " >> ${file_name}
+      done
+      echo "col_end INT) partitioned by (par_start STRING, " >> ${file_name}
+      # create many partitions
+      for p in $(seq ${NUM_OF_PARS}); do
+        echo "par_${p} INT, " >> ${file_name}
+      done
+      echo "par_end STRING); " >> ${file_name}
+    done
+  done
+  chmod a+x ${file_name}
+}
+
+# Create database roles
+function create_dbs_roles () {
+  db_file_name=$1
+  total_db_permissions=0
+  echo "-- [${process_id}] Start to create database roles" > ${db_file_name}
+  for d in $(seq ${NUM_OF_ROLES_FOR_DATABASES}); do
+    db_name=${DB_NAME}_${d}
+    role_name=${db_name}_db_role_${d}
+    group_name=$(get_group $d)
+    echo "create role ${role_name}; " >> ${db_file_name}
+    echo "grant all on database ${db_name} to role ${role_name}; " >> ${db_file_name}
+    echo "grant role ${role_name} to group ${group_name};" >> ${db_file_name}
+  done
+  chmod a+x ${db_file_name}
+}
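+
+# For reference, each run of create_dbs_roles emits statement triples like
+# the following into its .q file (names illustrative; real ones carry the
+# random ${prefix_string}):
+#   create role abcd1234_db_1_db_role_1;
+#   grant all on database abcd1234_db_1 to role abcd1234_db_1_db_role_1;
+#   grant role abcd1234_db_1_db_role_1 to group group_1;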
${role_name} to group ${group_name};" >> ${db_file_name} + done + chmod a+x ${db_file_name} +} + +# Create table roles +function create_tbls_roles () { + tbl_file_name=$1 + echo "-- [${process_id}] Start to create table roles;" > ${tbl_file_name} + # create table roles + for d in $(seq ${NUM_OF_DATABASES}); do + db_name=${DB_NAME}_${d} + echo "USE ${db_name};" >> ${tbl_file_name} + for t in $(seq ${NUM_OF_ROLES_FOR_TABLES_PER_DATABASE}); do + tbl_name=${db_name}_tbl_${t} + role_name=${tbl_name}_role_${t} + echo "CREATE ROLE ${role_name};" >> ${tbl_file_name} + rand_number=$(awk 'BEGIN{srand();print int(rand()*3)}') + case "$((rand_number % 3))" in + 0) echo "grant all on table ${tbl_name} to role ${role_name}; " >> ${tbl_file_name} + ;; + 1) echo "grant insert on table ${tbl_name} to role ${role_name}; " >> ${tbl_file_name} + ;; + *) echo "grant select on table ${tbl_name} to role ${role_name}; " >> ${tbl_file_name} + ;; + esac + group_name=$(get_group $d) + echo "grant role ${role_name} to group ${group_name}; " >> ${tbl_file_name} + done + done + chmod a+x ${tbl_file_name} +} + +########################### +# Start from here! +########################### +create_groups +echo "# [${process_id}] Created ${NUM_OF_GROUPS} groups" + +# Use Hive to create the partitions because it supports bulk adding of partitions. +# Hive doesn't allow fully qualified table names in ALTER statements, so start with a +# USE . +create_tables_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_tables.q +create_dbs_tbls_bulk_file ${create_tables_file_name} +echo "# [${process_id}] Created ${create_tables_file_name} to create databases and tables in bulk mode" + +create_wide_tables_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_wide_tables.q +create_wide_tables_dir_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_wide_tables_dirs.sh +create_external_par_dirs_bulk_file ${create_wide_tables_file_name} ${create_wide_tables_dir_file_name} +echo "# [${process_id}] Created ${create_wide_tables_file_name} to create wide tables with external partitions in bulk mode" +echo "# [${process_id}] Created ${create_wide_tables_dir_file_name} to create external dirs for external partitions in bulk mode" + +create_db_role_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_db_roles.q +create_dbs_roles ${create_db_role_file_name} +echo "# [${process_id}] Created ${create_db_role_file_name} to create database roles" + +create_tbl_role_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_tbl_roles.q +create_tbls_roles ${create_tbl_role_file_name} +echo "# [${process_id}] Created ${create_tbl_role_file_name} to create table roles" + +sudo -u hive hive -S -f ${create_tables_file_name} +validate_ret $? +echo "# [${process_id}] Succeessfully ran bulk file ${create_tables_file_name} to create databases and tables" + +. ${create_wide_tables_dir_file_name} +echo "# [${process_id}] Successfully ran ${create_wide_tables_dir_file_name} to create dirs for external partitions" + +sudo -u hive hive -S -f ${create_wide_tables_file_name} +validate_ret $? +echo "# [${process_id}] Successfully ran bulk file ${create_wide_tables_file_name} to create wide tables with external partitions" + +sudo -u hive ${BL} -f ${create_db_role_file_name} 1>/dev/null # to remove white lines after execution +validate_ret $? +echo "# [${process_id}] Successfully created database level roles and privileges" + +sudo -u hive ${BL} -f ${create_tbl_role_file_name} 1>/dev/null # to remove white lines after execution +validate_ret $? 
+echo "# [${process_id}] Successfully created table level roles and privileges" + +res_file=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}.res +echo "-- [${process_id}] List all databases and roles in ${res_file}" > ${res_file} +sudo -u hive ${BL} -e "show databases" 2>/dev/null 1>>${res_file} +sudo -u hive ${BL} -e "show roles" 2>/dev/null 1>>${res_file} +echo "[${process_id}] Successfully listed all databases and roles in ${res_file}" diff --git a/sentry-tests/sentry-tests-kafka/pom.xml b/sentry-tests/sentry-tests-kafka/pom.xml new file mode 100644 index 000000000..54c720530 --- /dev/null +++ b/sentry-tests/sentry-tests-kafka/pom.xml @@ -0,0 +1,64 @@ + + + + + sentry-tests + org.apache.sentry + 1.7.0-incubating-SNAPSHOT + + 4.0.0 + + sentry-tests-kafka + Sentry Kafka Tests + end to end tests for sentry-kafka integration + + + + junit + junit + + + log4j + log4j + + + org.apache.sentry + sentry-binding-kafka + + + org.apache.kafka + kafka_2.10 + ${kafka.version} + + + org.apache.kafka + kafka-clients + ${kafka.version} + + + com.google.guava + guava + + + org.apache.sentry + sentry-provider-db + + + \ No newline at end of file diff --git a/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/CustomPrincipalBuilder.java b/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/CustomPrincipalBuilder.java new file mode 100644 index 000000000..5531fcb3f --- /dev/null +++ b/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/CustomPrincipalBuilder.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.sentry.tests.e2e.kafka;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.network.Authenticator;
+import org.apache.kafka.common.network.TransportLayer;
+import org.apache.kafka.common.security.auth.PrincipalBuilder;
+
+import java.security.Principal;
+import java.util.Map;
+
+public class CustomPrincipalBuilder implements PrincipalBuilder {
+  @Override
+  public void configure(Map<String, ?> map) {
+  }
+
+  @Override
+  public Principal buildPrincipal(TransportLayer transportLayer, Authenticator authenticator) throws KafkaException {
+    try {
+      return transportLayer.peerPrincipal();
+    } catch (Exception e) {
+      throw new KafkaException("Failed to build principal due to: ", e);
+    }
+  }
+
+  @Override
+  public void close() throws KafkaException {
+  }
+}
diff --git a/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/EmbeddedZkServer.java b/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/EmbeddedZkServer.java
new file mode 100644
index 000000000..442ddff62
--- /dev/null
+++ b/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/EmbeddedZkServer.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.kafka;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.zookeeper.server.NIOServerCnxnFactory;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+public class EmbeddedZkServer {
+  private static final Logger LOGGER = LoggerFactory.getLogger(EmbeddedZkServer.class);
+
+  private Path snapshotDir = null;
+  private Path logDir = null;
+  private ZooKeeperServer zookeeper = null;
+  private NIOServerCnxnFactory factory = null;
+
+  public EmbeddedZkServer(int port) throws Exception {
+    snapshotDir = Files.createTempDirectory("zookeeper-snapshot-");
+    logDir = Files.createTempDirectory("zookeeper-log-");
+    int tickTime = 500;
+    zookeeper = new ZooKeeperServer(snapshotDir.toFile(), logDir.toFile(), tickTime);
+    factory = new NIOServerCnxnFactory();
+    InetSocketAddress addr = new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), port);
+    LOGGER.info("Starting Zookeeper at " + addr);
+    factory.configure(addr, 0);
+    factory.startup(zookeeper);
+  }
+
+  public void shutdown() throws IOException {
+    try {
+      zookeeper.shutdown();
+    } catch (Exception e) {
+      LOGGER.error("Failed to shutdown ZK server", e);
+    }
+
+    try {
+      factory.shutdown();
+    } catch (Exception e) {
+      LOGGER.error("Failed to shutdown Zk connection factory.", e);
+    }
+
+    FileUtils.deleteDirectory(logDir.toFile());
+    FileUtils.deleteDirectory(snapshotDir.toFile());
+  }
+
+  public ZooKeeperServer getZk() {
+    return zookeeper;
+  }
+}
diff --git a/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/KafkaTestServer.java b/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/KafkaTestServer.java
new file mode 100644
index 000000000..2a3b317b0
--- /dev/null
+++ b/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/KafkaTestServer.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.tests.e2e.kafka;
+
+import kafka.server.KafkaServerStartable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Properties;
+
+public class KafkaTestServer {
+  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaTestServer.class);
+
+  private int zkPort = -1;
+  private int kafkaPort = -1;
+  private EmbeddedZkServer zkServer = null;
+  private KafkaServerStartable kafkaServer = null;
+  private File sentrySitePath = null;
+
+  public KafkaTestServer(File sentrySitePath) throws Exception {
+    this.sentrySitePath = sentrySitePath;
+    this.zkPort = TestUtils.getFreePort();
+    createZkServer();
+    this.kafkaPort = TestUtils.getFreePort();
+    createKafkaServer();
+  }
+
+  public void start() throws Exception {
+    kafkaServer.startup();
+    LOGGER.info("Started Kafka broker.");
+  }
+
+  public void shutdown() {
+    if (kafkaServer != null) {
+      kafkaServer.shutdown();
+      kafkaServer.awaitShutdown();
+      LOGGER.info("Stopped Kafka server.");
+    }
+
+    if (zkServer != null) {
+      try {
+        zkServer.shutdown();
+        LOGGER.info("Stopped ZK server.");
+      } catch (IOException e) {
+        LOGGER.error("Failed to shutdown ZK server.", e);
+      }
+    }
+  }
+
+  private Path getTempDirectory() {
+    Path tempDirectory = null;
+    try {
+      tempDirectory = Files.createTempDirectory("kafka-sentry-");
+    } catch (IOException e) {
+      LOGGER.error("Failed to create temp dir for Kafka's log dir.");
+      throw new RuntimeException(e);
+    }
+    return tempDirectory;
+  }
+
+  private void setupKafkaProps(Properties props) throws UnknownHostException {
+    props.put("listeners", "SSL://" + InetAddress.getLocalHost().getHostAddress() + ":" + kafkaPort);
+    props.put("log.dir", getTempDirectory().toAbsolutePath().toString());
+    props.put("zookeeper.connect", InetAddress.getLocalHost().getHostAddress() + ":" + zkPort);
+    props.put("replica.socket.timeout.ms", "1500");
+    props.put("controller.socket.timeout.ms", "1500");
+    props.put("controlled.shutdown.enable", true);
+    props.put("delete.topic.enable", false);
+    props.put("controlled.shutdown.retry.backoff.ms", "100");
+    props.put("port", kafkaPort);
+    props.put("authorizer.class.name", "org.apache.sentry.kafka.authorizer.SentryKafkaAuthorizer");
+    props.put("sentry.kafka.site.url", "file://" + sentrySitePath.getAbsolutePath());
+    props.put("allow.everyone.if.no.acl.found", "true");
+    props.put("ssl.keystore.location", KafkaTestServer.class.getResource("/test.keystore.jks").getPath());
+    props.put("ssl.keystore.password", "test-ks-passwd");
+    props.put("ssl.key.password", "test-key-passwd");
+    props.put("ssl.truststore.location", KafkaTestServer.class.getResource("/test.truststore.jks").getPath());
+    props.put("ssl.truststore.password", "test-ts-passwd");
+    props.put("security.inter.broker.protocol", "SSL");
+    props.put("ssl.client.auth", "required");
+    props.put("kafka.superusers", "User:CN=superuser;User:CN=superuser1; User:CN=Superuser2 ");
+  }
+
+  private void createKafkaServer() throws UnknownHostException {
+    Properties props = new Properties();
+    setupKafkaProps(props);
+    kafkaServer = KafkaServerStartable.fromProps(props);
+  }
+
+  private void createZkServer() throws Exception {
+    try {
+      zkServer = new EmbeddedZkServer(zkPort);
+      zkPort = zkServer.getZk().getClientPort();
+    } catch (Exception e) {
+      LOGGER.error("Failed to create testing zookeeper server.");
+      throw new RuntimeException(e);
+    }
+  }
+
+  public String getBootstrapServers() throws UnknownHostException {
+    return InetAddress.getLocalHost().getHostAddress() + ":" + kafkaPort;
+  }
+}
diff --git a/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/TestUtils.java b/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/TestUtils.java
new file mode 100644
index 000000000..5d360829c
--- /dev/null
+++ b/sentry-tests/sentry-tests-kafka/src/main/java/org/apache/sentry/tests/e2e/kafka/TestUtils.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.kafka;
+
+import java.io.IOException;
+import java.net.ServerSocket;
+
+public class TestUtils {
+  public static int getFreePort() throws IOException {
+    synchronized (TestUtils.class) {
+      ServerSocket serverSocket = new ServerSocket(0);
+      int port = serverSocket.getLocalPort();
+      serverSocket.close();
+      return port;
+    }
+  }
+}
diff --git a/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/AbstractKafkaSentryTestBase.java b/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/AbstractKafkaSentryTestBase.java
new file mode 100644
index 000000000..a2cfa28da
--- /dev/null
+++ b/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/AbstractKafkaSentryTestBase.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.tests.e2e.kafka;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Sets;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.sentry.core.model.kafka.Cluster;
+import org.apache.sentry.core.model.kafka.KafkaActionConstant;
+import org.apache.sentry.core.model.kafka.Host;
+import org.apache.sentry.kafka.conf.KafkaAuthConf;
+import org.apache.sentry.provider.db.generic.SentryGenericProviderBackend;
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory;
+import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable;
+import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege;
+import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider;
+import org.apache.sentry.provider.file.PolicyFile;
+import org.apache.sentry.service.thrift.SentryService;
+import org.apache.sentry.service.thrift.SentryServiceFactory;
+import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
+import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This class is used to test the Kafka integration with Sentry.
+ */
+public class AbstractKafkaSentryTestBase {
+
+  protected static final String COMPONENT = "kafka";
+  protected static final String ADMIN_USER = "kafka";
+  protected static final String ADMIN_GROUP = "group_kafka";
+  protected static final String ADMIN_ROLE = "role_kafka";
+
+  protected static SentryService sentryServer;
+  protected static File sentrySitePath;
+
+  protected static File baseDir;
+  protected static File dbDir;
+  protected static File policyFilePath;
+
+  protected static PolicyFile policyFile;
+
+  protected static String bootstrapServers = null;
+  protected static KafkaTestServer kafkaServer = null;
+
+  @BeforeClass
+  public static void beforeTestEndToEnd() throws Exception {
+    setupConf();
+    startSentryServer();
+    setUserGroups();
+    setAdminPrivilege();
+    startKafkaServer();
+  }
+
+  @AfterClass
+  public static void afterTestEndToEnd() throws Exception {
+    stopSentryServer();
+    stopKafkaServer();
+  }
+
+  private static void stopKafkaServer() {
+    if (kafkaServer != null) {
+      kafkaServer.shutdown();
+      kafkaServer = null;
+    }
+  }
+
+  private static void stopSentryServer() throws Exception {
+    if (sentryServer != null) {
+      sentryServer.stop();
+      sentryServer = null;
+    }
+
+    FileUtils.deleteDirectory(baseDir);
+  }
+
+  public static void setupConf() throws Exception {
+    baseDir = createTempDir();
+    sentrySitePath = new File(baseDir, "sentry-site.xml");
+    dbDir = new File(baseDir, "sentry_policy_db");
+    policyFilePath = new File(baseDir, "local_policy_file.ini");
+    policyFile = new PolicyFile();
+
+    /* Set the configuration for the Sentry service */
+    Configuration conf = new Configuration();
+
+    conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE);
+    conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false");
+    conf.set(ServerConfig.ADMIN_GROUPS, Joiner.on(",").join(ADMIN_GROUP,
Joiner.on(",").join(ADMIN_GROUP, + UserGroupInformation.getLoginUser().getPrimaryGroupName())); + conf.set(ServerConfig.RPC_PORT, String.valueOf(TestUtils.getFreePort())); + conf.set(ServerConfig.RPC_ADDRESS, NetUtils.createSocketAddr( + InetAddress.getLocalHost().getHostAddress() + ":" + conf.get(ServerConfig.RPC_PORT)) + .getAddress().getCanonicalHostName()); + conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, + "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true"); + conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); + conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, + ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); + conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, + policyFilePath.getPath()); + sentryServer = new SentryServiceFactory().create(conf); + } + + public static File createTempDir() { + File baseDir = new File(System.getProperty("java.io.tmpdir")); + String baseName = "kafka-e2e-"; + File tempDir = new File(baseDir, baseName + UUID.randomUUID().toString()); + if (tempDir.mkdir()) { + return tempDir; + } + throw new IllegalStateException("Failed to create temp directory"); + } + + public static void startSentryServer() throws Exception { + sentryServer.start(); + final long start = System.currentTimeMillis(); + while(!sentryServer.isRunning()) { + Thread.sleep(1000); + if(System.currentTimeMillis() - start > 60000L) { + throw new TimeoutException("Server did not start after 60 seconds"); + } + } + } + + public static void setUserGroups() throws Exception { + for (String user : StaticUserGroupRole.getUsers()) { + Set groups = StaticUserGroupRole.getGroups(user); + policyFile.addGroupsToUser(user, + groups.toArray(new String[groups.size()])); + } + UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + policyFile.addGroupsToUser(loginUser.getShortUserName(), loginUser.getGroupNames()); + + policyFile.write(policyFilePath); + } + + public static void setAdminPrivilege() throws Exception { + SentryGenericServiceClient sentryClient = null; + try { + /** grant all privilege to admin user */ + sentryClient = getSentryClient(); + sentryClient.createRoleIfNotExist(ADMIN_USER, ADMIN_ROLE, COMPONENT); + sentryClient.addRoleToGroups(ADMIN_USER, ADMIN_ROLE, COMPONENT, Sets.newHashSet(ADMIN_GROUP)); + final ArrayList authorizables = new ArrayList(); + Host host = new Host(InetAddress.getLocalHost().getHostName()); + authorizables.add(new TAuthorizable(host.getTypeName(), host.getName())); + Cluster cluster = new Cluster(); + authorizables.add(new TAuthorizable(cluster.getTypeName(), cluster.getName())); + sentryClient.grantPrivilege(ADMIN_USER, ADMIN_ROLE, COMPONENT, + new TSentryPrivilege(COMPONENT, "kafka", authorizables, + KafkaActionConstant.ALL)); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + } + + protected static SentryGenericServiceClient getSentryClient() throws Exception { + return SentryGenericServiceClientFactory.create(getClientConfig()); + } + + public static void assertCausedMessage(Exception e, String message) { + if (e.getCause() != null) { + assertTrue("Expected message: " + message + ", but got: " + e.getCause().getMessage(), e.getCause().getMessage().contains(message)); + } else { + assertTrue("Expected message: " + message + ", but got: " + e.getMessage(), e.getMessage().contains(message)); + } + } + + private static Configuration getClientConfig() { + Configuration conf = new Configuration(); + /** set the Sentry client configuration for Kafka Service integration */ + 
+    conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE);
+    conf.set(ClientConfig.SERVER_RPC_ADDRESS, sentryServer.getAddress().getHostName());
+    conf.set(ClientConfig.SERVER_RPC_PORT, String.valueOf(sentryServer.getAddress().getPort()));
+
+    conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER.getVar(),
+        LocalGroupResourceAuthorizationProvider.class.getName());
+    conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(),
+        SentryGenericProviderBackend.class.getName());
+    conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), policyFilePath.getPath());
+    return conf;
+  }
+
+  private static void startKafkaServer() throws Exception {
+    // Workaround to ensure SentryKafkaAuthorizer is on the classpath
+    Class.forName("org.apache.sentry.kafka.authorizer.SentryKafkaAuthorizer");
+    getClientConfig().writeXml(new FileOutputStream(sentrySitePath));
+
+    kafkaServer = new KafkaTestServer(sentrySitePath);
+    kafkaServer.start();
+    bootstrapServers = kafkaServer.getBootstrapServers();
+  }
+}
diff --git a/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/StaticUserGroupRole.java b/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/StaticUserGroupRole.java
new file mode 100644
index 000000000..96b7cf43f
--- /dev/null
+++ b/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/StaticUserGroupRole.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.kafka;
+
+import com.google.common.collect.Sets;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+public class StaticUserGroupRole {
+  public static final String SUPERUSER = "superuser";
+  public static final String USER_1 = "user1";
+  public static final String USER_2 = "user2";
+  public static final String USER_KAFKA = "kafka";
+
+  public static final String GROUP_0 = "group0";
+  public static final String GROUP_1 = "group1";
+  public static final String GROUP_2 = "group2";
+  public static final String GROUP_KAFKA = "group_kafka";
+
+  public static final String ROLE_0 = "role0";
+  public static final String ROLE_1 = "role1";
+  public static final String ROLE_2 = "role2";
+
+  private static Map<String, Set<String>> userToGroupsMapping =
+      new HashMap<String, Set<String>>();
+
+  static {
+    userToGroupsMapping.put(SUPERUSER, Sets.newHashSet(GROUP_0));
+    userToGroupsMapping.put(USER_1, Sets.newHashSet(GROUP_1));
+    userToGroupsMapping.put(USER_2, Sets.newHashSet(GROUP_2));
+    userToGroupsMapping.put(USER_KAFKA, Sets.newHashSet(GROUP_KAFKA));
+  }
+
+  public static Set<String> getUsers() {
+    return userToGroupsMapping.keySet();
+  }
+
+  public static Set<String> getGroups(String user) {
+    return userToGroupsMapping.get(user);
+  }
+}
diff --git a/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/TestAclsCrud.java b/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/TestAclsCrud.java
new file mode 100644
index 000000000..135d36204
--- /dev/null
+++ b/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/TestAclsCrud.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.kafka;
+
+import junit.framework.Assert;
+import kafka.security.auth.Acl;
+import kafka.security.auth.Allow$;
+import kafka.security.auth.Operation$;
+import kafka.security.auth.Resource;
+import kafka.security.auth.ResourceType$;
+import org.apache.kafka.common.security.auth.KafkaPrincipal;
+import org.apache.sentry.kafka.authorizer.SentryKafkaAuthorizer;
+import org.apache.sentry.kafka.conf.KafkaAuthConf;
+import org.junit.After;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.collection.immutable.Map;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
+
+public class TestAclsCrud extends AbstractKafkaSentryTestBase {
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestAclsCrud.class);
+  private SentryKafkaAuthorizer sentryKafkaAuthorizer;
+
+  @After
+  public void cleanUp() throws Exception {
+    if (sentryKafkaAuthorizer != null) {
+      sentryKafkaAuthorizer.dropAllRoles();
+      sentryKafkaAuthorizer.close();
+      sentryKafkaAuthorizer = null;
+    }
+  }
+
+  @Test
+  public void testAddAclsForNonExistentRole() {
+    sentryKafkaAuthorizer = new SentryKafkaAuthorizer();
+    java.util.Map<String, Object> configs = new HashMap<>();
+    configs.put(KafkaAuthConf.SENTRY_KAFKA_SITE_URL, "file://" + sentrySitePath.getAbsolutePath());
+    sentryKafkaAuthorizer.configure(configs);
+
+    final String role1 = "role1";
+    Set<Acl> acls = new HashSet<>();
+    final Acl acl = new Acl(new KafkaPrincipal("role", role1),
+        Allow$.MODULE$,
+        "127.0.0.1",
+        Operation$.MODULE$.fromString("READ"));
+    acls.add(acl);
+    scala.collection.immutable.Set<Acl> aclsScala = scala.collection.JavaConversions.asScalaSet(acls).toSet();
+    Resource resource = new Resource(ResourceType$.MODULE$.fromString("TOPIC"), "test-topic");
+    try {
+      sentryKafkaAuthorizer.addAcls(aclsScala, resource);
+    } catch (Exception ex) {
+      assertCausedMessage(ex, "Can not add Acl for non-existent Role: role1");
+    }
+  }
+
+  @Test
+  public void testAddRole() {
+    sentryKafkaAuthorizer = new SentryKafkaAuthorizer();
+    java.util.Map<String, Object> configs = new HashMap<>();
+    configs.put(KafkaAuthConf.SENTRY_KAFKA_SITE_URL, "file://" + sentrySitePath.getAbsolutePath());
+    sentryKafkaAuthorizer.configure(configs);
+
+    final String role1 = "role1";
+    try {
+      sentryKafkaAuthorizer.addRole(role1);
+    } catch (Exception ex) {
+      Assert.fail("Failed to create role.");
+    }
+  }
+
+  @Test
+  public void testAddExistingRole() {
+    sentryKafkaAuthorizer = new SentryKafkaAuthorizer();
+    java.util.Map<String, Object> configs = new HashMap<>();
+    configs.put(KafkaAuthConf.SENTRY_KAFKA_SITE_URL, "file://" + sentrySitePath.getAbsolutePath());
+    sentryKafkaAuthorizer.configure(configs);
+
+    // Add role the first time
+    final String role1 = "role1";
+    try {
+      sentryKafkaAuthorizer.addRole(role1);
+    } catch (Exception ex) {
+      Assert.fail("Failed to create role.");
+    }
+
+    // Try adding same role again
+    try {
+      sentryKafkaAuthorizer.addRole(role1);
+    } catch (Exception ex) {
+      assertCausedMessage(ex, "Can not create an existing role, role1, again.");
+    }
+  }
+
+  @Test
+  public void testAddAcls() {
+    sentryKafkaAuthorizer = new SentryKafkaAuthorizer();
+    java.util.Map<String, Object> configs = new HashMap<>();
+    configs.put(KafkaAuthConf.SENTRY_KAFKA_SITE_URL, "file://" + sentrySitePath.getAbsolutePath());
+    sentryKafkaAuthorizer.configure(configs);
+
+    final String role1 = "role1";
+    Set<Acl> acls = new HashSet<>();
+    final Acl acl = new Acl(new KafkaPrincipal("role", role1),
+        Allow$.MODULE$,
+        "127.0.0.1",
+        Operation$.MODULE$.fromString("READ"));
+    acls.add(acl);
+    scala.collection.immutable.Set<Acl> aclsScala = scala.collection.JavaConversions.asScalaSet(acls).toSet();
+    Resource resource = new Resource(ResourceType$.MODULE$.fromString("TOPIC"), "test-topic");
+
+    // Add role
+    try {
+      sentryKafkaAuthorizer.addRole(role1);
+    } catch (Exception ex) {
+      Assert.fail("Failed to create role.");
+    }
+
+    // Add acl
+    try {
+      sentryKafkaAuthorizer.addAcls(aclsScala, resource);
+    } catch (Exception ex) {
+      Assert.fail("Failed to add acl.");
+    }
+
+    final scala.collection.immutable.Set<Acl> obtainedAcls = sentryKafkaAuthorizer.getAcls(resource);
+    Assert.assertTrue("Obtained acls did not match expected Acls", obtainedAcls.contains(acl));
+  }
+
+  @Test
+  public void testAddRoleToGroups() {
+    sentryKafkaAuthorizer = new SentryKafkaAuthorizer();
+    java.util.Map<String, Object> configs = new HashMap<>();
+    configs.put(KafkaAuthConf.SENTRY_KAFKA_SITE_URL, "file://" + sentrySitePath.getAbsolutePath());
+    sentryKafkaAuthorizer.configure(configs);
+
+    final String role1 = "role1";
+    Set<Acl> acls = new HashSet<>();
+    final Acl acl = new Acl(new KafkaPrincipal("role", role1),
+        Allow$.MODULE$,
+        "127.0.0.1",
+        Operation$.MODULE$.fromString("READ"));
+    acls.add(acl);
+    scala.collection.immutable.Set<Acl> aclsScala = scala.collection.JavaConversions.asScalaSet(acls).toSet();
+    Resource resource = new Resource(ResourceType$.MODULE$.fromString("TOPIC"), "test-topic");
+
+    // Add role
+    try {
+      sentryKafkaAuthorizer.addRole(role1);
+    } catch (Exception ex) {
+      Assert.fail("Failed to create role.");
+    }
+
+    // Add acl
+    try {
+      sentryKafkaAuthorizer.addAcls(aclsScala, resource);
+    } catch (Exception ex) {
+      Assert.fail("Failed to add acl.");
+    }
+
+    // Add role to group
+    Set<String> groups = new HashSet<>();
+    String group1 = "group1";
+    groups.add(group1);
+    sentryKafkaAuthorizer.addRoleToGroups(role1, groups);
+
+    final scala.collection.immutable.Set<Acl> obtainedAcls = sentryKafkaAuthorizer.getAcls(new KafkaPrincipal("group", group1)).get(resource).get();
+    Assert.assertTrue("Obtained acls did not match expected Acls", obtainedAcls.contains(acl));
+  }
+
+  @Test
+  public void testRemoveAclsByResource() {
+    sentryKafkaAuthorizer = new SentryKafkaAuthorizer();
+    java.util.Map<String, Object> configs = new HashMap<>();
+    configs.put(KafkaAuthConf.SENTRY_KAFKA_SITE_URL, "file://" + sentrySitePath.getAbsolutePath());
+    sentryKafkaAuthorizer.configure(configs);
+
+    final String role1 = "role1";
+    Set<Acl> acls = new HashSet<>();
+    final KafkaPrincipal principal1 = new KafkaPrincipal("role", role1);
+    final Acl acl = new Acl(principal1,
+        Allow$.MODULE$,
+        "127.0.0.1",
+        Operation$.MODULE$.fromString("READ"));
+    acls.add(acl);
+    scala.collection.immutable.Set<Acl> aclsScala = scala.collection.JavaConversions.asScalaSet(acls).toSet();
+    Resource resource = new Resource(ResourceType$.MODULE$.fromString("TOPIC"), "test-topic");
+
+    // Add role
+    try {
+      sentryKafkaAuthorizer.addRole(role1);
+    } catch (Exception ex) {
+      Assert.fail("Failed to create role.");
+    }
+
+    // Add acl
+    try {
+      sentryKafkaAuthorizer.addAcls(aclsScala, resource);
+    } catch (Exception ex) {
+      Assert.fail("Failed to add acl.");
+    }
+
+    // Add acl for different resource
+    Set<Acl> acls2 = new HashSet<>();
+    final Acl acl2 = new Acl(principal1,
+        Allow$.MODULE$,
+        "127.0.0.1",
+        Operation$.MODULE$.fromString("WRITE"));
+    acls2.add(acl2);
+    scala.collection.immutable.Set<Acl> aclsScala2 = scala.collection.JavaConversions.asScalaSet(acls2).toSet();
+    Resource resource2 = new Resource(ResourceType$.MODULE$.fromString("CLUSTER"), "test-cluster");
Resource(ResourceType$.MODULE$.fromString("CLUSTER"), "test-cluster"); + try { + sentryKafkaAuthorizer.addAcls(aclsScala2, resource2); + } catch (Exception ex) { + Assert.fail("Failed to add second acl."); + } + + try { + sentryKafkaAuthorizer.removeAcls(resource); + } catch (Exception ex) { + Assert.fail("Failed to remove acls for resource."); + } + + final Map> obtainedAcls = sentryKafkaAuthorizer.getAcls(principal1); + Assert.assertTrue("Obtained acls must not contain acl for removed resource's acls.", !obtainedAcls.keySet().contains(resource)); + Assert.assertTrue("Obtained acls must contain acl for resource2.", obtainedAcls.keySet().contains(resource2)); + Assert.assertTrue("Obtained acl does not match expected acl.", obtainedAcls.get(resource2).get().contains(acl2)); + } + + @Test + public void testRemoveAclsByAclsAndResource() { + sentryKafkaAuthorizer = new SentryKafkaAuthorizer(); + java.util.Map configs = new HashMap<>(); + configs.put(KafkaAuthConf.SENTRY_KAFKA_SITE_URL, "file://" + sentrySitePath.getAbsolutePath()); + sentryKafkaAuthorizer.configure(configs); + + final String role1 = "role1"; + Set acls = new HashSet<>(); + final KafkaPrincipal principal1 = new KafkaPrincipal("role", role1); + final Acl acl = new Acl(principal1, + Allow$.MODULE$, + "127.0.0.1", + Operation$.MODULE$.fromString("READ")); + acls.add(acl); + scala.collection.immutable.Set aclsScala = scala.collection.JavaConversions.asScalaSet(acls).toSet(); + Resource resource = new Resource(ResourceType$.MODULE$.fromString("TOPIC"), "test-topic"); + + // Add role + try { + sentryKafkaAuthorizer.addRole(role1); + } catch (Exception ex) { + Assert.fail("Failed to create role."); + } + + // Add acl + try { + sentryKafkaAuthorizer.addAcls(aclsScala, resource); + } catch (Exception ex) { + Assert.fail("Failed to add acl."); + } + + // Add another acl to same resource + Set acls01 = new HashSet<>(); + final Acl acl01 = new Acl(principal1, + Allow$.MODULE$, + "127.0.0.1", + Operation$.MODULE$.fromString("DESCRIBE")); + acls01.add(acl01); + scala.collection.immutable.Set aclsScala01 = scala.collection.JavaConversions.asScalaSet(acls01).toSet(); + try { + sentryKafkaAuthorizer.addAcls(aclsScala01, resource); + } catch (Exception ex) { + Assert.fail("Failed to add acl."); + } + + + // Add acl for different resource + Set acls2 = new HashSet<>(); + final Acl acl2 = new Acl(principal1, + Allow$.MODULE$, + "127.0.0.1", + Operation$.MODULE$.fromString("WRITE")); + acls2.add(acl2); + scala.collection.immutable.Set aclsScala2 = scala.collection.JavaConversions.asScalaSet(acls2).toSet(); + Resource resource2 = new Resource(ResourceType$.MODULE$.fromString("CLUSTER"), "test-cluster"); + try { + sentryKafkaAuthorizer.addAcls(aclsScala2, resource2); + } catch (Exception ex) { + Assert.fail("Failed to add second acl."); + } + + // Remove acls + try { + sentryKafkaAuthorizer.removeAcls(aclsScala, resource); + } catch (Exception ex) { + Assert.fail("Failed to remove acls for resource."); + } + + final Map> obtainedAcls = sentryKafkaAuthorizer.getAcls(principal1); + Assert.assertTrue("Obtained acls must contain acl for resource.", obtainedAcls.keySet().contains(resource)); + Assert.assertTrue("Obtained acls must contain acl for resource2.", obtainedAcls.keySet().contains(resource2)); + Assert.assertTrue("Obtained acl must not contain removed acl for resource.", !obtainedAcls.get(resource).get().contains(acl)); + Assert.assertTrue("Obtained acl does not match expected acl for resource.", obtainedAcls.get(resource).get().contains(acl01)); 
+ Assert.assertTrue("Obtained acl does not match expected acl for resource2.", obtainedAcls.get(resource2).get().contains(acl2)); + } +} \ No newline at end of file diff --git a/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/TestAuthorize.java b/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/TestAuthorize.java new file mode 100644 index 000000000..e8008308c --- /dev/null +++ b/sentry-tests/sentry-tests-kafka/src/test/java/org/apache/sentry/tests/e2e/kafka/TestAuthorize.java @@ -0,0 +1,298 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.kafka; + +import com.google.common.collect.Sets; +import junit.framework.Assert; +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.SslConfigs; +import org.apache.sentry.core.model.kafka.Cluster; +import org.apache.sentry.core.model.kafka.ConsumerGroup; +import org.apache.sentry.core.model.kafka.KafkaActionConstant; +import org.apache.sentry.core.model.kafka.Host; +import org.apache.sentry.core.model.kafka.Topic; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient; +import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Properties; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; + +public class TestAuthorize extends AbstractKafkaSentryTestBase { + private static final Logger LOGGER = LoggerFactory.getLogger(TestAuthorize.class); + + @Test + public void testProduceConsumeForSuperuser() { + try { + final String SuperuserName = "test"; + testProduce(SuperuserName); + testConsume(SuperuserName); + } catch (Exception ex) { + Assert.fail("Superuser must have been allowed to perform any and all actions. 
\nException: \n" + ex); + } + } + + @Test + public void testProduceConsumeCycle() throws Exception { + final String localhost = InetAddress.getLocalHost().getHostAddress(); + + // START TESTING PRODUCER + try { + testProduce("user1"); + Assert.fail("user1 must not have been authorized to create topic t1."); + } catch (ExecutionException ex) { + assertCausedMessage(ex, "Not authorized to access topics: [t1]"); + } + + final String role = StaticUserGroupRole.ROLE_1; + final String group = StaticUserGroupRole.GROUP_1; + + // Allow HOST=localhost->Topic=t1->action=describe + ArrayList authorizables = new ArrayList(); + Host host = new Host(localhost); + authorizables.add(new TAuthorizable(host.getTypeName(), host.getName())); + Topic topic = new Topic("t1"); + authorizables.add(new TAuthorizable(topic.getTypeName(), topic.getName())); + addPermissions(role, group, KafkaActionConstant.DESCRIBE, authorizables); + try { + testProduce("user1"); + Assert.fail("user1 must not have been authorized to create topic t1."); + } catch (ExecutionException ex) { + assertCausedMessage(ex, "Not authorized to access topics: [t1]"); + } + + // Allow HOST=localhost->Cluster=kafka-cluster->action=create + authorizables = new ArrayList(); + authorizables.add(new TAuthorizable(host.getTypeName(), host.getName())); + Cluster cluster = new Cluster(); + authorizables.add(new TAuthorizable(cluster.getTypeName(), cluster.getName())); + addPermissions(role, group, KafkaActionConstant.CREATE, authorizables); + try { + testProduce("user1"); + Assert.fail("user1 must not have been authorized to create topic t1."); + } catch (ExecutionException ex) { + assertCausedMessage(ex, "Not authorized to access topics: [t1]"); + } + + // Allow HOST=localhost->Topic=t1->action=write + authorizables = new ArrayList(); + authorizables.add(new TAuthorizable(host.getTypeName(), host.getName())); + authorizables.add(new TAuthorizable(topic.getTypeName(), topic.getName())); + addPermissions(role, group, KafkaActionConstant.WRITE, authorizables); + try { + testProduce("user1"); + } catch (Exception ex) { + Assert.fail("user1 should have been able to successfully produce to topic t1. 
\n Exception: " + ex); + } + + // START TESTING CONSUMER + try { + testConsume("user1"); + Assert.fail("user1 must not have been authorized to describe consumer group sentrykafkaconsumer."); + } catch (Exception ex) { + assertCausedMessage(ex, "Not authorized to access group: sentrykafkaconsumer"); + } + + // HOST=localhost->Group=SentryKafkaConsumer->action=describe + authorizables = new ArrayList(); + authorizables.add(new TAuthorizable(host.getTypeName(), host.getName())); + ConsumerGroup consumerGroup = new ConsumerGroup("sentrykafkaconsumer"); + authorizables.add(new TAuthorizable(consumerGroup.getTypeName(), consumerGroup.getName())); + addPermissions(role, group, KafkaActionConstant.DESCRIBE, authorizables); + try { + testConsume("user1"); + Assert.fail("user1 must not have been authorized to read consumer group sentrykafkaconsumer."); + } catch (Exception ex) { + assertCausedMessage(ex, "Not authorized to access group: sentrykafkaconsumer"); + } + + // HOST=localhost->Group=SentryKafkaConsumer->action=read + authorizables = new ArrayList(); + authorizables.add(new TAuthorizable(host.getTypeName(), host.getName())); + authorizables.add(new TAuthorizable(consumerGroup.getTypeName(), consumerGroup.getName())); + addPermissions(role, group, KafkaActionConstant.READ, authorizables); + try { + testConsume("user1"); + Assert.fail("user1 must not have been authorized to read from topic t1."); + } catch (Exception ex) { + assertCausedMessage(ex, "Not authorized to access topics: [t1]"); + } + + // HOST=localhost->Topic=t1->action=read + authorizables = new ArrayList(); + authorizables.add(new TAuthorizable(host.getTypeName(), host.getName())); + authorizables.add(new TAuthorizable(topic.getTypeName(), topic.getName())); + addPermissions(role, group, KafkaActionConstant.READ, authorizables); + testConsume("user1"); + } + + private void addPermissions(String role, String group, String action, ArrayList authorizables) throws Exception { + SentryGenericServiceClient sentryClient = getSentryClient(); + try { + sentryClient.createRoleIfNotExist(ADMIN_USER, role, COMPONENT); + sentryClient.addRoleToGroups(ADMIN_USER, role, COMPONENT, Sets.newHashSet(group)); + + sentryClient.grantPrivilege(ADMIN_USER, role, COMPONENT, + new TSentryPrivilege(COMPONENT, "kafka", authorizables, + action)); + } finally { + if (sentryClient != null) { + sentryClient.close(); + sentryClient = null; + } + } + } + + private void testProduce(String producerUser) throws Exception { + final KafkaProducer kafkaProducer = createKafkaProducer(producerUser); + try { + final String topic = "t1"; + final String msg = "message1"; + ProducerRecord producerRecord = new ProducerRecord(topic, msg); + kafkaProducer.send(producerRecord).get(); + LOGGER.debug("Sent message: " + producerRecord); + } finally { + kafkaProducer.close(); + } + } + + private void testConsume(String consumerUser) throws Exception { + final KafkaConsumer kafkaConsumer = createKafkaConsumer(consumerUser); + try { + final String topic = "t1"; + final String msg = "message1"; + kafkaConsumer.subscribe(Collections.singletonList(topic), new CustomRebalanceListener(kafkaConsumer)); + waitTillTrue("Did not receive expected message.", 60, 2, new Callable() { + @Override + public Boolean call() throws Exception { + ConsumerRecords records = kafkaConsumer.poll(1000); + if (records.isEmpty()) { + LOGGER.debug("No record received from consumer."); + } + for (ConsumerRecord record : records) { + if (record.value().equals(msg)) { + LOGGER.debug("Received message: " + record); 
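+              // Found the expected payload; returning true lets waitTillTrue()
+              // stop polling.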
+ return true; + } + } + return false; + } + }); + } finally { + kafkaConsumer.close(); + } + } + + private KafkaProducer createKafkaProducer(String user) { + Properties props = new Properties(); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); + props.put(ProducerConfig.CLIENT_ID_CONFIG, "SentryKafkaProducer"); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); + props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); + props.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); + props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, KafkaTestServer.class.getResource("/" + user + ".keystore.jks").getPath()); + props.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, user + "-ks-passwd"); + props.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, user + "-key-passwd"); + props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, KafkaTestServer.class.getResource("/" + user + ".truststore.jks").getPath()); + props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, user + "-ts-passwd"); + + return new KafkaProducer(props); + } + + private KafkaConsumer createKafkaConsumer(String user) { + Properties props = new Properties(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); + props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "sentrykafkaconsumer"); + props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); + props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); + props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000"); + props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); + props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); + props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); + props.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); + props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, KafkaTestServer.class.getResource("/" + user + ".keystore.jks").getPath()); + props.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, user + "-ks-passwd"); + props.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, user + "-key-passwd"); + props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, KafkaTestServer.class.getResource("/" + user + ".truststore.jks").getPath()); + props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, user + "-ts-passwd"); + + return new KafkaConsumer(props); + } + + /** + * Wait for a condition to succeed up to specified time. + * + * @param failureMessage Message to be displayed on failure. + * @param maxWaitTime Max waiting time for success in seconds. + * @param loopInterval Wait time between checks in seconds. + * @param testFunc Check to be performed for success, should return boolean. 
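+   *
+   * Typical usage (a sketch; checkSomething() stands in for whatever boolean
+   * check the caller needs):
+   *   waitTillTrue("Condition never became true.", 60, 2,
+   *       new Callable<Boolean>() {
+   *         public Boolean call() throws Exception {
+   *           return checkSomething();
+   *         }
+   *       });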
+   * @throws Exception
+   */
+  private void waitTillTrue(
+      String failureMessage, long maxWaitTime, long loopInterval, Callable<Boolean> testFunc)
+      throws Exception {
+    long startTime = System.currentTimeMillis();
+    while (System.currentTimeMillis() - startTime <= maxWaitTime * 1000L) {
+      if (testFunc.call()) {
+        return; // Success
+      }
+      Thread.sleep(loopInterval * 1000L);
+    }
+
+    Assert.fail(failureMessage);
+  }
+
+  class CustomRebalanceListener implements ConsumerRebalanceListener {
+
+    KafkaConsumer<String, String> consumer = null;
+
+    CustomRebalanceListener(KafkaConsumer<String, String> kafkaConsumer) {
+      consumer = kafkaConsumer;
+    }
+
+    @Override
+    public void onPartitionsRevoked(Collection<TopicPartition> collection) {
+
+    }
+
+    @Override
+    public void onPartitionsAssigned(Collection<TopicPartition> collection) {
+      for (TopicPartition tp : collection) {
+        consumer.seekToBeginning(tp);
+      }
+    }
+  }
+}
diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/log4j.properties b/sentry-tests/sentry-tests-kafka/src/test/resources/log4j.properties
new file mode 100644
index 000000000..5f528843a
--- /dev/null
+++ b/sentry-tests/sentry-tests-kafka/src/test/resources/log4j.properties
@@ -0,0 +1,38 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Define some default values that can be overridden by system properties.
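+# (e.g. "mvn test -Dsentry.root.logger=INFO,console" should quiet the console
+# output, since log4j resolves ${...} placeholders from system properties).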
+# +# For testing, it may also be convenient to specify + +sentry.root.logger=DEBUG,console +log4j.rootLogger=${sentry.root.logger} + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.target=System.out +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n + +log4g.logger.kafka.utils.Logging=WARN +log4j.logger.org.apache.kafka=WARN +log4j.logger.org.apache.sentry=DEBUG +log4j.logger.org.apache.zookeeper=WARN +log4j.logger.org.I0Itec.zkclient=WARN +log4j.logger.org.apache.hadoop=WARN +log4j.category.DataNucleus=OFF \ No newline at end of file diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/test.crt b/sentry-tests/sentry-tests-kafka/src/test/resources/test.crt new file mode 100644 index 000000000..fd6c902d7 --- /dev/null +++ b/sentry-tests/sentry-tests-kafka/src/test/resources/test.crt @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICxzCCAa+gAwIBAgIEK13qfTANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwlzdXBlcnVzZXIw +HhcNMTUxMjE1MjMzNTAzWhcNMTYwMzE0MjMzNTAzWjAUMRIwEAYDVQQDEwlzdXBlcnVzZXIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChGUnirhdFKW6OXbPBqQ1tWEFrxvCHr51uVU9H +V2aqO+Q02a+Vzyb24dzyqnbM5uOeGqAyTFXpCPOK0oxTCvf/0idmHIcgt40797I7rxWDJw9/wYos +UGkqizAb878LaFScIo6Phu6zjdj/J16vd5KiWN5pzOLnwO8DebzO5s+N34VuNZ8s45zemq2bES9Z +z8mMolTkZS4d8wGExC93n5oiNrPGUneKRZJYukv3SiDMajaOTqnI4Xo/LIs3dynq8dTBQPTtUwnA +UZz8kpew6PfxDYYHjg2eHli/6Dopmur/R27xuxn5VnJHnxgL5mbxrRgAidGN6CwJFA7ZxSBn67pr +AgMBAAGjITAfMB0GA1UdDgQWBBTxczVGKoS4NuNIPlS4yJfm8fSj3zANBgkqhkiG9w0BAQsFAAOC +AQEAC4PSVAzUVGqhESIGDpJ6kbHzw/wBUmrjceTDQv9cVPNrHlMWoG67nM45tECWud3osB57nunV +vcwSNXxhf4M+IPK1BoT2awUjEfWN+F7guxFXpU2lQpmHPj+015g9pGvvneRLZj8VfdFo8PuyDeRy +V0HuG7xJ2xZMM8XpgL9BHrgD/4CITzRkaHnyuYb+Yz5GUFYOpLn0ANNm3gfW+eMiE/38zc+o23wJ +V49hAKGqalJUATWVzq7iCqTqxeIQ2RQyJ9O5p82Y5CIG1Tp07zdCPVqkKz7NAbt2K0ZW5/5qc5V/ +y88rnXWj9nZPYwyVj5rxqB8h2WDLDmxr1JuwuMOlYw== +-----END CERTIFICATE----- diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/test.keystore.jks b/sentry-tests/sentry-tests-kafka/src/test/resources/test.keystore.jks new file mode 100644 index 000000000..6a5fe66f2 Binary files /dev/null and b/sentry-tests/sentry-tests-kafka/src/test/resources/test.keystore.jks differ diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/test.truststore.jks b/sentry-tests/sentry-tests-kafka/src/test/resources/test.truststore.jks new file mode 100644 index 000000000..3c91e92af Binary files /dev/null and b/sentry-tests/sentry-tests-kafka/src/test/resources/test.truststore.jks differ diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/user1.crt b/sentry-tests/sentry-tests-kafka/src/test/resources/user1.crt new file mode 100644 index 000000000..5cb6caa25 --- /dev/null +++ b/sentry-tests/sentry-tests-kafka/src/test/resources/user1.crt @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICvzCCAaegAwIBAgIEWaKEszANBgkqhkiG9w0BAQsFADAQMQ4wDAYDVQQDEwV1c2VyMTAeFw0x +NTEyMTUyMzQyNTlaFw0xNjAzMTQyMzQyNTlaMBAxDjAMBgNVBAMTBXVzZXIxMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAgDzGn4VvJnROVCC+CR77DfqmF1wkNUrOiaLL9qufoRi9DuZU +epmqebg0YyCQVyuIUe1p7qhnOGNnFN0nJC75C4MbCDX/s2+gxUBb6iaP7pwmdKzprvP3YGQrQXo/ +pv+zV9EH1P5JP+27B6NVGTGJPUP4UqZF2uyhNOHIcB9sMvZTnyfDLs+8o9dCv3bFPpwEGZnk3I1I +xD1cYSz+qb3E3M68L6cFVSo1qnK0QN8eBXXB/ljCHaQ47jLfZrJjjiRKA1YOnY+sRCbQDv4wU+dc +oOenLzLikrMdVyONokbkneS/LnwjmNev2i9I9NA0D3bZvJuN/DkuQ245iXgdnqOvJwIDAQABoyEw +HzAdBgNVHQ4EFgQUfzocV1Og4CsGte7Ux4luCVA3TTYwDQYJKoZIhvcNAQELBQADggEBAEeemqwJ 
+eY/GahjPesuyJKiIfH4MgMGvZ19441WnKG1CuHrtwMES8Znxc+iS4hutPX6I/Mvb9HMg8M3u9B7W +1dj4QOvi5iZuWqrV2bhBrFoUV7fXPjjMfu15i/CX5Lfu56cBeyKshq674rQ4AWn1k5saxa6Jhaao +6ceFfnTldgVSSS0rBFyz1fBj7dLXnS8MmxN0cmDO1jVXu2Tfjw0ofRmLxD1SCMEwrNEcERRUWudm +nIy1Q14xCYmTnGEf9uG8TmHO/y5Elc/jcMN2mGwb8N0FIV7nh1HLyAmR6O7JPrQ3QWR4Vr5tMH/K +3b9N51c0enX9UZedGYVc+qlLJ/P6B5w= +-----END CERTIFICATE----- diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/user1.keystore.jks b/sentry-tests/sentry-tests-kafka/src/test/resources/user1.keystore.jks new file mode 100644 index 000000000..60bb91a62 Binary files /dev/null and b/sentry-tests/sentry-tests-kafka/src/test/resources/user1.keystore.jks differ diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/user1.truststore.jks b/sentry-tests/sentry-tests-kafka/src/test/resources/user1.truststore.jks new file mode 100644 index 000000000..a59dab2b6 Binary files /dev/null and b/sentry-tests/sentry-tests-kafka/src/test/resources/user1.truststore.jks differ diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/user2.crt b/sentry-tests/sentry-tests-kafka/src/test/resources/user2.crt new file mode 100644 index 000000000..d0b0820d8 --- /dev/null +++ b/sentry-tests/sentry-tests-kafka/src/test/resources/user2.crt @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICvzCCAaegAwIBAgIEC6qUijANBgkqhkiG9w0BAQsFADAQMQ4wDAYDVQQDEwV1c2VyMjAeFw0x +NTEyMTUyMzQ0MjVaFw0xNjAzMTQyMzQ0MjVaMBAxDjAMBgNVBAMTBXVzZXIyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAhm2vitVj2xApz7ZtaWNcqegodc9nFY+HCcIx2WqoUzQTXZ8q +Fm6H6blKrL+xJXY7ZlEB8nMdfWFfOdS2zX6hutkstkwId5MSceWUb5GUzdClUQAS8DGMtQdU3LlY +EcIgz9fim6/Ad0ZIKwyAc47HJLd/nQOozAaDDnWdLbhRymv/PNEt5IndkeTfbFd1uWgpV9vhfLWN +3FmXOksVoIKR+l9YBOmAUIjstK2Tq8b/q4Dbcp82X1nPW12fG2FlowgolWEOlaCbSGwN60LjoP69 +1azAFU5IPaxmQ46oZpb7jMCRrHgdx+zhjRxjY9PpTCYWdtBHqnLyuckl/mpOxS64vwIDAQABoyEw +HzAdBgNVHQ4EFgQUHaTI3Xl/CjJLhVCZto5ZJBCTaLUwDQYJKoZIhvcNAQELBQADggEBAEg/SxvT ++NLmh7tWF0QZR2S6wl+UgJIqiS6NlEk3Te6TdPda2t2K8cmFndBcAmZqvLkz7dIkeDwa507SbrTg +NJXcOycpH1s15VjiVRF8dXqflLCEcBUNw8h4AENsdVcNKliR+YXLk1i/x5jVfncQps6Zxj68NFoN +h6tf7KyBHT4DvekYocjdXDQ/tPdvPqokYIM/q0K7NRZvDg6yUYukkFjta9D9623PwydtA/t75AEb +zOJra5A6qp/qo/U1UyLzEkwSlWaLaOa7MrNaFy/OQbkVncP+6jFCIXlWpQ+TqyUmTfwmL+A2oJWW +l3Ziy62zAfuaJ1EwY4zwFlZHJR4lF7E= +-----END CERTIFICATE----- diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/user2.keystore.jks b/sentry-tests/sentry-tests-kafka/src/test/resources/user2.keystore.jks new file mode 100644 index 000000000..beeff4cf9 Binary files /dev/null and b/sentry-tests/sentry-tests-kafka/src/test/resources/user2.keystore.jks differ diff --git a/sentry-tests/sentry-tests-kafka/src/test/resources/user2.truststore.jks b/sentry-tests/sentry-tests-kafka/src/test/resources/user2.truststore.jks new file mode 100644 index 000000000..067677d64 Binary files /dev/null and b/sentry-tests/sentry-tests-kafka/src/test/resources/user2.truststore.jks differ diff --git a/sentry-tests/sentry-tests-solr/pom.xml b/sentry-tests/sentry-tests-solr/pom.xml index 5a1e5c2dd..e90ca062b 100644 --- a/sentry-tests/sentry-tests-solr/pom.xml +++ b/sentry-tests/sentry-tests-solr/pom.xml @@ -22,7 +22,7 @@ limitations under the License. org.apache.sentry sentry-tests - 1.5.0-incubating-SNAPSHOT + 1.7.0-incubating-SNAPSHOT sentry-tests-solr @@ -73,4 +73,16 @@ limitations under the License. 
+ + + + org.apache.maven.plugins + maven-surefire-plugin + + false + + + + + diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/AbstractSolrSentryTestBase.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/AbstractSolrSentryTestBase.java index 2495a9eec..7ddd1e2a3 100644 --- a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/AbstractSolrSentryTestBase.java +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/AbstractSolrSentryTestBase.java @@ -28,6 +28,7 @@ import java.net.MalformedURLException; import java.net.URI; import java.util.Comparator; +import java.util.Iterator; import java.util.Map; import java.util.Random; import java.util.Set; @@ -61,6 +62,7 @@ import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.SolrInputDocument; +import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.Replica; import org.apache.solr.common.cloud.Slice; @@ -68,7 +70,6 @@ import org.apache.solr.common.params.CollectionParams.CollectionAction; import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.common.params.ModifiableSolrParams; -import org.apache.solr.common.util.NamedList; import org.apache.solr.servlet.SolrDispatchFilter; import org.junit.After; import org.junit.AfterClass; @@ -90,6 +91,7 @@ public class AbstractSolrSentryTestBase { protected static final Random RANDOM = new Random(); protected static final String RESOURCES_DIR = "target" + File.separator + "test-classes" + File.separator + "solr"; protected static final String CONF_DIR_IN_ZK = "conf1"; + protected static final String DEFAULT_COLLECTION = "collection1"; protected static final int NUM_SERVERS = 4; private static void addPropertyToSentry(StringBuilder builder, String name, String value) { @@ -136,6 +138,10 @@ public int compare(Class o1, Class o2) { return 0; } + public int hashCode() { + return 17; + } + public boolean equals(Object obj) { return true; } @@ -413,20 +419,33 @@ protected void verifyQueryFail(String solrUserName, * @param solrUserName - User authenticated into Solr * @param adminOp - Admin operation to be performed * @param collectionName - Name of the collection to be queried - * @param ignoreError - boolean to specify whether to ignore the error if any occurred. 
- * (We may need this attribute for running DELETE command on a collection which doesn't exist) * @throws Exception */ protected void verifyCollectionAdminOpPass(String solrUserName, CollectionAction adminOp, String collectionName) throws Exception { + verifyCollectionAdminOpPass(solrUserName, adminOp, collectionName, null); + } + + /** + * Method to validate collection Admin operation pass + * @param solrUserName - User authenticated into Solr + * @param adminOp - Admin operation to be performed + * @param collectionName - Name of the collection to be queried + * @param params - SolrParams to use + * @throws Exception + */ + protected void verifyCollectionAdminOpPass(String solrUserName, + CollectionAction adminOp, + String collectionName, + SolrParams params) throws Exception { String originalUser = getAuthenticatedUser(); try { setAuthenticationUser(solrUserName); - QueryRequest request = populateCollectionAdminParams(adminOp, collectionName); + QueryRequest request = populateCollectionAdminParams(adminOp, collectionName, params); CloudSolrServer solrServer = createNewCloudSolrServer(); try { - NamedList result = solrServer.request(request); + solrServer.request(request); if (adminOp.compareTo(CollectionAction.CREATE) == 0) { // Wait for collection creation to complete. waitForRecoveriesToFinish(collectionName, solrServer, false); @@ -449,15 +468,30 @@ protected void verifyCollectionAdminOpPass(String solrUserName, protected void verifyCollectionAdminOpFail(String solrUserName, CollectionAction adminOp, String collectionName) throws Exception { + verifyCollectionAdminOpFail(solrUserName, adminOp, collectionName, null); + } + + /** + * Method to validate collection Admin operation fail + * @param solrUserName - User authenticated into Solr + * @param adminOp - Admin operation to be performed + * @param collectionName - Name of the collection to be queried + * @param params - SolrParams to use + * @throws Exception + */ + protected void verifyCollectionAdminOpFail(String solrUserName, + CollectionAction adminOp, + String collectionName, + SolrParams params) throws Exception { String originalUser = getAuthenticatedUser(); try { setAuthenticationUser(solrUserName); try { - QueryRequest request = populateCollectionAdminParams(adminOp, collectionName); + QueryRequest request = populateCollectionAdminParams(adminOp, collectionName, params); CloudSolrServer solrServer = createNewCloudSolrServer(); try { - NamedList result = solrServer.request(request); + solrServer.request(request); if (adminOp.compareTo(CollectionAction.CREATE) == 0) { // Wait for collection creation to complete. waitForRecoveriesToFinish(collectionName, solrServer, false); @@ -483,7 +517,20 @@ protected void verifyCollectionAdminOpFail(String solrUserName, * @return - instance of QueryRequest. */ public QueryRequest populateCollectionAdminParams(CollectionAction adminOp, - String collectionName) { + String collectionName) { + return populateCollectionAdminParams(adminOp, collectionName, null); + } + + /** + * Method to populate the Solr params based on the collection admin being performed. + * @param adminOp - Collection admin operation + * @param collectionName - Name of the collection + * @param params - SolrParams to use + * @return - instance of QueryRequest. 
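+   * Entries in params are applied after the operation-specific defaults,
+   * so they override them.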
+ */ + public QueryRequest populateCollectionAdminParams(CollectionAction adminOp, + String collectionName, + SolrParams params) { ModifiableSolrParams modParams = new ModifiableSolrParams(); modParams.set(CoreAdminParams.ACTION, adminOp.name()); switch (adminOp) { @@ -519,6 +566,14 @@ public QueryRequest populateCollectionAdminParams(CollectionAction adminOp, throw new IllegalArgumentException("Admin operation: " + adminOp + " is not supported!"); } + if (params != null) { + Iterator it = params.getParameterNamesIterator(); + while (it.hasNext()) { + String param = it.next(); + String [] value = params.getParams(param); + modParams.set(param, value); + } + } QueryRequest request = new QueryRequest(modParams); request.setPath("/admin/collections"); return request; @@ -701,16 +756,22 @@ private ZkController getZkController() { } protected void uploadConfigDirToZk(String collectionConfigDir) throws Exception { + uploadConfigDirToZk(collectionConfigDir, CONF_DIR_IN_ZK); + } + + protected void uploadConfigDirToZk(String collectionConfigDir, String confDirInZk) throws Exception { ZkController zkController = getZkController(); - // conf1 is the config used by AbstractFullDistribZkTestBase - zkController.uploadConfigDir(new File(collectionConfigDir), - CONF_DIR_IN_ZK); + zkController.uploadConfigDir(new File(collectionConfigDir), confDirInZk); } protected void uploadConfigFileToZk(String file, String nameInZk) throws Exception { + uploadConfigFileToZk(file, nameInZk, CONF_DIR_IN_ZK); + } + + protected void uploadConfigFileToZk(String file, String nameInZk, String confDirInZk) throws Exception { ZkController zkController = getZkController(); zkController.getZkClient().makePath(ZkController.CONFIGS_ZKNODE + "/" - + CONF_DIR_IN_ZK + "/" + nameInZk, new File(file), false, true); + + confDirInZk + "/" + nameInZk, new File(file), false, true); } protected CloudSolrServer createNewCloudSolrServer() throws Exception { @@ -753,7 +814,6 @@ protected String makeHttpRequest(CloudSolrServer server, String node, String htt String retValue = ""; try { final HttpResponse response = httpClient.execute(method); - int httpStatus = response.getStatusLine().getStatusCode(); httpEntity = response.getEntity(); if (httpEntity != null) { @@ -808,7 +868,9 @@ protected static void waitForRecoveriesToFinish(String collection, int cnt = 0; while (cont) { - if (verbose) LOG.debug("-"); + if (verbose) { + LOG.debug("-"); + } boolean sawLiveRecovering = false; zkStateReader.updateClusterState(true); ClusterState clusterState = zkStateReader.getClusterState(); @@ -817,9 +879,11 @@ protected static void waitForRecoveriesToFinish(String collection, for (Map.Entry entry : slices.entrySet()) { Map shards = entry.getValue().getReplicasMap(); for (Map.Entry shard : shards.entrySet()) { - if (verbose) LOG.debug("rstate:" + if (verbose) { + LOG.debug("rstate:" + shard.getValue().getStr(ZkStateReader.STATE_PROP) + " live:" + clusterState.liveNodesContain(shard.getValue().getNodeName())); + } String state = shard.getValue().getStr(ZkStateReader.STATE_PROP); if ((state.equals(ZkStateReader.RECOVERING) || state.equals(ZkStateReader.SYNC) || state @@ -832,9 +896,13 @@ protected static void waitForRecoveriesToFinish(String collection, } if (!sawLiveRecovering || cnt == timeoutSeconds) { if (!sawLiveRecovering) { - if (verbose) LOG.debug("no one is recovering"); + if (verbose) { + LOG.debug("no one is recovering"); + } } else { - if (verbose) LOG.debug("Gave up waiting for recovery to finish.."); + if (verbose) { + LOG.debug("Gave up waiting 
for recovery to finish.."); + } if (failOnTimeout) { fail("There are still nodes recovering - waited for " + timeoutSeconds + " seconds"); diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/DocLevelGenerator.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/DocLevelGenerator.java new file mode 100644 index 000000000..e50e3f8d8 --- /dev/null +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/DocLevelGenerator.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.solr; + +import org.apache.solr.common.SolrInputDocument; +import org.apache.solr.client.solrj.impl.CloudSolrServer; + +import java.util.ArrayList; + +public class DocLevelGenerator { + private String authField; + + public DocLevelGenerator(String authField) { + this.authField = authField; + } + + /** + * Generates docs according to the following parameters: + * + * @param server SolrServer to use + * @param numDocs number of documents to generate + * @param evenDocsToken every even number doc gets this token added to the authField + * @param oddDocsToken every odd number doc gets this token added to the authField + * @param extraAuthFieldsCount generates this number of bogus entries in the authField + */ + public void generateDocs(CloudSolrServer server, int numDocs, String evenDocsToken, String oddDocsToken, int extraAuthFieldsCount) throws Exception { + + // create documents + ArrayList docs = new ArrayList(); + for (int i = 0; i < numDocs; ++i) { + SolrInputDocument doc = new SolrInputDocument(); + String iStr = Long.toString(i); + doc.addField("id", iStr); + doc.addField("description", "description" + iStr); + + // put some bogus tokens in + for (int k = 0; k < extraAuthFieldsCount; ++k) { + doc.addField(authField, authField + Long.toString(k)); + } + // even docs get evenDocsToken, odd docs get oddDocsToken + if (i % 2 == 0) { + doc.addField(authField, evenDocsToken); + } else { + doc.addField(authField, oddDocsToken); + } + // add a token to all docs so we can check that we can get all + // documents returned + doc.addField(authField, "docLevel_role"); + + docs.add(doc); + } + + server.add(docs); + server.commit(true, true); + } +} diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/ModifiableUserAuthenticationFilter.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/ModifiableUserAuthenticationFilter.java index 533858b4f..ac676a84c 100644 --- a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/ModifiableUserAuthenticationFilter.java +++ 
b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/ModifiableUserAuthenticationFilter.java @@ -27,15 +27,11 @@ import javax.servlet.http.HttpServletRequest; import org.apache.solr.servlet.SolrRequestParsers; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Authentication Filter that authenticates any request as user "junit" */ public class ModifiableUserAuthenticationFilter implements Filter { - private static final Logger LOG = LoggerFactory - .getLogger(ModifiableUserAuthenticationFilter.class); /** * String that saves the user to be authenticated into Solr diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestDocLevelOperations.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestDocLevelOperations.java index ff508e128..71452e245 100644 --- a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestDocLevelOperations.java +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestDocLevelOperations.java @@ -16,8 +16,6 @@ */ package org.apache.sentry.tests.e2e.solr; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.junit.After; import org.junit.Before; import static org.junit.Assert.assertEquals; @@ -25,11 +23,14 @@ import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.impl.CloudSolrServer; +import org.apache.solr.client.solrj.request.QueryRequest; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.SolrInputDocument; +import org.apache.solr.common.params.ModifiableSolrParams; +import org.apache.solr.common.params.SolrParams; import java.io.File; import java.net.URLEncoder; @@ -42,9 +43,6 @@ * Test the document-level security features */ public class TestDocLevelOperations extends AbstractSolrSentryTestBase { - private static final Logger LOG = LoggerFactory - .getLogger(TestDocLevelOperations.class); - private static final String DEFAULT_COLLECTION = "collection1"; private static final String AUTH_FIELD = "sentry_auth"; private static final int NUM_DOCS = 100; private static final int EXTRA_AUTH_FIELDS = 2; @@ -70,6 +68,31 @@ private void setupCollectionWithDocSecurity(String name) throws Exception { setupCollection(name); } + private QueryRequest getRealTimeGetRequest() { + // real time get request + StringBuilder idsBuilder = new StringBuilder("0"); + for (int i = 1; i < NUM_DOCS; ++i) { + idsBuilder.append("," + i); + } + return getRealTimeGetRequest(idsBuilder.toString()); + } + + private QueryRequest getRealTimeGetRequest(String ids) { + final ModifiableSolrParams idsParams = new ModifiableSolrParams(); + idsParams.add("ids", ids); + return new QueryRequest() { + @Override + public String getPath() { + return "/get"; + } + + @Override + public SolrParams getParams() { + return idsParams; + } + }; + } + /** * Creates docs as follows and verifies queries work as expected: * - creates NUM_DOCS documents, where the document id equals the order @@ -84,67 +107,45 @@ private void createDocsAndQuerySimple(String collectionName, boolean checkNonAdm // ensure no current documents verifyDeletedocsPass(ADMIN_USER, collectionName, true); - // create documents - ArrayList docs = new ArrayList(); - for (int i = 0; i < NUM_DOCS; ++i) { - SolrInputDocument doc = new SolrInputDocument(); - String iStr = Long.toString(i); - doc.addField("id", iStr); - 
doc.addField("description", "description" + iStr); - - // put some bogus tokens in - for (int k = 0; k < EXTRA_AUTH_FIELDS; ++k) { - doc.addField(AUTH_FIELD, AUTH_FIELD + Long.toString(k)); - } - // 50% of docs get "junit", 50% get "admin" as token - if (i % 2 == 0) { - doc.addField(AUTH_FIELD, "junit_role"); - } else { - doc.addField(AUTH_FIELD, "admin_role"); - } - // add a token to all docs so we can check that we can get all - // documents returned - doc.addField(AUTH_FIELD, "docLevel_role"); - - docs.add(doc); - } CloudSolrServer server = getCloudSolrServer(collectionName); try { - server.add(docs); - server.commit(true, true); + DocLevelGenerator generator = new DocLevelGenerator(AUTH_FIELD); + generator.generateDocs(server, NUM_DOCS, "junit_role", "admin_role", EXTRA_AUTH_FIELDS); - // queries - SolrQuery query = new SolrQuery(); - query.setQuery("*:*"); + querySimple(new QueryRequest(new SolrQuery("*:*")), server, checkNonAdminUsers); + querySimple(getRealTimeGetRequest(), server, checkNonAdminUsers); + } finally { + server.shutdown(); + } + } - // as admin -- should get the other half - setAuthenticationUser("admin"); - QueryResponse rsp = server.query(query); - SolrDocumentList docList = rsp.getResults(); + private void querySimple(QueryRequest request, CloudSolrServer server, + boolean checkNonAdminUsers) throws Exception { + // as admin -- should get the other half + setAuthenticationUser("admin"); + QueryResponse rsp = request.process(server); + SolrDocumentList docList = rsp.getResults(); + assertEquals(NUM_DOCS / 2, docList.getNumFound()); + for (SolrDocument doc : docList) { + String id = doc.getFieldValue("id").toString(); + assertEquals(1, Long.valueOf(id) % 2); + } + + if (checkNonAdminUsers) { + // as junit -- should get half the documents + setAuthenticationUser("junit"); + rsp = request.process(server); + docList = rsp.getResults(); assertEquals(NUM_DOCS / 2, docList.getNumFound()); for (SolrDocument doc : docList) { String id = doc.getFieldValue("id").toString(); - assertEquals(1, Long.valueOf(id) % 2); + assertEquals(0, Long.valueOf(id) % 2); } - if (checkNonAdminUsers) { - // as junit -- should get half the documents - setAuthenticationUser("junit"); - rsp = server.query(query); - docList = rsp.getResults(); - assertEquals(NUM_DOCS / 2, docList.getNumFound()); - for (SolrDocument doc : docList) { - String id = doc.getFieldValue("id").toString(); - assertEquals(0, Long.valueOf(id) % 2); - } - - // as docLevel -- should get all - setAuthenticationUser("docLevel"); - rsp = server.query(query); - assertEquals(NUM_DOCS, rsp.getResults().getNumFound()); - } - } finally { - server.shutdown(); + // as docLevel -- should get all + setAuthenticationUser("docLevel"); + rsp = request.process(server); + assertEquals(NUM_DOCS, rsp.getResults().getNumFound()); } } @@ -163,20 +164,20 @@ public void testDocLevelOperations() throws Exception { // test filter queries work as AND -- i.e. 
user can't avoid doc-level // checks by prefixing their own filterQuery setAuthenticationUser("junit"); - String fq = URLEncoder.encode(" {!raw f=" + AUTH_FIELD + " v=docLevel_role}"); + String fq = URLEncoder.encode(" {!raw f=" + AUTH_FIELD + " v=docLevel_role}", "UTF-8"); String path = "/" + collectionName + "/select?q=*:*&fq="+fq; String retValue = makeHttpRequest(server, "GET", path, null, null); assertTrue(retValue.contains("numFound=\"" + NUM_DOCS / 2 + "\" ")); // test that user can't inject an "OR" into the query final String syntaxErrorMsg = "org.apache.solr.search.SyntaxError: Cannot parse"; - fq = URLEncoder.encode(" {!raw f=" + AUTH_FIELD + " v=docLevel_role} OR "); + fq = URLEncoder.encode(" {!raw f=" + AUTH_FIELD + " v=docLevel_role} OR ", "UTF-8"); path = "/" + collectionName + "/select?q=*:*&fq="+fq; retValue = makeHttpRequest(server, "GET", path, null, null); assertTrue(retValue.contains(syntaxErrorMsg)); // same test, prefix OR this time - fq = URLEncoder.encode(" OR {!raw f=" + AUTH_FIELD + " v=docLevel_role}"); + fq = URLEncoder.encode(" OR {!raw f=" + AUTH_FIELD + " v=docLevel_role}", "UTF-8"); path = "/" + collectionName + "/select?q=*:*&fq="+fq; retValue = makeHttpRequest(server, "GET", path, null, null); assertTrue(retValue.contains(syntaxErrorMsg)); @@ -223,7 +224,9 @@ public void testAllRolesToken() throws Exception { } if (i % allRolesFactor == 0) { doc.addField(AUTH_FIELD, allRolesToken); ++totalAllRolesAdded; - if (!addedViaJunit) ++totalOnlyAllRolesAdded; + if (!addedViaJunit) { + ++totalOnlyAllRolesAdded; + } } docs.add(doc); } @@ -237,31 +240,10 @@ public void testAllRolesToken() throws Exception { server.add(docs); server.commit(true, true); - // queries - SolrQuery query = new SolrQuery(); - query.setQuery("*:*"); - - // as admin -- should only get all roles token documents - setAuthenticationUser("admin"); - QueryResponse rsp = server.query(query); - SolrDocumentList docList = rsp.getResults(); - assertEquals(totalAllRolesAdded, docList.getNumFound()); - for (SolrDocument doc : docList) { - String id = doc.getFieldValue("id").toString(); - assertEquals(0, Long.valueOf(id) % allRolesFactor); - } - - // as junit -- should get junit added + onlyAllRolesAdded - setAuthenticationUser("junit"); - rsp = server.query(query); - docList = rsp.getResults(); - assertEquals(totalJunitAdded + totalOnlyAllRolesAdded, docList.getNumFound()); - for (SolrDocument doc : docList) { - String id = doc.getFieldValue("id").toString(); - boolean addedJunit = (Long.valueOf(id) % junitFactor) == 0; - boolean onlyAllRoles = !addedJunit && (Long.valueOf(id) % allRolesFactor) == 0; - assertEquals(true, addedJunit || onlyAllRoles); - } + checkAllRolesToken(new QueryRequest(new SolrQuery("*:*")), server, + totalAllRolesAdded, totalOnlyAllRolesAdded, allRolesFactor, totalJunitAdded, junitFactor); + checkAllRolesToken(getRealTimeGetRequest(), server, + totalAllRolesAdded, totalOnlyAllRolesAdded, allRolesFactor, totalJunitAdded, junitFactor); } finally { server.shutdown(); } @@ -270,6 +252,31 @@ public void testAllRolesToken() throws Exception { } } + private void checkAllRolesToken(QueryRequest request, CloudSolrServer server, + int totalAllRolesAdded, int totalOnlyAllRolesAdded, int allRolesFactor, int totalJunitAdded, int junitFactor) throws Exception { + // as admin -- should only get all roles token documents + setAuthenticationUser("admin"); + QueryResponse rsp = request.process(server); + SolrDocumentList docList = rsp.getResults(); + assertEquals(totalAllRolesAdded, 
docList.getNumFound()); + for (SolrDocument doc : docList) { + String id = doc.getFieldValue("id").toString(); + assertEquals(0, Long.valueOf(id) % allRolesFactor); + } + + // as junit -- should get junit added + onlyAllRolesAdded + setAuthenticationUser("junit"); + rsp = request.process(server); + docList = rsp.getResults(); + assertEquals(totalJunitAdded + totalOnlyAllRolesAdded, docList.getNumFound()); + for (SolrDocument doc : docList) { + String id = doc.getFieldValue("id").toString(); + boolean addedJunit = (Long.valueOf(id) % junitFactor) == 0; + boolean onlyAllRoles = !addedJunit && (Long.valueOf(id) % allRolesFactor) == 0; + assertEquals(true, addedJunit || onlyAllRoles); + } + } + /** * delete the docs as "deleteUser" using deleteByQuery "deleteQueryStr". * Verify that number of docs returned for "queryUser" equals @@ -280,32 +287,35 @@ private void deleteByQueryTest(String collectionName, String deleteUser, createDocsAndQuerySimple(collectionName, true); CloudSolrServer server = getCloudSolrServer(collectionName); try { - SolrQuery query = new SolrQuery(); - query.setQuery("*:*"); - setAuthenticationUser(deleteUser); server.deleteByQuery(deleteByQueryStr); server.commit(); - QueryResponse rsp = server.query(query); - long junitResults = rsp.getResults().getNumFound(); - assertEquals(0, junitResults); - - setAuthenticationUser(queryUser); - rsp = server.query(query); - long docLevelResults = rsp.getResults().getNumFound(); - assertEquals(expectedQueryDocs, docLevelResults); + + checkDeleteByQuery(new QueryRequest(new SolrQuery("*:*")), server, + queryUser, expectedQueryDocs); + checkDeleteByQuery(getRealTimeGetRequest(), server, + queryUser, expectedQueryDocs); } finally { server.shutdown(); } } + private void checkDeleteByQuery(QueryRequest query, CloudSolrServer server, + String queryUser, int expectedQueryDocs) throws Exception { + QueryResponse rsp = query.process(server); + long junitResults = rsp.getResults().getNumFound(); + assertEquals(0, junitResults); + + setAuthenticationUser(queryUser); + rsp = query.process(server); + long docLevelResults = rsp.getResults().getNumFound(); + assertEquals(expectedQueryDocs, docLevelResults); + } + private void deleteByIdTest(String collectionName) throws Exception { createDocsAndQuerySimple(collectionName, true); CloudSolrServer server = getCloudSolrServer(collectionName); try { - SolrQuery query = new SolrQuery(); - query.setQuery("*:*"); - setAuthenticationUser("junit"); List allIds = new ArrayList(NUM_DOCS); for (int i = 0; i < NUM_DOCS; ++i) { @@ -314,19 +324,25 @@ private void deleteByIdTest(String collectionName) throws Exception { server.deleteById(allIds); server.commit(); - QueryResponse rsp = server.query(query); - long junitResults = rsp.getResults().getNumFound(); - assertEquals(0, junitResults); - - setAuthenticationUser("docLevel"); - rsp = server.query(query); - long docLevelResults = rsp.getResults().getNumFound(); - assertEquals(0, docLevelResults); + checkDeleteById(new QueryRequest(new SolrQuery("*:*")), server); + checkDeleteById(getRealTimeGetRequest(), server); } finally { server.shutdown(); } } + private void checkDeleteById(QueryRequest request, CloudSolrServer server) + throws Exception { + QueryResponse rsp = request.process(server); + long junitResults = rsp.getResults().getNumFound(); + assertEquals(0, junitResults); + + setAuthenticationUser("docLevel"); + rsp = request.process(server); + long docLevelResults = rsp.getResults().getNumFound(); + assertEquals(0, docLevelResults); + } + private void 
updateDocsTest(String collectionName) throws Exception { createDocsAndQuerySimple(collectionName, true); CloudSolrServer server = getCloudSolrServer(collectionName); @@ -335,10 +351,10 @@ private void updateDocsTest(String collectionName) throws Exception { String docIdStr = Long.toString(1); // verify we can't view one of the odd documents - SolrQuery query = new SolrQuery(); - query.setQuery("id:"+docIdStr); - QueryResponse rsp = server.query(query); - assertEquals(0, rsp.getResults().getNumFound()); + QueryRequest query = new QueryRequest(new SolrQuery("id:"+docIdStr)); + QueryRequest rtgQuery = getRealTimeGetRequest(docIdStr); + checkUpdateDocsQuery(query, server, 0); + checkUpdateDocsQuery(rtgQuery, server, 0); // overwrite the document that we can't see ArrayList docs = new ArrayList(); @@ -351,13 +367,19 @@ private void updateDocsTest(String collectionName) throws Exception { server.commit(); // verify we can now view the document - rsp = server.query(query); - assertEquals(1, rsp.getResults().getNumFound()); + checkUpdateDocsQuery(query, server, 1); + checkUpdateDocsQuery(rtgQuery, server, 1); } finally { server.shutdown(); } } + private void checkUpdateDocsQuery(QueryRequest request, CloudSolrServer server, int expectedDocs) + throws Exception { + QueryResponse rsp = request.process(server); + assertEquals(expectedDocs, rsp.getResults().getNumFound()); + } + @Test public void testUpdateDeleteOperations() throws Exception { String collectionName = "testUpdateDeleteOperations"; diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestQueryOperations.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestQueryOperations.java index c25717569..f8ed955db 100644 --- a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestQueryOperations.java +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestQueryOperations.java @@ -27,9 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; - import org.junit.Test; import static org.junit.Assert.assertEquals; diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestRealTimeGet.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestRealTimeGet.java new file mode 100644 index 000000000..6181d8b9a --- /dev/null +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/TestRealTimeGet.java @@ -0,0 +1,476 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.sentry.tests.e2e.solr; + +import org.apache.solr.client.solrj.SolrServerException; +import org.apache.solr.client.solrj.impl.CloudSolrServer; +import org.apache.solr.client.solrj.request.QueryRequest; +import org.apache.solr.client.solrj.response.QueryResponse; +import org.apache.solr.common.SolrDocument; +import org.apache.solr.common.SolrDocumentList; +import org.apache.solr.common.SolrInputDocument; +import org.apache.solr.common.params.CollectionParams.CollectionAction; +import org.apache.solr.common.params.ModifiableSolrParams; +import org.apache.solr.common.params.SolrParams; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Random; +import java.util.Set; + +public class TestRealTimeGet extends AbstractSolrSentryTestBase { + private static final String AUTH_FIELD = "sentry_auth"; + private static final Random rand = new Random(); + private String userName = null; + + @Before + public void beforeTest() throws Exception { + userName = getAuthenticatedUser(); + } + + @After + public void afterTest() throws Exception { + setAuthenticationUser(userName); + } + + private void setupCollectionWithDocSecurity(String name) throws Exception { + setupCollectionWithDocSecurity(name, 2); + } + + private void setupCollectionWithDocSecurity(String name, int shards) throws Exception { + String configDir = RESOURCES_DIR + File.separator + DEFAULT_COLLECTION + + File.separator + "conf"; + uploadConfigDirToZk(configDir, name); + // replace solrconfig.xml with solrconfig-doc-level.xml + uploadConfigFileToZk(configDir + File.separator + "solrconfig-doclevel.xml", + "solrconfig.xml", name); + ModifiableSolrParams modParams = new ModifiableSolrParams(); + modParams.set("numShards", shards); + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < shards; ++i) { + if (i != 0) { + builder.append(","); + } + builder.append("shard").append(i+1); + } + modParams.set("shards", builder.toString()); + verifyCollectionAdminOpPass(ADMIN_USER, CollectionAction.CREATE, name, modParams); + } + + private void setupCollectionWithoutDocSecurity(String name) throws Exception { + String configDir = RESOURCES_DIR + File.separator + DEFAULT_COLLECTION + + File.separator + "conf"; + uploadConfigDirToZk(configDir, name); + setupCollection(name); + } + + private QueryRequest getRealTimeGetRequest(final SolrParams params) { + return new QueryRequest() { + @Override + public String getPath() { + return "/get"; + } + + @Override + public SolrParams getParams() { + return params; + } + }; + } + + private void assertExpected(ExpectedResult expectedResult, QueryResponse rsp, + QueryResponse controlRsp) throws Exception { + SolrDocumentList docList = rsp.getResults(); + SolrDocumentList controlDocList = controlRsp.getResults(); + SolrDocument doc = (SolrDocument)rsp.getResponse().get("doc"); + SolrDocument controlDoc = (SolrDocument)controlRsp.getResponse().get("doc"); + + if (expectedResult.expectedDocs == 0) { + // could be null rather than 0 size, check against control that format is identical + assertNull("Should be no doc present: " + doc, doc); + assertNull("Should be no doc present: " 
+ controlDoc, controlDoc);
+      assertTrue((docList == null && controlDocList == null) ||
+          (docList.getNumFound() == 0 && controlDocList.getNumFound() == 0));
+    } else {
+      if (docList == null) {
+        assertNull(controlDocList);
+        assertNotNull(doc);
+        assertNotNull(controlDoc);
+      } else {
+        assertNotNull(controlDocList);
+        assertNull(doc);
+        assertNull(controlDoc);
+        assertEquals(expectedResult.expectedDocs, docList.getNumFound());
+        assertEquals(docList.getNumFound(), controlDocList.getNumFound());
+      }
+    }
+  }
+
+  private QueryResponse getIdResponse(ExpectedResult expectedResult) throws Exception {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    for (int i = 0; i < expectedResult.ids.length; ++i) {
+      params.add("id", expectedResult.ids[ i ]);
+    }
+    if (expectedResult.fl != null) {
+      params.add("fl", expectedResult.fl);
+    }
+    QueryRequest request = getRealTimeGetRequest(params);
+    return request.process(expectedResult.server);
+  }
+
+  private QueryResponse getIdsResponse(ExpectedResult expectedResult) throws Exception {
+    StringBuilder builder = new StringBuilder();
+    for (int i = 0; i < expectedResult.ids.length; ++i) {
+      if (i != 0) {
+        builder.append(",");
+      }
+      builder.append(expectedResult.ids[ i ]);
+    }
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.add("ids", builder.toString());
+    if (expectedResult.fl != null) {
+      params.add("fl", expectedResult.fl);
+    }
+    QueryRequest request = getRealTimeGetRequest(params);
+    return request.process(expectedResult.server);
+  }
+
+  private void assertIdVsIds(ExpectedResult expectedResult, ExpectedResult controlExpectedResult)
+      throws Exception {
+    // test specifying with "id"
+    QueryResponse idRsp = getIdResponse(expectedResult);
+    QueryResponse idControlRsp = getIdResponse(controlExpectedResult);
+    assertExpected(expectedResult, idRsp, idControlRsp);
+
+    // test specifying with "ids"
+    QueryResponse idsRsp = getIdsResponse(expectedResult);
+    QueryResponse idsControlRsp = getIdsResponse(controlExpectedResult);
+    assertExpected(expectedResult, idsRsp, idsControlRsp);
+  }
+
+  @Test
+  public void testIdvsIds() throws Exception {
+    final String collection = "testIdvsIds";
+    final String collectionControl = collection + "Control";
+    setupCollectionWithDocSecurity(collection);
+    setupCollectionWithoutDocSecurity(collectionControl);
+    CloudSolrServer server = getCloudSolrServer(collection);
+    CloudSolrServer serverControl = getCloudSolrServer(collectionControl);
+
+    try {
+      for (CloudSolrServer s : new CloudSolrServer [] {server, serverControl}) {
+        DocLevelGenerator generator = new DocLevelGenerator(AUTH_FIELD);
+        generator.generateDocs(s, 100, "junit_role", "admin_role", 2);
+      }
+
+      // check that control collection does not filter
+      assertIdVsIds(new ExpectedResult(serverControl, new String[] {"2"}, 1),
+          new ExpectedResult(serverControl, new String[] {"2"}, 1));
+
+      // single id
+      assertIdVsIds(new ExpectedResult(server, new String[] {"1"}, 1),
+          new ExpectedResult(serverControl, new String[] {"1"}, 1));
+
+      // single id (invalid)
+      assertIdVsIds(new ExpectedResult(server, new String[] {"bogusId"}, 0),
+          new ExpectedResult(serverControl, new String[] {"bogusId"}, 0));
+
+      // single id (no permission)
+      assertIdVsIds(new ExpectedResult(server, new String[] {"2"}, 0),
+          new ExpectedResult(serverControl, new String[] {"2fake"}, 0));
+
+      // multiple ids (some invalid, some valid, some no permission)
+      assertIdVsIds(new ExpectedResult(server, new String[] {"bogus1", "1", "2"}, 1),
+          new ExpectedResult(serverControl,
new String[] {"bogus1", "1", "bogus2"}, 1)); + assertIdVsIds(new ExpectedResult(server, new String[] {"bogus1", "1", "2", "3"}, 2), + new ExpectedResult(serverControl, new String[] {"bogus1", "1", "bogus2", "3"}, 2)); + + // multiple ids (all invalid) + assertIdVsIds(new ExpectedResult(server, new String[] {"bogus1", "bogus2", "bogus3"}, 0), + new ExpectedResult(serverControl, new String[] {"bogus1", "bogus2", "bogus3"}, 0)); + + // multiple ids (all no permission) + assertIdVsIds(new ExpectedResult(server, new String[] {"2", "4", "6"}, 0), + new ExpectedResult(serverControl, new String[] {"bogus2", "bogus4", "bogus6"}, 0)); + + } finally { + server.shutdown(); + serverControl.shutdown(); + } + } + + private void assertFlOnDocList(SolrDocumentList list, Set expectedIds, + List expectedFields) { + assertEquals("Doc list size should be: " + expectedIds.size(), expectedIds.size(), list.getNumFound()); + for (SolrDocument doc : list) { + expectedIds.contains(doc.get("id")); + for (String field : expectedFields) { + assertNotNull("Field: " + field + " should not be null in doc: " + doc, doc.get(field)); + } + assertEquals("doc should have: " + expectedFields.size() + " fields. Doc: " + doc, + expectedFields.size(), doc.getFieldNames().size()); + } + } + + private void assertFl(CloudSolrServer server, String [] ids, Set expectedIds, + String fl, List expectedFields) throws Exception { + { + QueryResponse idRsp = getIdResponse(new ExpectedResult(server, ids, expectedIds.size(), fl)); + SolrDocumentList idList = idRsp.getResults(); + assertFlOnDocList(idList, expectedIds, expectedFields); + } + { + QueryResponse idsRsp = getIdsResponse(new ExpectedResult(server, ids, expectedIds.size(), fl)); + SolrDocumentList idsList = idsRsp.getResults(); + assertFlOnDocList(idsList, expectedIds, expectedFields); + } + } + + @Test + public void testFl() throws Exception { + final String collection = "testFl"; + // FixMe: have to use one shard, because of a Solr bug where "fl" is not applied to + // multi-shard get requests + setupCollectionWithDocSecurity(collection, 1); + CloudSolrServer server = getCloudSolrServer(collection); + + try { + DocLevelGenerator generator = new DocLevelGenerator(AUTH_FIELD); + generator.generateDocs(server, 100, "junit_role", "admin_role", 2); + String [] ids = new String[] {"1", "3", "5"}; + + assertFl(server, ids, new HashSet(Arrays.asList(ids)), "id", Arrays.asList("id")); + assertFl(server, ids, new HashSet(Arrays.asList(ids)), null, Arrays.asList("id", "description", "_version_")); + // test transformer + assertFl(server, ids, new HashSet(Arrays.asList(ids)), "id,mydescription:description", Arrays.asList("id", "mydescription")); + } finally { + server.shutdown(); + } + } + + @Test + public void testNonCommitted() throws Exception { + final String collection = "testNonCommitted"; + setupCollectionWithDocSecurity(collection, 1); + CloudSolrServer server = getCloudSolrServer(collection); + + try { + DocLevelGenerator generator = new DocLevelGenerator(AUTH_FIELD); + generator.generateDocs(server, 100, "junit_role", "admin_role", 2); + + // make some uncommitted modifications and ensure they are reflected + server.deleteById("1"); + + SolrInputDocument doc2 = new SolrInputDocument(); + doc2.addField("id", "2"); + doc2.addField("description", "description2"); + doc2.addField(AUTH_FIELD, "admin_role"); + + SolrInputDocument doc3 = new SolrInputDocument(); + doc3.addField("id", "3"); + doc3.addField("description", "description3"); + doc3.addField(AUTH_FIELD, "junit_role"); + + 
SolrInputDocument doc200 = new SolrInputDocument();
+ doc200.addField("id", "200");
+ doc200.addField("description", "description200");
+ doc200.addField(AUTH_FIELD, "admin_role");
+ server.add(Arrays.asList(new SolrInputDocument [] {doc2, doc3, doc200}));
+
+ assertFl(server, new String[] {"1", "2", "3", "4", "5", "200"},
+ new HashSet<String>(Arrays.asList("2", "5", "200")), "id", Arrays.asList("id"));
+ } finally {
+ server.shutdown();
+ }
+ }
+
+ private void assertConcurrentOnDocList(SolrDocumentList list, String authField, String expectedAuthFieldValue) {
+ for (SolrDocument doc : list) {
+ Collection<Object> authFieldValues = doc.getFieldValues(authField);
+ assertNotNull(authField + " should not be null. Doc: " + doc, authFieldValues);
+
+ boolean foundAuthFieldValue = false;
+ for (Object obj : authFieldValues) {
+ if (obj.toString().equals(expectedAuthFieldValue)) {
+ foundAuthFieldValue = true;
+ break;
+ }
+ }
+ assertTrue("Did not find: " + expectedAuthFieldValue + " in doc: " + doc, foundAuthFieldValue);
+ }
+ }
+
+ private void assertConcurrent(CloudSolrServer server, String [] ids, String authField, String expectedAuthFieldValue)
+ throws Exception {
+ {
+ QueryResponse idRsp = getIdResponse(new ExpectedResult(server, ids, -1, null));
+ SolrDocumentList idList = idRsp.getResults();
+ assertConcurrentOnDocList(idList, authField, expectedAuthFieldValue);
+ }
+ {
+ QueryResponse idsRsp = getIdsResponse(new ExpectedResult(server, ids, -1, null));
+ SolrDocumentList idsList = idsRsp.getResults();
+ assertConcurrentOnDocList(idsList, authField, expectedAuthFieldValue);
+ }
+ }
+
+ @Test
+ public void testConcurrentChanges() throws Exception {
+ final String collection = "testConcurrentChanges";
+ // Ensure the auth field is stored so we can check a consistent doc is returned
+ final String authField = "sentry_auth_stored";
+ System.setProperty("sentry.auth.field", authField);
+ setupCollectionWithDocSecurity(collection, 1);
+ CloudSolrServer server = getCloudSolrServer(collection);
+ int numQueries = 5;
+
+ try {
+ DocLevelGenerator generator = new DocLevelGenerator(authField);
+ generator.generateDocs(server, 100, "junit_role", "admin_role", 2);
+
+ List<AuthFieldModifyThread> threads = new LinkedList<AuthFieldModifyThread>();
+ int docsToModify = 10;
+ for (int i = 0; i < docsToModify; ++i) {
+ SolrInputDocument doc = new SolrInputDocument();
+ doc.addField("id", Integer.toString(i));
+ doc.addField("description", "description" + Integer.toString(i));
+ doc.addField(authField, "junit_role");
+ server.add(doc);
+
+ threads.add(new AuthFieldModifyThread(server, doc,
+ authField, "junit_role", "admin_role"));
+ }
+ server.commit();
+
+ for (AuthFieldModifyThread thread : threads) {
+ thread.start();
+ }
+
+ // query
+ String [] ids = new String[docsToModify];
+ for (int j = 0; j < ids.length; ++j) {
+ ids[ j ] = Integer.toString(j);
+ }
+ for (int k = 0; k < numQueries; ++k) {
+ assertConcurrent(server, ids, authField, "admin_role");
+ }
+
+ for (AuthFieldModifyThread thread : threads) {
+ thread.setFinished();
+ thread.join();
+ }
+ } finally {
+ System.clearProperty("sentry.auth.field");
+ server.shutdown();
+ }
+ }
+
+ @Test
+ public void testSuperUser() throws Exception {
+ final String collection = "testSuperUser";
+ setupCollectionWithDocSecurity(collection, 1);
+ CloudSolrServer server = getCloudSolrServer(collection);
+ int docCount = 100;
+
+ try {
+ DocLevelGenerator generator = new DocLevelGenerator(AUTH_FIELD);
+ generator.generateDocs(server, docCount, "junit_role", "admin_role", 2);
+
+ setAuthenticationUser("solr");
+ String
[] ids = new String[docCount]; + for (int i = 0; i < docCount; ++i) { + ids[ i ] = Integer.toString(i); + } + QueryResponse response = getIdResponse(new ExpectedResult(server, ids, docCount)); + assertEquals("Wrong number of documents", docCount, response.getResults().getNumFound()); + } finally { + server.shutdown(); + } + } + + private class AuthFieldModifyThread extends Thread { + private CloudSolrServer server; + private SolrInputDocument doc; + private String authField; + private String authFieldValue0; + private String authFieldValue1; + private volatile boolean finished = false; + + private AuthFieldModifyThread(CloudSolrServer server, + SolrInputDocument doc, String authField, + String authFieldValue0, String authFieldValue1) { + this.server = server; + this.doc = doc; + this.authField = authField; + this.authFieldValue0 = authFieldValue0; + this.authFieldValue1 = authFieldValue1; + } + + @Override + public void run() { + while (!finished) { + if (rand.nextBoolean()) { + doc.setField(authField, authFieldValue0); + } else { + doc.setField(authField, authFieldValue1); + } + try { + server.add(doc); + } catch (SolrServerException sse) { + throw new RuntimeException(sse); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + } + } + + public void setFinished() { + finished = true; + } + } + + private static class ExpectedResult { + public final CloudSolrServer server; + public final String [] ids; + public final int expectedDocs; + public final String fl; + + public ExpectedResult(CloudSolrServer server, String [] ids, int expectedDocs) { + this(server, ids, expectedDocs, null); + } + + public ExpectedResult(CloudSolrServer server, String [] ids, int expectedDocs, String fl) { + this.server = server; + this.ids = ids; + this.expectedDocs = expectedDocs; + this.fl = fl; + } + } +} diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/AbstractSolrSentryTestWithDbProvider.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/AbstractSolrSentryTestWithDbProvider.java index 9438ee580..11f93d5cb 100644 --- a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/AbstractSolrSentryTestWithDbProvider.java +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/AbstractSolrSentryTestWithDbProvider.java @@ -18,9 +18,12 @@ package org.apache.sentry.tests.e2e.solr.db.integration; +import static org.apache.sentry.core.model.search.SearchModelAuthorizable.AuthorizableType.Collection; + import java.io.File; import java.io.FileOutputStream; import java.util.Comparator; +import java.util.List; import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.TimeoutException; @@ -32,12 +35,17 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.sentry.SentryUserException; import org.apache.sentry.binding.solr.HdfsTestUtil; import org.apache.sentry.binding.solr.conf.SolrAuthzConf.AuthzConfVars; +import org.apache.sentry.core.common.Action; import org.apache.sentry.core.model.search.SearchConstants; -import org.apache.sentry.provider.common.AuthorizationComponent; -import org.apache.sentry.provider.db.generic.service.thrift.SearchPolicyServiceClient; -import org.apache.sentry.provider.db.generic.service.thrift.SearchProviderBackend; +import 
org.apache.sentry.provider.db.generic.SentryGenericProviderBackend; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient; +import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory; +import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryGrantOption; +import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege; import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider; import org.apache.sentry.provider.file.PolicyFile; import org.apache.sentry.service.thrift.SentryService; @@ -52,6 +60,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; import com.google.common.collect.Sets; /** @@ -68,11 +77,13 @@ public class AbstractSolrSentryTestWithDbProvider extends AbstractSolrSentryTest protected static final String ADMIN_GROUP = "admin_group"; protected static final String ADMIN_ROLE = "admin_role"; protected static final String ADMIN_COLLECTION_NAME = "admin"; + protected static final String COMPONENT_SOLR = "solr"; + protected static final String SERVICE_NAME = SearchConstants.SENTRY_SEARCH_SERVICE_DEFAULT; protected static final Configuration conf = new Configuration(false); protected static SentryService server; - protected static SearchPolicyServiceClient client; + protected static SentryGenericServiceClient client; protected static File baseDir; protected static File hdfsDir; @@ -118,6 +129,7 @@ public static void setupConf() throws Exception { conf.set(ServerConfig.RPC_PORT, String.valueOf(PORT)); conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true"); + conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFilePath.getPath()); server = new SentryServiceFactory().create(conf); @@ -128,7 +140,8 @@ public static void setupConf() throws Exception { ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); conf.set(AuthzConfVars.AUTHZ_PROVIDER.getVar(), LocalGroupResourceAuthorizationProvider.class.getName()); - conf.set(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), SearchProviderBackend.class.getName()); + conf.set(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), + SentryGenericProviderBackend.class.getName()); conf.set(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), policyFilePath.getPath()); } @@ -157,6 +170,11 @@ public int compare(Class o1, Class o2) { public boolean equals(Object obj) { return true; } + + @Override + public int hashCode() { + return 17; + } }); extraRequestFilters.put(ModifiableUserAuthenticationFilter.class, "*"); @@ -192,7 +210,7 @@ public static void startSentryService() throws Exception { } public static void connectToSentryService() throws Exception { - client = new SearchPolicyServiceClient(conf); + client = SentryGenericServiceClientFactory.create(conf); } public static void stopAllService() throws Exception { @@ -260,16 +278,47 @@ public static void setGroupsAndRoles() throws Exception { writePolicyFile(); for (int i = 0; i < roles.length; i++) { - client.createRole(ADMIN_USER, roles[i]); - client.addRoleToGroups(ADMIN_USER, roles[i], Sets.newHashSet(groups[i])); + client.createRole(ADMIN_USER, roles[i], COMPONENT_SOLR); + client.addRoleToGroups(ADMIN_USER, roles[i], COMPONENT_SOLR, Sets.newHashSet(groups[i])); } /** * user[admin]->group[admin]->role[admin] * grant ALL privilege on collection ALL to role admin */ 
- client.createRole(ADMIN_USER, ADMIN_ROLE); - client.addRoleToGroups(ADMIN_USER, ADMIN_ROLE, Sets.newHashSet(ADMIN_GROUP)); - client.grantCollectionPrivilege(SearchConstants.ALL, ADMIN_USER, ADMIN_ROLE, SearchConstants.ALL); + client.createRole(ADMIN_USER, ADMIN_ROLE, COMPONENT_SOLR); + client.addRoleToGroups(ADMIN_USER, ADMIN_ROLE, COMPONENT_SOLR, Sets.newHashSet(ADMIN_GROUP)); + grantCollectionPrivilege(SearchConstants.ALL, ADMIN_USER, ADMIN_ROLE, SearchConstants.ALL); + } + + protected static void grantCollectionPrivilege(String collection, String requestor, + String roleName, String action) throws SentryUserException { + TSentryPrivilege tPrivilege = toTSentryPrivilege(collection, action); + client.grantPrivilege(requestor, roleName, COMPONENT_SOLR, tPrivilege); + } + + protected static void revokeCollectionPrivilege(String collection, String requestor, + String roleName, String action) throws SentryUserException { + TSentryPrivilege tPrivilege = toTSentryPrivilege(collection, action); + client.revokePrivilege(requestor, roleName, COMPONENT_SOLR, tPrivilege); + } + + protected static void dropCollectionPrivilege(String collection, String requestor) + throws SentryUserException { + final TSentryPrivilege tPrivilege = toTSentryPrivilege(collection, Action.ALL); + client.dropPrivilege(requestor, COMPONENT_SOLR, tPrivilege); + } + + private static TSentryPrivilege toTSentryPrivilege(String collection, String action) { + TSentryPrivilege tPrivilege = new TSentryPrivilege(); + tPrivilege.setComponent(COMPONENT_SOLR); + tPrivilege.setServiceName(SERVICE_NAME); + tPrivilege.setAction(action); + tPrivilege.setGrantOption(TSentryGrantOption.FALSE); + + List authorizables = Lists.newArrayList(new TAuthorizable(Collection.name(), + collection)); + tPrivilege.setAuthorizables(authorizables); + return tPrivilege; } -} \ No newline at end of file +} diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrAdminOperations.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrAdminOperations.java index 00a7a8995..c07b3b8ef 100644 --- a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrAdminOperations.java +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrAdminOperations.java @@ -17,6 +17,8 @@ package org.apache.sentry.tests.e2e.solr.db.integration; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.util.Arrays; @@ -24,13 +26,8 @@ import org.apache.sentry.core.model.search.SearchConstants; import org.apache.solr.common.params.CollectionParams.CollectionAction; import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.junit.Assert.assertTrue; public class TestSolrAdminOperations extends AbstractSolrSentryTestWithDbProvider { - private static final Logger LOG = LoggerFactory.getLogger(TestSolrAdminOperations.class); private static final String TEST_COLLECTION_NAME1 = "collection1"; private static final String COLLECTION_CONFIG_DIR = RESOURCES_DIR + File.separator + "collection1" + File.separator + "conf"; @@ -52,8 +49,8 @@ public void testAdminOperations() throws Exception { * user0->group0->role0 * grant ALL privilege on collection admin and collection1 to role0 */ - client.grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role0", SearchConstants.ALL); - 
client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.ALL); + grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role0", SearchConstants.ALL); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.ALL); verifyCollectionAdminOpPass(grantor, CollectionAction.CREATE, TEST_COLLECTION_NAME1); verifyCollectionAdminOpPass(grantor, CollectionAction.RELOAD, TEST_COLLECTION_NAME1); @@ -62,7 +59,7 @@ public void testAdminOperations() throws Exception { verifyCollectionAdminOpPass(grantor, CollectionAction.DELETE, TEST_COLLECTION_NAME1); //revoke UPDATE privilege on collection collection1 from role1, create collection1 will be failed - client.revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.UPDATE); + revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.UPDATE); verifyCollectionAdminOpFail(grantor, CollectionAction.CREATE, TEST_COLLECTION_NAME1); verifyCollectionAdminOpFail(grantor, CollectionAction.RELOAD, TEST_COLLECTION_NAME1); @@ -75,8 +72,8 @@ public void testAdminOperations() throws Exception { * grant UPDATE privilege on collection admin and collection1 to role1 */ grantor = "user1"; - client.grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role1", SearchConstants.UPDATE); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.UPDATE); + grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role1", SearchConstants.UPDATE); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.UPDATE); verifyCollectionAdminOpPass(grantor, CollectionAction.CREATE, TEST_COLLECTION_NAME1); verifyCollectionAdminOpPass(grantor, CollectionAction.RELOAD, TEST_COLLECTION_NAME1); @@ -85,7 +82,7 @@ public void testAdminOperations() throws Exception { verifyCollectionAdminOpPass(grantor, CollectionAction.DELETE, TEST_COLLECTION_NAME1); //revoke UPDATE privilege on collection admin from role1, create collection1 will be failed - client.revokeCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role1", SearchConstants.UPDATE); + revokeCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role1", SearchConstants.UPDATE); verifyCollectionAdminOpFail(grantor, CollectionAction.CREATE, TEST_COLLECTION_NAME1); verifyCollectionAdminOpFail(grantor, CollectionAction.RELOAD, TEST_COLLECTION_NAME1); verifyCollectionAdminOpFail(grantor, CollectionAction.CREATEALIAS, TEST_COLLECTION_NAME1); @@ -98,8 +95,8 @@ public void testAdminOperations() throws Exception { * grant QUERY privilege on collection admin and collection1 to role2 */ grantor = "user2"; - client.grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role2", SearchConstants.QUERY); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.QUERY); + grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role2", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.QUERY); verifyCollectionAdminOpFail(grantor, CollectionAction.CREATE, TEST_COLLECTION_NAME1); verifyCollectionAdminOpFail(grantor, CollectionAction.RELOAD, TEST_COLLECTION_NAME1); @@ -108,11 +105,11 @@ public void testAdminOperations() throws Exception { verifyCollectionAdminOpFail(grantor, CollectionAction.DELETE, TEST_COLLECTION_NAME1); //grant UPDATE privilege on collection collection1 to role2, create collection1 will be failed - 
client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.UPDATE); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.UPDATE); verifyCollectionAdminOpFail(grantor, CollectionAction.CREATE, TEST_COLLECTION_NAME1); //grant UPDATE privilege on collection admin to role2, create collection1 will be successful. - client.grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role2", SearchConstants.UPDATE); + grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role2", SearchConstants.UPDATE); verifyCollectionAdminOpPass(grantor, CollectionAction.CREATE, TEST_COLLECTION_NAME1); verifyCollectionAdminOpPass(grantor, CollectionAction.RELOAD, TEST_COLLECTION_NAME1); @@ -133,8 +130,8 @@ public void testAdminOperations() throws Exception { * grant UPDATE privilege on collection admin to role3 * grant QUERY privilege on collection collection1 to role3 */ - client.grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role3", SearchConstants.ALL); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role3", SearchConstants.ALL); + grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role3", SearchConstants.ALL); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role3", SearchConstants.ALL); verifyCollectionAdminOpPass(grantor, CollectionAction.CREATE, TEST_COLLECTION_NAME1); verifyCollectionAdminOpPass(grantor, CollectionAction.RELOAD, TEST_COLLECTION_NAME1); @@ -159,24 +156,27 @@ public void testSyncPrivilegesWithDeleteCollection() throws Exception { * Grant ALL privilege on collection admin to role0 * user0 can execute create & delete collection1 operation */ - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.ALL); - client.grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role0", SearchConstants.ALL); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.ALL); + grantCollectionPrivilege(ADMIN_COLLECTION_NAME, ADMIN_USER, "role0", SearchConstants.ALL); assertTrue("user0 has one privilege on collection admin", - client.listPrivilegesByRoleName("user0", "role0", Arrays.asList(new Collection(ADMIN_COLLECTION_NAME))).size() == 1); + client.listPrivilegesByRoleName("user0", "role0", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(ADMIN_COLLECTION_NAME))).size() == 1); assertTrue("user0 has one privilege on collection collection1", - client.listPrivilegesByRoleName("user0", "role0", Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 1); + client.listPrivilegesByRoleName("user0", "role0", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 1); /** * user1->group1->role1 * grant QUERY privilege on collection collection1 to role1 */ - client.listPrivilegesByRoleName("user0", "role0", null); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.ALL); + client.listPrivilegesByRoleName("user0", "role0", COMPONENT_SOLR, SERVICE_NAME, null); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.ALL); assertTrue("user1 has one privilege record", - client.listPrivilegesByRoleName("user1", "role1", Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 1); + client.listPrivilegesByRoleName("user1", "role1", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 1); /** * create collection collection1 @@ -189,32 +189,36 
@@ public void testSyncPrivilegesWithDeleteCollection() throws Exception { //check the user0 assertTrue("user0 has one privilege on collection admin", - client.listPrivilegesByRoleName("user0", "role0", Arrays.asList(new Collection(ADMIN_COLLECTION_NAME))).size() == 1); + client.listPrivilegesByRoleName("user0", "role0", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(ADMIN_COLLECTION_NAME))).size() == 1); assertTrue("user0 has no privilege on collection collection1", - client.listPrivilegesByRoleName("user0", "role0", Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 0); + client.listPrivilegesByRoleName("user0", "role0", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 0); //check the user1 assertTrue("user1 has no privilege on collection collection1", - client.listPrivilegesByRoleName("user1", "role1", Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 0); - + client.listPrivilegesByRoleName("user1", "role1", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 0); /** * user2->group2->role2 * Grant UPDATE privilege on collection collection1 to role2 */ - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.UPDATE); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.UPDATE); assertTrue("user2 has one privilege on collection collection1", - client.listPrivilegesByRoleName("user2", "role2", Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 1); + client.listPrivilegesByRoleName("user2", "role2", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 1); /** * user3->group3->role3 * grant QUERY privilege on collection collection1 to role3 */ - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role3", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role3", SearchConstants.QUERY); assertTrue("user1 has one privilege record", - client.listPrivilegesByRoleName("user3", "role3", Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 1); + client.listPrivilegesByRoleName("user3", "role3", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 1); /** * create collection collection1 @@ -227,10 +231,12 @@ public void testSyncPrivilegesWithDeleteCollection() throws Exception { //check the user2 assertTrue("user2 has no privilege on collection collection1", - client.listPrivilegesByRoleName("user2", "role2", Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 0); + client.listPrivilegesByRoleName("user2", "role2", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 0); //check the user3 assertTrue("user3 has no privilege on collection collection1", - client.listPrivilegesByRoleName("user3", "role3", Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 0); + client.listPrivilegesByRoleName("user3", "role3", COMPONENT_SOLR, SERVICE_NAME, + Arrays.asList(new Collection(TEST_COLLECTION_NAME1))).size() == 0); } -} \ No newline at end of file +} diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrDocLevelOperations.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrDocLevelOperations.java index 193743b63..7f1fdfdbe 100644 --- 
a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrDocLevelOperations.java +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrDocLevelOperations.java @@ -64,14 +64,14 @@ public void testDocLevelOperations() throws Exception { // as user0 setAuthenticationUser("user0"); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.QUERY); rsp = server.query(query); docList = rsp.getResults(); assertEquals(NUM_DOCS/4, rsp.getResults().getNumFound()); //as user1 setAuthenticationUser("user1"); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.QUERY); rsp = server.query(query); docList = rsp.getResults(); assertEquals(NUM_DOCS/4, rsp.getResults().getNumFound()); docList = rsp.getResults(); @@ -79,14 +79,14 @@ public void testDocLevelOperations() throws Exception { //as user2 setAuthenticationUser("user2"); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.QUERY); rsp = server.query(query); docList = rsp.getResults(); assertEquals(NUM_DOCS/4, rsp.getResults().getNumFound()); //as user3 setAuthenticationUser("user3"); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role3", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role3", SearchConstants.QUERY); rsp = server.query(query); docList = rsp.getResults(); assertEquals(NUM_DOCS/4, rsp.getResults().getNumFound()); @@ -106,7 +106,7 @@ public void updateDocsTest() throws Exception { CloudSolrServer server = getCloudSolrServer(TEST_COLLECTION_NAME1); try { setAuthenticationUser("user0"); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.QUERY); String docIdStr = Long.toString(1); // verify we can't view one of the odd documents diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrQueryOperations.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrQueryOperations.java index afe69122a..3eb6c0f02 100644 --- a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrQueryOperations.java +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrQueryOperations.java @@ -18,16 +18,14 @@ import java.io.File; +import org.apache.sentry.core.model.search.Collection; import org.apache.sentry.core.model.search.SearchConstants; import org.apache.solr.common.SolrInputDocument; import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import com.google.common.collect.Sets; +import com.google.common.collect.Lists; public class TestSolrQueryOperations extends AbstractSolrSentryTestWithDbProvider { - private static final Logger LOG = LoggerFactory.getLogger(TestSolrQueryOperations.class); private static final String TEST_COLLECTION_NAME1 = "collection1"; private static final String COLLECTION_CONFIG_DIR = RESOURCES_DIR + File.separator + 
"collection1" + File.separator + "conf"; @@ -54,13 +52,13 @@ public void testQueryOperations() throws Exception { * grant ALL privilege on collection collection1 to role0 */ String grantor = "user0"; - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.ALL); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.ALL); verifyQueryPass(grantor, TEST_COLLECTION_NAME1, ALL_DOCS); - client.revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.UPDATE); + revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.UPDATE); verifyQueryPass(grantor, TEST_COLLECTION_NAME1, ALL_DOCS); - client.revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.QUERY); + revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.QUERY); verifyQueryFail(grantor, TEST_COLLECTION_NAME1, ALL_DOCS); /** @@ -68,10 +66,10 @@ public void testQueryOperations() throws Exception { * grant QUERY privilege on collection collection1 to role1 */ grantor = "user1"; - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.QUERY); verifyQueryPass(grantor, TEST_COLLECTION_NAME1, ALL_DOCS); - client.revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.QUERY); + revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.QUERY); verifyQueryFail(grantor, TEST_COLLECTION_NAME1, ALL_DOCS); /** @@ -79,13 +77,15 @@ public void testQueryOperations() throws Exception { * grant UPDATE privilege on collection collection1 to role2 */ grantor = "user2"; - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.UPDATE); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.UPDATE); verifyQueryFail(grantor, TEST_COLLECTION_NAME1, ALL_DOCS); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.QUERY); verifyQueryPass(grantor, TEST_COLLECTION_NAME1, ALL_DOCS); - client.renameCollectionPrivilege(TEST_COLLECTION_NAME1, "new_" + TEST_COLLECTION_NAME1, ADMIN_USER); + client.renamePrivilege(ADMIN_USER, COMPONENT_SOLR, SERVICE_NAME, + Lists.newArrayList(new Collection(TEST_COLLECTION_NAME1)), + Lists.newArrayList(new Collection("new_" + TEST_COLLECTION_NAME1))); verifyQueryFail(grantor, TEST_COLLECTION_NAME1, ALL_DOCS); grantor = "user3"; @@ -93,4 +93,4 @@ public void testQueryOperations() throws Exception { deleteCollection(TEST_COLLECTION_NAME1); } -} \ No newline at end of file +} diff --git a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrUpdateOperations.java b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrUpdateOperations.java index de189792d..94123259a 100644 --- a/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrUpdateOperations.java +++ b/sentry-tests/sentry-tests-solr/src/test/java/org/apache/sentry/tests/e2e/solr/db/integration/TestSolrUpdateOperations.java @@ -21,13 +21,8 @@ import org.apache.sentry.core.model.search.SearchConstants; import org.apache.solr.common.SolrInputDocument; import org.junit.Test; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Sets; public class TestSolrUpdateOperations extends AbstractSolrSentryTestWithDbProvider { - private static final Logger LOG = LoggerFactory.getLogger(TestSolrUpdateOperations.class); private static final String TEST_COLLECTION_NAME1 = "collection1"; private static final String COLLECTION_CONFIG_DIR = RESOURCES_DIR + File.separator + "collection1" + File.separator + "conf"; @@ -51,13 +46,13 @@ public void testUpdateOperations() throws Exception { * grant ALL privilege on collection collection1 to role0 */ String grantor = "user0"; - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.ALL); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role0", SearchConstants.ALL); cleanSolrCollection(TEST_COLLECTION_NAME1); verifyUpdatePass(grantor, TEST_COLLECTION_NAME1, solrInputDoc); verifyDeletedocsPass(grantor, TEST_COLLECTION_NAME1, false); //drop privilege - client.dropCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER); + dropCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER); verifyUpdateFail(grantor, TEST_COLLECTION_NAME1, solrInputDoc); uploadSolrDoc(TEST_COLLECTION_NAME1, solrInputDoc); verifyDeletedocsFail(grantor, TEST_COLLECTION_NAME1, false); @@ -67,13 +62,13 @@ public void testUpdateOperations() throws Exception { * grant UPDATE privilege on collection collection1 to role1 */ grantor = "user1"; - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.UPDATE); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.UPDATE); cleanSolrCollection(TEST_COLLECTION_NAME1); verifyUpdatePass(grantor, TEST_COLLECTION_NAME1, solrInputDoc); verifyDeletedocsPass(grantor, TEST_COLLECTION_NAME1, false); //revoke privilege - client.revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.ALL); + revokeCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role1", SearchConstants.ALL); verifyUpdateFail(grantor, TEST_COLLECTION_NAME1, solrInputDoc); uploadSolrDoc(TEST_COLLECTION_NAME1, solrInputDoc); verifyDeletedocsFail(grantor, TEST_COLLECTION_NAME1, false); @@ -83,13 +78,13 @@ public void testUpdateOperations() throws Exception { * grant QUERY privilege on collection collection1 to role2 */ grantor = "user2"; - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.QUERY); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.QUERY); cleanSolrCollection(TEST_COLLECTION_NAME1); verifyUpdateFail(grantor, TEST_COLLECTION_NAME1, solrInputDoc); uploadSolrDoc(TEST_COLLECTION_NAME1, solrInputDoc); verifyDeletedocsFail(grantor, TEST_COLLECTION_NAME1, false); - client.grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.ALL); + grantCollectionPrivilege(TEST_COLLECTION_NAME1, ADMIN_USER, "role2", SearchConstants.ALL); cleanSolrCollection(TEST_COLLECTION_NAME1); verifyUpdatePass(grantor, TEST_COLLECTION_NAME1, solrInputDoc); verifyDeletedocsPass(grantor, TEST_COLLECTION_NAME1, false); @@ -102,4 +97,4 @@ public void testUpdateOperations() throws Exception { deleteCollection(TEST_COLLECTION_NAME1); } -} \ No newline at end of file +} diff --git a/sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/schema.xml b/sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/schema.xml index 66449ffe5..c8bc32fc3 100644 --- 
a/sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/schema.xml +++ b/sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/schema.xml @@ -216,6 +216,7 @@ + diff --git a/sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/solrconfig-doclevel.xml b/sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/solrconfig-doclevel.xml index af1184d7c..f07d494ea 100644 --- a/sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/solrconfig-doclevel.xml +++ b/sentry-tests/sentry-tests-solr/src/test/resources/solr/collection1/conf/solrconfig-doclevel.xml @@ -387,14 +387,14 @@ 'soft' commit which only ensures that changes are visible but does not ensure that data is synced to disk. This is faster and more near-realtime friendly than a hard commit. - --> - - ${solr.autoSoftCommit.maxTime:1000} + --> + + ${solr.autoSoftCommit.maxTime:20000} - + - + true json @@ -1351,14 +1351,17 @@ true - sentry_auth + ${sentry.auth.field:sentry_auth} OR - + + 4.0.0 + + + org.apache.sentry + sentry-tests + 1.7.0-incubating-SNAPSHOT + + + sentry-tests-sqoop + Sentry Sqoop Tests + end to end tests for sentry-sqoop integration + + + + junit + junit + + + log4j + log4j + + + org.apache.sqoop + test + + + org.apache.hadoop + hadoop-common + + + javax.servlet + servlet-api + + + + + org.apache.hadoop + hadoop-minicluster + + + javax.servlet + servlet-api + + + + + org.eclipse.jetty + jetty-servlet + 8.1.10.v20130312 + + + org.eclipse.jetty + jetty-server + 8.1.10.v20130312 + + + org.apache.sentry + sentry-provider-db + test + + + org.apache.sentry + sentry-provider-file + test + + + org.apache.sentry + sentry-binding-sqoop + + + org.apache.sentry + sentry-core-model-sqoop + + + com.google.guava + guava + + + + + download-sqoop2 + + true + !skipTests + + + + + org.apache.maven.plugins + maven-antrun-plugin + + true + + + + download-sqoop2 + generate-sources + + run + + + + + set -e + set -x + /bin/pwd + BASE_DIR=./target + DOWNLOAD_DIR=./thirdparty + download() { + url=$1; + packageName=$2 + if [[ ! -f $DOWNLOAD_DIR/$packageName ]] + then + wget --no-check-certificate -nv -O $DOWNLOAD_DIR/$packageName $url + fi + } + mkdir -p $DOWNLOAD_DIR + download "https://repository.apache.org/content/repositories/snapshots/org/apache/sqoop/sqoop-server/2.0.0-SNAPSHOT/sqoop-server-2.0.0-20150530.005523-4.war" sqoop.war + download "http://archive.apache.org/dist/tomcat/tomcat-6/v6.0.36/bin/apache-tomcat-6.0.36.zip" apache-tomcat-6.0.36.zip + + + + + + + + + + + + + + diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/AbstractSqoopSentryTestBase.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/AbstractSqoopSentryTestBase.java new file mode 100644 index 000000000..8a01e1c5c --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/AbstractSqoopSentryTestBase.java @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This class is used to test the Sqoop integration with Sentry.
+ * It will set up a miniSqoopCluster and Sentry service in a JVM process.
+ */
+package org.apache.sentry.tests.e2e.sqoop;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.ArrayList;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.sentry.core.model.sqoop.SqoopActionConstant;
+import org.apache.sentry.provider.db.generic.SentryGenericProviderBackend;
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClient;
+import org.apache.sentry.provider.db.generic.service.thrift.SentryGenericServiceClientFactory;
+import org.apache.sentry.provider.db.generic.service.thrift.TAuthorizable;
+import org.apache.sentry.provider.db.generic.service.thrift.TSentryPrivilege;
+import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider;
+import org.apache.sentry.provider.file.PolicyFile;
+import org.apache.sentry.service.thrift.SentryService;
+import org.apache.sentry.service.thrift.SentryServiceFactory;
+import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
+import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.apache.sentry.sqoop.conf.SqoopAuthConf.AuthzConfVars;
+import org.apache.sqoop.common.test.utils.NetworkUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Sets;
+
+public class AbstractSqoopSentryTestBase {
+ private static final String SERVER_HOST = NetUtils
+ .createSocketAddr("localhost:80").getAddress().getCanonicalHostName();
+
+ protected static final String COMPONENT = "sqoop";
+ protected static final String ADMIN_USER = "sqoop";
+ protected static final String ADMIN_GROUP = "sqoop";
+ protected static final String ADMIN_ROLE = "sqoop";
+ protected static final String SQOOP_SERVER_NAME = "sqoopServer1";
+ /** test users, groups and roles */
+ protected static final String USER1 = StaticUserGroupRole.USER_1;
+ protected static final String USER2 = StaticUserGroupRole.USER_2;
+ protected static final String USER3 = StaticUserGroupRole.USER_3;
+ protected static final String USER4 = StaticUserGroupRole.USER_4;
+ protected static final String USER5 = StaticUserGroupRole.USER_5;
+
+ protected static final String GROUP1 = StaticUserGroupRole.GROUP_1;
+ protected static final String GROUP2 = StaticUserGroupRole.GROUP_2;
+ protected static final String GROUP3 = StaticUserGroupRole.GROUP_3;
+ protected static final String GROUP4 = StaticUserGroupRole.GROUP_4;
+ protected static final String GROUP5 = StaticUserGroupRole.GROUP_5;
+
+ protected static final String ROLE1 = StaticUserGroupRole.ROLE_1;
+ protected static final String ROLE2 = StaticUserGroupRole.ROLE_2;
+ protected static final String ROLE3 =
StaticUserGroupRole.ROLE_3;
+ protected static final String ROLE4 = StaticUserGroupRole.ROLE_4;
+ protected static final String ROLE5 = StaticUserGroupRole.ROLE_5;
+
+ protected static SentryService server;
+ protected static TomcatSqoopRunner sqoopServerRunner;
+
+ protected static File baseDir;
+ protected static File sqoopDir;
+ protected static File dbDir;
+ protected static File policyFilePath;
+
+ protected static PolicyFile policyFile;
+
+ @BeforeClass
+ public static void beforeTestEndToEnd() throws Exception {
+ setupConf();
+ startSentryService();
+ setUserGroups();
+ setAdminPrivilege();
+ startSqoopWithSentryEnable();
+ }
+
+ @AfterClass
+ public static void afterTestEndToEnd() throws Exception {
+ if (server != null) {
+ server.stop();
+ }
+ if (sqoopServerRunner != null) {
+ sqoopServerRunner.stop();
+ }
+
+ FileUtils.deleteDirectory(baseDir);
+ }
+
+ public static void setupConf() throws Exception {
+ baseDir = createTempDir();
+ sqoopDir = new File(baseDir, "sqoop");
+ dbDir = new File(baseDir, "sentry_policy_db");
+ policyFilePath = new File(baseDir, "local_policy_file.ini");
+ policyFile = new PolicyFile();
+
+ /** set the configuration for Sentry Service */
+ Configuration conf = new Configuration();
+
+ conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE);
+ conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false");
+ conf.set(ServerConfig.ADMIN_GROUPS, Joiner.on(",").join(ADMIN_GROUP,
+ UserGroupInformation.getLoginUser().getPrimaryGroupName()));
+ conf.set(ServerConfig.RPC_ADDRESS, SERVER_HOST);
+ conf.set(ServerConfig.RPC_PORT, String.valueOf(NetworkUtils.findAvailablePort()));
+ conf.set(ServerConfig.SENTRY_STORE_JDBC_URL,
+ "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true");
+ conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy");
+ conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING,
+ ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);
+ conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE,
+ policyFilePath.getPath());
+ server = new SentryServiceFactory().create(conf);
+ }
+
+ public static File createTempDir() {
+ File baseDir = new File(System.getProperty("java.io.tmpdir"));
+ String baseName = "sqoop-e2e-";
+ File tempDir = new File(baseDir, baseName + UUID.randomUUID().toString());
+ if (tempDir.mkdir()) {
+ return tempDir;
+ }
+ throw new IllegalStateException("Failed to create temp directory");
+ }
+
+ public static void startSentryService() throws Exception {
+ server.start();
+ final long start = System.currentTimeMillis();
+ while(!server.isRunning()) {
+ Thread.sleep(1000);
+ if(System.currentTimeMillis() - start > 60000L) {
+ throw new TimeoutException("Server did not start after 60 seconds");
+ }
+ }
+ }
+
+ public static void startSqoopWithSentryEnable() throws Exception {
+ File sentrySitePath = new File(baseDir, "sentry-site.xml");
+ getClientConfig().writeXml(new FileOutputStream(sentrySitePath));
+ sqoopServerRunner = new TomcatSqoopRunner(sqoopDir.toString(), SQOOP_SERVER_NAME,
+ sentrySitePath.toURI().toURL().toString());
+ sqoopServerRunner.start();
+ }
+
+ private static Configuration getClientConfig() {
+ Configuration conf = new Configuration();
+ /** set the Sentry client configuration for Sqoop Service integration */
+ conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE);
+ conf.set(ClientConfig.SERVER_RPC_ADDRESS, server.getAddress().getHostName());
+ conf.set(ClientConfig.SERVER_RPC_PORT, String.valueOf(server.getAddress().getPort()));
+
+ conf.set(AuthzConfVars.AUTHZ_PROVIDER.getVar(),
+ LocalGroupResourceAuthorizationProvider.class.getName()); + conf.set(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), + SentryGenericProviderBackend.class.getName()); + conf.set(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), policyFilePath.getPath()); + conf.set(AuthzConfVars.AUTHZ_TESTING_MODE.getVar(), "true"); + return conf; + } + + public static void setUserGroups() throws Exception { + for (String user : StaticUserGroupRole.getUsers()) { + Set groups = StaticUserGroupRole.getGroups(user); + policyFile.addGroupsToUser(user, + groups.toArray(new String[groups.size()])); + } + policyFile.addGroupsToUser(ADMIN_USER, ADMIN_GROUP); + UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + policyFile.addGroupsToUser(loginUser.getShortUserName(), loginUser.getGroupNames()); + policyFile.write(policyFilePath); + } + + public static void setAdminPrivilege() throws Exception { + SentryGenericServiceClient sentryClient = null; + try { + /** grant all privilege to admin user */ + sentryClient = SentryGenericServiceClientFactory.create(getClientConfig()); + sentryClient.createRoleIfNotExist(ADMIN_USER, ADMIN_ROLE, COMPONENT); + sentryClient.addRoleToGroups(ADMIN_USER, ADMIN_ROLE, COMPONENT, Sets.newHashSet(ADMIN_GROUP)); + sentryClient.grantPrivilege(ADMIN_USER, ADMIN_ROLE, COMPONENT, + new TSentryPrivilege(COMPONENT, SQOOP_SERVER_NAME, new ArrayList(), + SqoopActionConstant.ALL)); + } finally { + if (sentryClient != null) { + sentryClient.close(); + } + } + } + + public static void assertCausedMessage(Exception e, String message) { + assertTrue(e.getCause().getMessage().contains(message)); + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/StaticUserGroupRole.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/StaticUserGroupRole.java new file mode 100644 index 000000000..e51ee00ab --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/StaticUserGroupRole.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.sentry.tests.e2e.sqoop;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+
+public class StaticUserGroupRole {
+ public static final String USER_1 = "user1";
+ public static final String USER_2 = "user2";
+ public static final String USER_3 = "user3";
+ public static final String USER_4 = "user4";
+ public static final String USER_5 = "user5";
+
+ public static final String GROUP_1 = "group1";
+ public static final String GROUP_2 = "group2";
+ public static final String GROUP_3 = "group3";
+ public static final String GROUP_4 = "group4";
+ public static final String GROUP_5 = "group5";
+
+ public static final String ROLE_1 = "role1";
+ public static final String ROLE_2 = "role2";
+ public static final String ROLE_3 = "role3";
+ public static final String ROLE_4 = "role4";
+ public static final String ROLE_5 = "role5";
+
+ private static Map<String, Set<String>> userToGroupsMapping =
+ new HashMap<String, Set<String>>();
+
+ static {
+ userToGroupsMapping.put(USER_1, Sets.newHashSet(GROUP_1));
+ userToGroupsMapping.put(USER_2, Sets.newHashSet(GROUP_2));
+ userToGroupsMapping.put(USER_3, Sets.newHashSet(GROUP_3));
+ userToGroupsMapping.put(USER_4, Sets.newHashSet(GROUP_4));
+ userToGroupsMapping.put(USER_5, Sets.newHashSet(GROUP_5));
+ }
+
+ public static Set<String> getUsers() {
+ return userToGroupsMapping.keySet();
+ }
+
+ public static Set<String> getGroups(String user) {
+ return userToGroupsMapping.get(user);
+ }
+}
diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestConnectorEndToEnd.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestConnectorEndToEnd.java
new file mode 100644
index 000000000..27f14209a
--- /dev/null
+++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestConnectorEndToEnd.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.sqoop;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.sentry.core.model.sqoop.SqoopActionConstant;
+import org.apache.sqoop.client.SqoopClient;
+import org.apache.sqoop.model.MConnector;
+import org.apache.sqoop.model.MPrincipal;
+import org.apache.sqoop.model.MPrivilege;
+import org.apache.sqoop.model.MResource;
+import org.apache.sqoop.model.MRole;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestConnectorEndToEnd extends AbstractSqoopSentryTestBase {
+ private static String JDBC_CONNECTOR_NAME = "generic-jdbc-connector";
+ private static String HDFS_CONNECTOR_NAME = "hdfs-connector";
+
+ @Test
+ public void testShowAllConnector() throws Exception {
+ // USER3 initially has no privilege on any Sqoop resource
+ SqoopClient client = sqoopServerRunner.getSqoopClient(USER3);
+ assertTrue(client.getConnectors().size() == 0);
+ /**
+ * ADMIN_USER grant read action privilege on connector all to role ROLE3
+ * ADMIN_USER grant role ROLE3 to group GROUP3
+ */
+ client = sqoopServerRunner.getSqoopClient(ADMIN_USER);
+ MRole role3 = new MRole(ROLE3);
+ MPrincipal group3 = new MPrincipal(GROUP3, MPrincipal.TYPE.GROUP);
+ MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR);
+ MPrivilege readPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false);
+ client.createRole(role3);
+ client.grantRole(Lists.newArrayList(role3), Lists.newArrayList(group3));
+ client.grantPrivilege(Lists.newArrayList(new MPrincipal(role3.getName(), MPrincipal.TYPE.ROLE)),
+ Lists.newArrayList(readPriv));
+
+ // check USER3 now has the read privilege on all connectors
+ client = sqoopServerRunner.getSqoopClient(USER3);
+ assertTrue(client.getConnectors().size() > 0);
+ }
+
+ @Test
+ public void testShowSpecificConnector() throws Exception {
+ // USER1 and USER2 initially have no privilege on any Sqoop resource
+ SqoopClient client = sqoopServerRunner.getSqoopClient(USER1);
+ assertTrue(client.getConnectors().size() == 0);
+ client = sqoopServerRunner.getSqoopClient(USER2);
+ assertTrue(client.getConnectors().size() == 0);
+
+ /**
+ * ADMIN_USER grant read action privilege on hdfs connector to role ROLE1
+ * ADMIN_USER grant read action privilege on jdbc connector to role ROLE2
+ */
+ client = sqoopServerRunner.getSqoopClient(ADMIN_USER);
+ MConnector hdfsConnector = client.getConnector(HDFS_CONNECTOR_NAME);
+ MConnector jdbcConnector = client.getConnector(JDBC_CONNECTOR_NAME);
+
+ MRole role1 = new MRole(ROLE1);
+ MPrincipal group1 = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP);
+ MPrivilege readHdfsPriv = new MPrivilege(new MResource(String.valueOf(hdfsConnector.getPersistenceId()), MResource.TYPE.CONNECTOR),
+ SqoopActionConstant.READ, false);
+ client.createRole(role1);
+ client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1));
+ client.grantPrivilege(Lists.newArrayList(new MPrincipal(role1.getName(), MPrincipal.TYPE.ROLE)),
+ Lists.newArrayList(readHdfsPriv));
+
+ MRole role2 = new MRole(ROLE2);
+ MPrincipal group2 = new MPrincipal(GROUP2, MPrincipal.TYPE.GROUP);
+ MPrivilege readJdbcPriv = new MPrivilege(new MResource(String.valueOf(jdbcConnector.getPersistenceId()), MResource.TYPE.CONNECTOR),
+ SqoopActionConstant.READ, false);
+ client.createRole(role2);
+ client.grantRole(Lists.newArrayList(role2), Lists.newArrayList(group2));
+ client.grantPrivilege(Lists.newArrayList(new MPrincipal(role2.getName(), MPrincipal.TYPE.ROLE)),
+ Lists.newArrayList(readJdbcPriv));
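+
+ // With both grants in place, each user should now see exactly one connector:
+ // the one readable by the role attached to that user's group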
+ + client = sqoopServerRunner.getSqoopClient(USER1); + assertTrue(client.getConnectors().size() == 1); + // user1 can show hdfs connector + assertTrue(client.getConnector(HDFS_CONNECTOR_NAME) != null); + // user1 can't show jdbc connector + assertTrue(client.getConnector(JDBC_CONNECTOR_NAME) == null); + + client = sqoopServerRunner.getSqoopClient(USER2); + assertTrue(client.getConnectors().size() == 1); + // user2 can show jdbc connector + assertTrue(client.getConnector(JDBC_CONNECTOR_NAME) != null); + // user2 can't show hdfs connector + assertTrue(client.getConnector(HDFS_CONNECTOR_NAME) == null); + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestGrantPrivilege.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestGrantPrivilege.java new file mode 100644 index 000000000..8c7753ed5 --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestGrantPrivilege.java @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.sentry.tests.e2e.sqoop;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.util.List;
+
+import org.apache.sentry.core.model.sqoop.SqoopActionConstant;
+import org.apache.sentry.sqoop.SentrySqoopError;
+import org.apache.sqoop.client.SqoopClient;
+import org.apache.sqoop.model.MPrincipal;
+import org.apache.sqoop.model.MPrivilege;
+import org.apache.sqoop.model.MResource;
+import org.apache.sqoop.model.MRole;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestGrantPrivilege extends AbstractSqoopSentryTestBase {
+
+ @Test
+ public void testNotSupportGrantPrivilegeToUser() throws Exception {
+ SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER);
+ MPrincipal user1 = new MPrincipal("not_support_grant_user_1", MPrincipal.TYPE.USER);
+ MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR);
+ MPrivilege readPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false);
+ try {
+ client.grantPrivilege(Lists.newArrayList(user1), Lists.newArrayList(readPriv));
+ fail("expected a not-supported exception to happen");
+ } catch (Exception e) {
+ assertCausedMessage(e, SentrySqoopError.GRANT_REVOKE_PRIVILEGE_NOT_SUPPORT_FOR_PRINCIPAL);
+ }
+ }
+
+ @Test
+ public void testNotSupportGrantPrivilegeToGroup() throws Exception {
+ SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER);
+ MPrincipal group1 = new MPrincipal("not_support_grant_group_1", MPrincipal.TYPE.GROUP);
+ MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR);
+ MPrivilege readPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false);
+ try {
+ client.grantPrivilege(Lists.newArrayList(group1), Lists.newArrayList(readPriv));
+ fail("expected a not-supported exception to happen");
+ } catch (Exception e) {
+ assertCausedMessage(e, SentrySqoopError.GRANT_REVOKE_PRIVILEGE_NOT_SUPPORT_FOR_PRINCIPAL);
+ }
+ }
+
+ @Test
+ public void testGrantPrivilege() throws Exception {
+ /**
+ * user1 belongs to group group1
+ * admin user grant role role1 to group group1
+ * admin user grant read privilege on connector all to role role1
+ */
+ SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER);
+ MRole role1 = new MRole(ROLE1);
+ MPrincipal group1Princ = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP);
+ MPrincipal role1Princ = new MPrincipal(ROLE1, MPrincipal.TYPE.ROLE);
+ MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR);
+ MPrivilege readPrivilege = new MPrivilege(allConnector, SqoopActionConstant.READ, false);
+ client.createRole(role1);
+ client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1Princ));
+ client.grantPrivilege(Lists.newArrayList(role1Princ), Lists.newArrayList(readPrivilege));
+
+ // check user1 has privilege on role1
+ client = sqoopServerRunner.getSqoopClient(USER1);
+ assertTrue(client.getPrivilegesByPrincipal(role1Princ, allConnector).size() == 1);
+ }
+
+ @Test
+ public void testGrantPrivilegeTwice() throws Exception {
+ /**
+ * user2 belongs to group group2
+ * admin user grant role role2 to group group2
+ * admin user grant write privilege on connector all to role role2
+ */
+ SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER);
+ MRole role2 = new MRole(ROLE2);
+ MPrincipal group2Princ = new MPrincipal(GROUP2, MPrincipal.TYPE.GROUP);
+ MPrincipal role2Princ = new MPrincipal(ROLE2,
MPrincipal.TYPE.ROLE); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege writePrivilege = new MPrivilege(allConnector, SqoopActionConstant.WRITE, false); + client.createRole(role2); + client.grantRole(Lists.newArrayList(role2), Lists.newArrayList(group2Princ)); + client.grantPrivilege(Lists.newArrayList(role2Princ), Lists.newArrayList(writePrivilege)); + + // check user2 has one privilege on role2 + client = sqoopServerRunner.getSqoopClient(USER2); + assertTrue(client.getPrivilegesByPrincipal(role2Princ, allConnector).size() == 1); + + // grant privilege to role role2 again + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.grantPrivilege(Lists.newArrayList(role2Princ), Lists.newArrayList(writePrivilege)); + + // check user2 has only one privilege on role2 + client = sqoopServerRunner.getSqoopClient(USER2); + assertTrue(client.getPrivilegesByPrincipal(role2Princ, allConnector).size() == 1); + } + + @Test + public void testGrantPrivilegeWithAllPrivilegeExist() throws Exception { + /** + * user3 belongs to group group3 + * admin user grant role role3 to group group3 + * admin user grant all privilege on connector all to role role3 + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role3 = new MRole(ROLE3); + MPrincipal group3Princ = new MPrincipal(GROUP3, MPrincipal.TYPE.GROUP); + MPrincipal role3Princ = new MPrincipal(ROLE3, MPrincipal.TYPE.ROLE); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege allPrivilege = new MPrivilege(allConnector, SqoopActionConstant.ALL_NAME, false); + client.createRole(role3); + client.grantRole(Lists.newArrayList(role3), Lists.newArrayList(group3Princ)); + client.grantPrivilege(Lists.newArrayList(role3Princ), Lists.newArrayList(allPrivilege)); + + // check user3 has one privilege on role3 + client = sqoopServerRunner.getSqoopClient(USER3); + assertTrue(client.getPrivilegesByPrincipal(role3Princ, allConnector).size() == 1); + // user3 has the all action on role3 + MPrivilege user3Privilege = client.getPrivilegesByPrincipal(role3Princ, allConnector).get(0); + assertEquals(user3Privilege.getAction(), SqoopActionConstant.ALL_NAME); + + /** + * admin user grant read privilege on connector all to role role3 + * because the role3 has already the all privilege, the read privilege granting has + * no impact on the role3 + */ + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MPrivilege readPrivilege = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client.grantPrivilege(Lists.newArrayList(role3Princ), Lists.newArrayList(readPrivilege)); + // check user3 has only one privilege on role3 + client = sqoopServerRunner.getSqoopClient(USER3); + assertTrue(client.getPrivilegesByPrincipal(role3Princ, allConnector).size() == 1); + // user3 has the all action on role3 + user3Privilege = client.getPrivilegesByPrincipal(role3Princ, allConnector).get(0); + assertEquals(user3Privilege.getAction(), SqoopActionConstant.ALL_NAME); + } + + @Test + public void testGrantALLPrivilegeWithOtherPrivilegesExist() throws Exception { + /** + * user4 belongs to group group4 + * admin user grant role role4 to group group4 + * admin user grant read privilege on connector all to role role4 + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role4 = new MRole(ROLE4); + MPrincipal group4Princ = new MPrincipal(GROUP4, MPrincipal.TYPE.GROUP); + MPrincipal role4Princ = new MPrincipal(ROLE4, 
MPrincipal.TYPE.ROLE); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readPrivilege = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client.createRole(role4); + client.grantRole(Lists.newArrayList(role4), Lists.newArrayList(group4Princ)); + client.grantPrivilege(Lists.newArrayList(role4Princ), Lists.newArrayList(readPrivilege)); + + // check user4 has one privilege on role1 + client = sqoopServerRunner.getSqoopClient(USER4); + assertTrue(client.getPrivilegesByPrincipal(role4Princ, allConnector).size() == 1); + // user4 has the read action on collector all + MPrivilege user4Privilege = client.getPrivilegesByPrincipal(role4Princ, allConnector).get(0); + assertEquals(user4Privilege.getAction().toLowerCase(), SqoopActionConstant.READ); + + /** + * admin user grant write privilege on connector all to role role4 + */ + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MPrivilege writePrivilege = new MPrivilege(allConnector, SqoopActionConstant.WRITE, false); + client.grantPrivilege(Lists.newArrayList(role4Princ), Lists.newArrayList(writePrivilege)); + + // check user4 has two privileges on role1 + client = sqoopServerRunner.getSqoopClient(USER4); + assertTrue(client.getPrivilegesByPrincipal(role4Princ, allConnector).size() == 2); + // user4 has the read and write action on collector all + List actions = Lists.newArrayList(); + for (MPrivilege privilege : client.getPrivilegesByPrincipal(role4Princ, allConnector)) { + actions.add(privilege.getAction().toLowerCase()); + } + assertEquals(2, actions.size()); + assertTrue(actions.contains(SqoopActionConstant.READ)); + assertTrue(actions.contains(SqoopActionConstant.WRITE)); + + /** + * admin user grant all privilege on connector all to role role4 + * because the all privilege includes the read and write privileges, these privileges will + * be removed + */ + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MPrivilege allPrivilege = new MPrivilege(allConnector, SqoopActionConstant.ALL_NAME, false); + client.grantPrivilege(Lists.newArrayList(role4Princ), Lists.newArrayList(allPrivilege)); + + // check user4 has only privilege on role1 + client = sqoopServerRunner.getSqoopClient(USER4); + assertTrue(client.getPrivilegesByPrincipal(role4Princ, allConnector).size() == 1); + // user4 has the all action on role3 + user4Privilege = client.getPrivilegesByPrincipal(role4Princ, allConnector).get(0); + assertEquals(user4Privilege.getAction(), SqoopActionConstant.ALL_NAME); + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestJobEndToEnd.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestJobEndToEnd.java new file mode 100644 index 000000000..636e26970 --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestJobEndToEnd.java @@ -0,0 +1,305 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.sqoop; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sqoop.client.SqoopClient; +import org.apache.sqoop.model.MJob; +import org.apache.sqoop.model.MLink; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.apache.sqoop.security.SecurityError; +import org.junit.Test; + +import com.google.common.collect.Lists; + +public class TestJobEndToEnd extends AbstractSqoopSentryTestBase { + @Test + public void testShowJob() throws Exception { + /** + * ADMIN_USER creates two links and one job + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MLink rdbmsLink = client.createLink("generic-jdbc-connector"); + sqoopServerRunner.fillRdbmsLinkConfig(rdbmsLink); + sqoopServerRunner.saveLink(client, rdbmsLink); + + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + sqoopServerRunner.saveLink(client, hdfsLink); + + MJob job1 = client.createJob(hdfsLink.getPersistenceId(), rdbmsLink.getPersistenceId()); + // set HDFS "FROM" config for the job, since the connector test case base class only has utilities for HDFS!
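+ // The job reads FROM the hdfs link and writes TO the jdbc link; both links were saved above, so their persistence ids are valid here.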
+ sqoopServerRunner.fillHdfsFromConfig(job1); + // set the RDBMS "TO" config here + sqoopServerRunner.fillRdbmsToConfig(job1); + // create job + sqoopServerRunner.saveJob(client, job1); + /** + * ADMIN_USER grants the read privilege on all jobs to role1 + */ + MRole role1 = new MRole(ROLE1); + MPrincipal group1 = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP); + MResource allJob = new MResource(SqoopActionConstant.ALL, MResource.TYPE.JOB); + MPrivilege readAllPrivilege = new MPrivilege(allJob, SqoopActionConstant.READ, false); + client.createRole(role1); + client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role1.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readAllPrivilege)); + + /** + * ADMIN_USER grants the read privilege on job1 to role2 + */ + MRole role2 = new MRole(ROLE2); + MPrincipal group2 = new MPrincipal(GROUP2, MPrincipal.TYPE.GROUP); + MResource job1Resource = new MResource(String.valueOf(job1.getPersistenceId()), MResource.TYPE.JOB); + MPrivilege readJob1Privilege = new MPrivilege(job1Resource, SqoopActionConstant.READ, false); + client.createRole(role2); + client.grantRole(Lists.newArrayList(role2), Lists.newArrayList(group2)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role2.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readJob1Privilege)); + + // user1 can show all jobs + client = sqoopServerRunner.getSqoopClient(USER1); + try { + assertTrue(client.getJobs().size() == 1); + assertTrue(client.getJob(job1.getPersistenceId()) != null); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + + // user2 can show job1 + client = sqoopServerRunner.getSqoopClient(USER2); + try { + assertTrue(client.getJobs().size() == 1); + assertTrue(client.getJob(job1.getPersistenceId()) != null); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + + // user3 can't show job1 + client = sqoopServerRunner.getSqoopClient(USER3); + try { + assertTrue(client.getJobs().size() == 0); + client.getJob(job1.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.deleteJob(job1.getPersistenceId()); + } + + @Test + public void testUpdateDeleteJob() throws Exception { + /** + * ADMIN_USER creates two links and one job + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MLink rdbmsLink = client.createLink("generic-jdbc-connector"); + sqoopServerRunner.fillRdbmsLinkConfig(rdbmsLink); + rdbmsLink.setName("rdbm_testUpdateJob"); + sqoopServerRunner.saveLink(client, rdbmsLink); + + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + hdfsLink.setName("hdfs_testUpdateJob"); + sqoopServerRunner.saveLink(client, hdfsLink); + + MJob job2 = client.createJob(hdfsLink.getPersistenceId(), rdbmsLink.getPersistenceId()); + // set HDFS "FROM" config for the job, since the connector test case base class only has utilities for HDFS!
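+ // Note: in the checks below, WRITE on a job authorizes update/delete while showing the job still requires READ, so role4 can modify job2 without being able to list it.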
+ sqoopServerRunner.fillHdfsFromConfig(job2); + // set the RDBMS "TO" config here + sqoopServerRunner.fillRdbmsToConfig(job2); + // create job + sqoopServerRunner.saveJob(client, job2); + + /** + * ADMIN_USER grants the update privilege on job2 to role4 + * ADMIN_USER grants the read privilege on all connectors to role4 + * ADMIN_USER grants the read privilege on all links to role4 + */ + MRole role4 = new MRole(ROLE4); + MPrincipal group4 = new MPrincipal(GROUP4, MPrincipal.TYPE.GROUP); + MResource job2Resource = new MResource(String.valueOf(job2.getPersistenceId()), MResource.TYPE.JOB); + MPrivilege writeJob2Privilege = new MPrivilege(job2Resource, SqoopActionConstant.WRITE, false); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readConnectorPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + MResource allLink = new MResource(SqoopActionConstant.ALL, MResource.TYPE.LINK); + MPrivilege readLinkPriv = new MPrivilege(allLink, SqoopActionConstant.READ, false); + client.createRole(role4); + client.grantRole(Lists.newArrayList(role4), Lists.newArrayList(group4)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role4.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(writeJob2Privilege, readConnectorPriv, readLinkPriv)); + + // user4 can't show job2 + client = sqoopServerRunner.getSqoopClient(USER4); + try { + assertTrue(client.getJobs().size() == 0); + client.getJob(job2.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + // user4 can update job2 + try { + job2.setName("job2_update_user4_1"); + client.updateJob(job2); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + // user3 can't update job2 + client = sqoopServerRunner.getSqoopClient(USER3); + try { + assertTrue(client.getJobs().size() == 0); + job2.setName("job2_update_user3_1"); + client.updateJob(job2); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + // user3 can't delete job2 + try { + client.deleteJob(job2.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + // user4 can delete job2 because user4 has the write privilege on job2 + client = sqoopServerRunner.getSqoopClient(USER4); + try { + client.deleteJob(job2.getPersistenceId()); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.dropRole(role4); + } + + @Test + public void testEnableAndStartJob() throws Exception { + /** + * ADMIN_USER creates two links and one job + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MLink rdbmsLink = client.createLink("generic-jdbc-connector"); + sqoopServerRunner.fillRdbmsLinkConfig(rdbmsLink); + rdbmsLink.setName("rdbm_testEnableAndStartJob"); + sqoopServerRunner.saveLink(client, rdbmsLink); + + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + hdfsLink.setName("hdfs_testEnableAndStartJob"); + sqoopServerRunner.saveLink(client, hdfsLink); + + MJob job2 = client.createJob(hdfsLink.getPersistenceId(), rdbmsLink.getPersistenceId()); + // set HDFS "FROM" config for the job, since the connector test case base class only has utilities for HDFS!
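+ // Enabling and starting a job are write-level operations here: role5 (read-only on job2) is expected to be denied, while role4 (write on job2) succeeds.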
+ sqoopServerRunner.fillHdfsFromConfig(job2); + // set the RDBMS "TO" config here + sqoopServerRunner.fillRdbmsToConfig(job2); + // create job + sqoopServerRunner.saveJob(client, job2); + + /** + * ADMIN_USER grants the update privilege on job2 to role4 + * ADMIN_USER grants the read privilege on all connectors to role4 + * ADMIN_USER grants the read privilege on all links to role4 + */ + MRole role4 = new MRole(ROLE4); + MPrincipal group4 = new MPrincipal(GROUP4, MPrincipal.TYPE.GROUP); + MResource job2Resource = new MResource(String.valueOf(job2.getPersistenceId()), MResource.TYPE.JOB); + MPrivilege writeJob2Privilege = new MPrivilege(job2Resource, SqoopActionConstant.WRITE, false); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readConnectorPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + MResource allLink = new MResource(SqoopActionConstant.ALL, MResource.TYPE.LINK); + MPrivilege readLinkPriv = new MPrivilege(allLink, SqoopActionConstant.READ, false); + client.createRole(role4); + client.grantRole(Lists.newArrayList(role4), Lists.newArrayList(group4)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role4.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(writeJob2Privilege, readConnectorPriv, readLinkPriv)); + + + /** + * ADMIN_USER grants the read privilege on job2 to role5 + * ADMIN_USER grants the read privilege on all connectors to role5 + * ADMIN_USER grants the read privilege on all links to role5 + */ + MRole role5 = new MRole(ROLE5); + MPrincipal group5 = new MPrincipal(GROUP5, MPrincipal.TYPE.GROUP); + MPrivilege readJob2Privilege = new MPrivilege(job2Resource, SqoopActionConstant.READ, false); + client.createRole(role5); + client.grantRole(Lists.newArrayList(role5), Lists.newArrayList(group5)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role5.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readJob2Privilege, readConnectorPriv, readLinkPriv)); + + // user5 can't enable or start job2 + client = sqoopServerRunner.getSqoopClient(USER5); + try { + client.enableJob(job2.getPersistenceId(), true); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + try { + client.startJob(job2.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + // user3 can't enable or start job2 + client = sqoopServerRunner.getSqoopClient(USER3); + try { + client.enableJob(job2.getPersistenceId(), true); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + try { + client.startJob(job2.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + // user4 can disable, re-enable and finally delete job2 + client = sqoopServerRunner.getSqoopClient(USER4); + try { + client.enableJob(job2.getPersistenceId(), false); + client.enableJob(job2.getPersistenceId(), true); + client.deleteJob(job2.getPersistenceId()); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + + + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.dropRole(role4); + client.dropRole(role5); + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestLinkEndToEnd.java
b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestLinkEndToEnd.java new file mode 100644 index 000000000..8c8a91dd7 --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestLinkEndToEnd.java @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.sqoop; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sqoop.client.SqoopClient; +import org.apache.sqoop.model.MLink; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.apache.sqoop.security.SecurityError; +import org.junit.Test; + +import com.google.common.collect.Lists; + +public class TestLinkEndToEnd extends AbstractSqoopSentryTestBase { + + private void dropAndCreateRole(SqoopClient client, MRole mrole) throws Exception { + try { + client.dropRole(mrole); + } catch (Exception e) { + // nothing to do if the role doesn't exist + } + client.createRole(mrole); + } + + @Test + public void testShowLink() throws Exception { + /** + * ADMIN_USER creates an hdfs link + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + sqoopServerRunner.saveLink(client, hdfsLink); + + /** + * ADMIN_USER grants the read privilege on all links to role1 + */ + MRole role1 = new MRole(ROLE1); + MPrincipal group1 = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP); + MResource allLink = new MResource(SqoopActionConstant.ALL, MResource.TYPE.LINK); + MPrivilege readAllPrivilege = new MPrivilege(allLink, SqoopActionConstant.READ, false); + dropAndCreateRole(client, role1); + client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role1.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readAllPrivilege)); + + /** + * ADMIN_USER grants the read privilege on the hdfs link to role2 + */ + MRole role2 = new MRole(ROLE2); + MPrincipal group2 = new MPrincipal(GROUP2, MPrincipal.TYPE.GROUP); + MResource hdfsLinkResource = new MResource(String.valueOf(hdfsLink.getPersistenceId()), MResource.TYPE.LINK); + MPrivilege readHdfsLinkPrivilege = new MPrivilege(hdfsLinkResource, SqoopActionConstant.READ, false); + dropAndCreateRole(client, role2); + client.grantRole(Lists.newArrayList(role2), Lists.newArrayList(group2)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role2.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readHdfsLinkPrivilege)); + + // user1 can show all links + client = sqoopServerRunner.getSqoopClient(USER1); + try { + assertTrue(client.getLinks().size() == 1); + assertTrue(client.getLink(hdfsLink.getPersistenceId()) != null); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + + // user2 can show the hdfs link + client = sqoopServerRunner.getSqoopClient(USER2); + try { + assertTrue(client.getLinks().size() == 1); + assertTrue(client.getLink(hdfsLink.getPersistenceId()) != null); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + + // user3 can't show the hdfs link + client = sqoopServerRunner.getSqoopClient(USER3); + try { + assertTrue(client.getLinks().size() == 0); + client.getLink(hdfsLink.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.deleteLink(hdfsLink.getPersistenceId()); + } + + @Test + public void testUpdateDeleteLink() throws Exception { + /** + * ADMIN_USER creates an hdfs link + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + sqoopServerRunner.saveLink(client, hdfsLink); + + /** + * ADMIN_USER grants the update privilege on the hdfs link to role4 + * ADMIN_USER grants the read privilege on all connectors to role4 + */ + MRole role4 = new MRole(ROLE4); + MPrincipal group4 = new MPrincipal(GROUP4, MPrincipal.TYPE.GROUP); + MResource hdfsLinkResource = new MResource(String.valueOf(hdfsLink.getPersistenceId()), MResource.TYPE.LINK); + MPrivilege writeHdfsPrivilege = new MPrivilege(hdfsLinkResource, SqoopActionConstant.WRITE, false); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readConnectorPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + dropAndCreateRole(client, role4); + client.grantRole(Lists.newArrayList(role4), Lists.newArrayList(group4)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role4.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(writeHdfsPrivilege, readConnectorPriv)); + + // user4 can't show the hdfs link + client = sqoopServerRunner.getSqoopClient(USER4); + try { + assertTrue(client.getLinks().size() == 0); + client.getLink(hdfsLink.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + // user4 can update the hdfs link + try { + hdfsLink.setName("hdfs_link_update_user4_1"); + client.updateLink(hdfsLink); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + // user3 can't update the hdfs link + client = sqoopServerRunner.getSqoopClient(USER3); + try { + assertTrue(client.getLinks().size() == 0); + hdfsLink.setName("hdfs_link_update_user3_1"); + client.updateLink(hdfsLink); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + // user3 can't delete the hdfs link + try { + client.deleteLink(hdfsLink.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + // user4 can delete the hdfs link because user4 has the write privilege on it + client = sqoopServerRunner.getSqoopClient(USER4); + try { + client.deleteLink(hdfsLink.getPersistenceId()); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.dropRole(role4); + } + + @Test + public void testEnableLink() throws Exception { + /** + * ADMIN_USER creates an hdfs link + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + sqoopServerRunner.saveLink(client, hdfsLink); + + /** + * ADMIN_USER grants the read privilege on the hdfs link to role4 + * ADMIN_USER grants the read privilege on all connectors to role4 + */ + MRole role4 = new MRole(ROLE4); + MPrincipal group4 = new MPrincipal(GROUP4, MPrincipal.TYPE.GROUP); + MResource hdfsLinkResource = new MResource(String.valueOf(hdfsLink.getPersistenceId()), MResource.TYPE.LINK); + MPrivilege readHdfsPrivilege = new MPrivilege(hdfsLinkResource, SqoopActionConstant.READ, false); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readConnectorPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + dropAndCreateRole(client, role4); + client.grantRole(Lists.newArrayList(role4), Lists.newArrayList(group4)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role4.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readHdfsPrivilege, readConnectorPriv)); + + /** + * ADMIN_USER grants the write privilege on the hdfs link to role5 + * ADMIN_USER grants the read privilege on all connectors to role5 + */ + MRole role5 = new MRole(ROLE5); + MPrincipal group5 = new MPrincipal(GROUP5, MPrincipal.TYPE.GROUP); + MPrivilege writeHdfsPrivilege = new MPrivilege(hdfsLinkResource, SqoopActionConstant.WRITE, false); + dropAndCreateRole(client, role5); + client.grantRole(Lists.newArrayList(role5), Lists.newArrayList(group5)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role5.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(writeHdfsPrivilege, readConnectorPriv)); + + // user4 can't enable the hdfs link + client = sqoopServerRunner.getSqoopClient(USER4); + try { + client.enableLink(hdfsLink.getPersistenceId(), true); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + // user5 can enable the hdfs link + client = sqoopServerRunner.getSqoopClient(USER5); + try { + client.enableLink(hdfsLink.getPersistenceId(), true); + } catch (Exception e) { + fail("unexpected Authorization exception was thrown"); + } + // user3 can't enable the hdfs link + client = sqoopServerRunner.getSqoopClient(USER3); + try { + client.enableLink(hdfsLink.getPersistenceId(), true); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + try { + client.dropRole(role4); + client.dropRole(role5); + } catch (Exception e) { + // nothing to do if cleanup fails + } + client.deleteLink(hdfsLink.getPersistenceId()); + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestOwnerPrivilege.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestOwnerPrivilege.java new file mode 100644 index 000000000..abef80c58 --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestOwnerPrivilege.java @@ -0,0 +1,154 @@ +/* + *
Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.sentry.tests.e2e.sqoop; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sqoop.client.SqoopClient; +import org.apache.sqoop.model.MJob; +import org.apache.sqoop.model.MLink; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.apache.sqoop.security.SecurityError; +import org.junit.Test; + +import com.google.common.collect.Lists; + +public class TestOwnerPrivilege extends AbstractSqoopSentryTestBase { + + @Test + public void testLinkOwner() throws Exception { + // USER1 initially has no privilege on any Sqoop resource + SqoopClient client = sqoopServerRunner.getSqoopClient(USER1); + assertTrue(client.getConnectors().size() == 0); + /** + * ADMIN_USER grants the read action privilege on connector all to role ROLE1 + * ADMIN_USER grants role ROLE1 to group GROUP1 + */ + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role1 = new MRole(ROLE1); + MPrincipal group1 = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client.createRole(role1); + client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role1.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readPriv)); + + // check USER1 has the read privilege on all connectors + client = sqoopServerRunner.getSqoopClient(USER1); + assertTrue(client.getConnectors().size() > 0); + + // USER1 creates a new HDFS link + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + sqoopServerRunner.saveLink(client, hdfsLink); + + // USER1 is the owner of the HDFS link, so he can show and update it + assertEquals(hdfsLink, client.getLink(hdfsLink.getPersistenceId())); + + // USER1 updates the name of the HDFS link + hdfsLink.setName("HDFS_update1"); + sqoopServerRunner.updateLink(client, hdfsLink); + + // USER2 has no privilege on the HDFS link + client = sqoopServerRunner.getSqoopClient(USER2); + assertTrue(client.getLinks().size() == 0); + + // delete the HDFS link + client = sqoopServerRunner.getSqoopClient(USER1); + client.deleteLink(hdfsLink.getPersistenceId()); + } + + @Test + public void testJobOwner() throws Exception { + // USER3 initially has no privilege on any Sqoop resource + SqoopClient client = sqoopServerRunner.getSqoopClient(USER3); +
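// with nothing granted yet, the connector list appears empty rather than raising an authorization error +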
assertTrue(client.getConnectors().size() == 0); + /** + * ADMIN_USER grants the read action privilege on connector all to role ROLE3 + * ADMIN_USER grants role ROLE3 to group GROUP3 + */ + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role3 = new MRole(ROLE3); + MPrincipal group3 = new MPrincipal(GROUP3, MPrincipal.TYPE.GROUP); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client.createRole(role3); + client.grantRole(Lists.newArrayList(role3), Lists.newArrayList(group3)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(ROLE3, MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readPriv)); + + // check USER3 has the read privilege on all connectors + client = sqoopServerRunner.getSqoopClient(USER3); + assertTrue(client.getConnectors().size() > 0); + + // USER3 creates two links: an hdfs link and a rdbms link + MLink rdbmsLink = client.createLink("generic-jdbc-connector"); + sqoopServerRunner.fillRdbmsLinkConfig(rdbmsLink); + sqoopServerRunner.saveLink(client, rdbmsLink); + + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + sqoopServerRunner.saveLink(client, hdfsLink); + + // USER3 is the owner of both links, so he can show and update them + assertTrue(client.getLinks().size() == 2); + hdfsLink.setName("HDFS_update2"); + client.updateLink(hdfsLink); + rdbmsLink.setName("RDBM_update"); + client.updateLink(rdbmsLink); + + // USER3 creates a job: transfer data from HDFS to RDBMS + MJob job1 = client.createJob(hdfsLink.getPersistenceId(), rdbmsLink.getPersistenceId()); + // set HDFS "FROM" config for the job, since the connector test case base class only has utilities for HDFS! + sqoopServerRunner.fillHdfsFromConfig(job1); + + // set the RDBMS "TO" config here + sqoopServerRunner.fillRdbmsToConfig(job1); + + // create job + sqoopServerRunner.saveJob(client, job1); + + /** + * USER3 is the owner of job1, so he can show and delete job1. + * USER4 has no privilege on job1 + */ + client = sqoopServerRunner.getSqoopClient(USER4); + assertTrue(client.getJobs().size() == 0); + try { + client.deleteJob(job1.getPersistenceId()); + fail("expected Authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + client = sqoopServerRunner.getSqoopClient(USER3); + assertEquals(job1, client.getJob(job1.getPersistenceId())); + client.deleteJob(job1.getPersistenceId()); + + // delete the HDFS and RDBMS links + client.deleteLink(hdfsLink.getPersistenceId()); + client.deleteLink(rdbmsLink.getPersistenceId()); + } + +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestRevokePrivilege.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestRevokePrivilege.java new file mode 100644 index 000000000..f71595c06 --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestRevokePrivilege.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.sqoop; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sentry.sqoop.SentrySqoopError; +import org.apache.sqoop.client.SqoopClient; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.junit.Test; + +import com.google.common.collect.Lists; + +public class TestRevokePrivilege extends AbstractSqoopSentryTestBase { + @Test + public void testNotSupportRevokePrivilegeFromUser() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MPrincipal user1 = new MPrincipal("not_support_revoke_user_1", MPrincipal.TYPE.USER); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + try { + client.revokePrivilege(Lists.newArrayList(user1), Lists.newArrayList(readPriv)); + fail("expected not-supported exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SentrySqoopError.GRANT_REVOKE_PRIVILEGE_NOT_SUPPORT_FOR_PRINCIPAL); + } + } + + @Test + public void testNotSupportRevokePrivilegeFromGroup() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MPrincipal group1 = new MPrincipal("not_support_revoke_group_1", MPrincipal.TYPE.GROUP); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + try { + client.revokePrivilege(Lists.newArrayList(group1), Lists.newArrayList(readPriv)); + fail("expected not-supported exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SentrySqoopError.GRANT_REVOKE_PRIVILEGE_NOT_SUPPORT_FOR_PRINCIPAL); + } + } + + @Test + public void testRevokeNotExistPrivilege() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole testRole = new MRole("noexist_privilege_role1"); + MPrincipal testPrinc = new MPrincipal(testRole.getName(), MPrincipal.TYPE.ROLE); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readPrivilege = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client.createRole(testRole); + assertTrue(client.getPrivilegesByPrincipal(testPrinc, allConnector).size() == 0); + + client.revokePrivilege(Lists.newArrayList(testPrinc), Lists.newArrayList(readPrivilege)); + assertTrue(client.getPrivilegesByPrincipal(testPrinc, allConnector).size() == 0); + } + + + @Test + public void testRevokePrivilege() throws Exception { + /** + * user1 belongs to group group1 + * admin user grants role role1 to group group1 + * admin user grants the read privilege on connector all to role role1 + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role1 = new MRole(ROLE1); + MPrincipal group1Princ = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP); + MPrincipal role1Princ = new MPrincipal(ROLE1, MPrincipal.TYPE.ROLE); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readPrivilege = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client.createRole(role1); + client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1Princ)); + client.grantPrivilege(Lists.newArrayList(role1Princ), Lists.newArrayList(readPrivilege)); + + // check user1 has the privilege on role1 + client = sqoopServerRunner.getSqoopClient(USER1); + assertTrue(client.getPrivilegesByPrincipal(role1Princ, allConnector).size() == 1); + + // admin user revokes the read privilege from role1 + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.revokePrivilege(Lists.newArrayList(role1Princ), Lists.newArrayList(readPrivilege)); + + // check user1 has no privilege on role1 + client = sqoopServerRunner.getSqoopClient(USER1); + assertTrue(client.getPrivilegesByPrincipal(role1Princ, allConnector).size() == 0); + } + + @Test + public void testRevokeAllPrivilege() throws Exception { + /** + * user2 belongs to group group2 + * admin user grants role role2 to group group2 + * admin user grants the read and write privileges on connector all to role role2 + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role2 = new MRole(ROLE2); + MPrincipal group2Princ = new MPrincipal(GROUP2, MPrincipal.TYPE.GROUP); + MPrincipal role2Princ = new MPrincipal(ROLE2, MPrincipal.TYPE.ROLE); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege writePrivilege = new MPrivilege(allConnector, SqoopActionConstant.WRITE, false); + MPrivilege readPrivilege = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client.createRole(role2); + client.grantRole(Lists.newArrayList(role2), Lists.newArrayList(group2Princ)); + client.grantPrivilege(Lists.newArrayList(role2Princ), Lists.newArrayList(writePrivilege, readPrivilege)); + + // check user2 has two privileges on role2 + client = sqoopServerRunner.getSqoopClient(USER2); + assertTrue(client.getPrivilegesByPrincipal(role2Princ, allConnector).size() == 2); + + // admin user revokes the all privilege from role2 + MPrivilege allPrivilege = new MPrivilege(allConnector, SqoopActionConstant.ALL_NAME, false); + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.revokePrivilege(Lists.newArrayList(role2Princ), Lists.newArrayList(allPrivilege)); + + // check user2 has no privilege on role2 + client = sqoopServerRunner.getSqoopClient(USER2); + assertTrue(client.getPrivilegesByPrincipal(role2Princ, allConnector).size() == 0); + } + + @Test + public void testRevokePrivilegeWithAllPrivilegeExist() throws Exception { + /** + * user3 belongs to group group3 + * admin user grants role role3 to group group3 + * admin user grants the all privilege on connector all to role role3 + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role3 = new MRole(ROLE3); + MPrincipal group3Princ = new MPrincipal(GROUP3, MPrincipal.TYPE.GROUP); + MPrincipal role3Princ = new MPrincipal(ROLE3, MPrincipal.TYPE.ROLE); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege allPrivilege = new MPrivilege(allConnector, SqoopActionConstant.ALL_NAME, false); + client.createRole(role3); + client.grantRole(Lists.newArrayList(role3), Lists.newArrayList(group3Princ)); + client.grantPrivilege(Lists.newArrayList(role3Princ), Lists.newArrayList(allPrivilege)); + + // check user3 has one privilege on role3 + client = sqoopServerRunner.getSqoopClient(USER3); + assertTrue(client.getPrivilegesByPrincipal(role3Princ, allConnector).size() == 1); + // role3 carries the all action + MPrivilege user3Privilege = client.getPrivilegesByPrincipal(role3Princ, allConnector).get(0); + assertEquals(SqoopActionConstant.ALL_NAME, user3Privilege.getAction()); + + // admin user revokes the read privilege on connector all from role role3 + MPrivilege readPrivilege = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.revokePrivilege(Lists.newArrayList(role3Princ), Lists.newArrayList(readPrivilege)); + + // check user3 is left with only the write privilege on role3 + client = sqoopServerRunner.getSqoopClient(USER3); + assertTrue(client.getPrivilegesByPrincipal(role3Princ, allConnector).size() == 1); + user3Privilege = client.getPrivilegesByPrincipal(role3Princ, allConnector).get(0); + assertEquals(SqoopActionConstant.WRITE, user3Privilege.getAction().toLowerCase()); + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestRoleOperation.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestRoleOperation.java new file mode 100644 index 000000000..d47f0adae --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestRoleOperation.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.sentry.tests.e2e.sqoop; + +import org.apache.sentry.sqoop.SentrySqoopError; +import org.apache.sqoop.client.SqoopClient; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MRole; +import org.junit.Test; + +import com.google.common.collect.Lists; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class TestRoleOperation extends AbstractSqoopSentryTestBase { + + @Test + public void testAdminToCreateDeleteRole() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role1 = new MRole("create_delete_role_1"); + MRole role2 = new MRole("create_delete_role_2"); + client.createRole(role1); + client.createRole(role2); + assertTrue(client.getRoles().size() > 0); + } + + @Test + public void testNotAdminToCreateDeleteRole() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role1 = new MRole("not_admin_create_delete_role_1"); + MRole role2 = new MRole("not_admin_create_delete_role_2"); + client.createRole(role1); + + client = sqoopServerRunner.getSqoopClient(USER1); + try { + client.createRole(role2); + fail("expected SentryAccessDeniedException was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, "SentryAccessDeniedException"); + } + try { + client.dropRole(role1); + fail("expected SentryAccessDeniedException was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, "SentryAccessDeniedException"); + } + } + + @Test + public void testCreateExistedRole() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role1 = new MRole("create_exist_role_1"); + client.createRole(role1); + try { + client.createRole(role1); + fail("expected SentryAlreadyExistsException was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, "SentryAlreadyExistsException"); + } + } + + @Test + public void testDropNotExistedRole() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + try { + client.dropRole(new MRole("drop_noexisted_role_1")); + fail("expected SentryNoSuchObjectException was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, "SentryNoSuchObjectException"); + } + } + + @Test + public void testAdminShowAllRole() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.createRole(new MRole("show_all_role")); + assertTrue(client.getRoles().size() > 0); + } + + @Test + public void testNotAdminShowAllRole() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(USER1); + try { + client.getRoles(); + fail("expected SentryAccessDeniedException was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, "SentryAccessDeniedException"); + } + } + + @Test + public void testNotSupportAddRoleToUser() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role1 = new MRole("add_to_user_role"); + MPrincipal user1 = new MPrincipal("add_to_user", MPrincipal.TYPE.USER); + try { + client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(user1)); + fail("expected not-supported exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SentrySqoopError.GRANT_REVOKE_ROLE_NOT_SUPPORT_FOR_PRINCIPAL); + } + } + + @Test + public void testShowRoleOnGroup() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + // admin user grants role1 to group1 + MRole role1 = new MRole(ROLE1); + client.createRole(role1); + MPrincipal group1 = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP); + client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1)); + // admin user grants role2 to group2 + MRole role2 = new MRole(ROLE2); + client.createRole(role2); + MPrincipal group2 = new MPrincipal(GROUP2, MPrincipal.TYPE.GROUP); + client.grantRole(Lists.newArrayList(role2), Lists.newArrayList(group2)); + + // user1 can show the roles on group1 + client = sqoopServerRunner.getSqoopClient(USER1); + assertEquals(role1.getName(), client.getRolesByPrincipal(group1).get(0).getName()); + + // user1 can't show the roles on group2 + try { + client.getRolesByPrincipal(group2); + fail("expected SentryAccessDeniedException was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, "SentryAccessDeniedException"); + } + + // user2 can show the roles on group2 + client = sqoopServerRunner.getSqoopClient(USER2); + assertEquals(role2.getName(), client.getRolesByPrincipal(group2).get(0).getName()); + + // user2 can't show the roles on group1 + try { + client.getRolesByPrincipal(group1); + fail("expected SentryAccessDeniedException was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, "SentryAccessDeniedException"); + } + } + + @Test + public void testAddDeleteRoleOnGroup() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + // admin user grants role3 to group3 + MRole role3 = new MRole(ROLE3); + client.createRole(role3); + MPrincipal group3 = new MPrincipal(GROUP3, MPrincipal.TYPE.GROUP); + client.grantRole(Lists.newArrayList(role3), Lists.newArrayList(group3)); + // admin user grants role4 to group4 + MRole role4 = new MRole(ROLE4); + client.createRole(role4); + MPrincipal group4 = new MPrincipal(GROUP4, MPrincipal.TYPE.GROUP); + client.grantRole(Lists.newArrayList(role4), Lists.newArrayList(group4)); + + // user3 can show the roles on group3 + client = sqoopServerRunner.getSqoopClient(USER3); + assertEquals(role3.getName(), client.getRolesByPrincipal(group3).get(0).getName()); + + // user4 can show the roles on group4 + client = sqoopServerRunner.getSqoopClient(USER4); + assertEquals(role4.getName(), client.getRolesByPrincipal(group4).get(0).getName()); + + /** + * admin revokes role3 from group3 + * admin revokes role4 from group4 + */ + client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + client.revokeRole(Lists.newArrayList(role3), Lists.newArrayList(group3)); + client.revokeRole(Lists.newArrayList(role4), Lists.newArrayList(group4)); + + // user3 shows the roles on group3; an empty role list is returned + client = sqoopServerRunner.getSqoopClient(USER3); + assertTrue(client.getRolesByPrincipal(group3).isEmpty()); + + // user4 shows the roles on group4; an empty role list is returned + client = sqoopServerRunner.getSqoopClient(USER4); + assertTrue(client.getRolesByPrincipal(group4).isEmpty()); + } + + @Test + public void testNotSupportShowRoleOnUser() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MPrincipal user1 = new MPrincipal("showRoleOnUser", MPrincipal.TYPE.USER); + try { + client.getRolesByPrincipal(user1); + fail("expected not-supported exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SentrySqoopError.SHOW_GRANT_NOT_SUPPORTED_FOR_PRINCIPAL); + } + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestServerScopeEndToEnd.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestServerScopeEndToEnd.java new file mode 100644 index
000000000..85bae92b4 --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestServerScopeEndToEnd.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.sqoop; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sqoop.client.SqoopClient; +import org.apache.sqoop.model.MJob; +import org.apache.sqoop.model.MLink; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.apache.sqoop.security.SecurityError; +import org.junit.Test; + +import com.google.common.collect.Lists; + +public class TestServerScopeEndToEnd extends AbstractSqoopSentryTestBase { + + @Test + public void testServerScopePrivilege() throws Exception { + /** + * ADMIN_USER creates two links and one job + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MLink rdbmsLink = client.createLink("generic-jdbc-connector"); + sqoopServerRunner.fillRdbmsLinkConfig(rdbmsLink); + sqoopServerRunner.saveLink(client, rdbmsLink); + + MLink hdfsLink = client.createLink("hdfs-connector"); + sqoopServerRunner.fillHdfsLink(hdfsLink); + sqoopServerRunner.saveLink(client, hdfsLink); + + MJob job1 = client.createJob(hdfsLink.getPersistenceId(), rdbmsLink.getPersistenceId()); + // set HDFS "FROM" config for the job, since the connector test case base class only has utilities for HDFS!
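+ // These links and this job are the fixtures that the server-scope privilege checks below run against; a privilege on the server resource covers every resource under SQOOP_SERVER_NAME.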
+ sqoopServerRunner.fillHdfsFromConfig(job1); + // set the RDBM "TO" config here + sqoopServerRunner.fillRdbmsToConfig(job1); + // create job + sqoopServerRunner.saveJob(client, job1); + + + MResource sqoopServer1 = new MResource(SQOOP_SERVER_NAME, MResource.TYPE.SERVER); + /** + * ADMIN_USER grant read privilege on server SQOOP_SERVER_NAME to role1 + */ + MRole role1 = new MRole(ROLE1); + MPrincipal group1 = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP); + MPrivilege readPrivilege = new MPrivilege(sqoopServer1,SqoopActionConstant.READ, false); + client.createRole(role1); + client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role1.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(readPrivilege)); + + /** + * ADMIN_USER grant write privilege on server SQOOP_SERVER_NAME to role2 + * ADMIN_USER grant read privilege on connector all to role2 (for update link required) + * ADMIN_USER grant read privilege on link all to role2 (for update job required) + */ + MRole role2 = new MRole(ROLE2); + MPrincipal group2 = new MPrincipal(GROUP2, MPrincipal.TYPE.GROUP); + MPrivilege writePrivilege = new MPrivilege(sqoopServer1,SqoopActionConstant.WRITE, false); + client.createRole(role2); + + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MResource allLink = new MResource(SqoopActionConstant.ALL, MResource.TYPE.LINK); + MPrivilege readAllConPriv = new MPrivilege(allConnector,SqoopActionConstant.READ, false); + MPrivilege readAllLinkPriv = new MPrivilege(allLink,SqoopActionConstant.READ, false); + + client.grantRole(Lists.newArrayList(role2), Lists.newArrayList(group2)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role2.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(writePrivilege, readAllConPriv, readAllLinkPriv)); + + /** + * ADMIN_USER grant all privilege on server SQOOP_SERVER_NAME to role3 + */ + MRole role3 = new MRole(ROLE3); + MPrincipal group3 = new MPrincipal(GROUP3, MPrincipal.TYPE.GROUP); + MPrivilege allPrivilege = new MPrivilege(sqoopServer1,SqoopActionConstant.ALL_NAME, false); + client.createRole(role3); + client.grantRole(Lists.newArrayList(role3), Lists.newArrayList(group3)); + client.grantPrivilege(Lists.newArrayList(new MPrincipal(role3.getName(), MPrincipal.TYPE.ROLE)), + Lists.newArrayList(allPrivilege)); + + /** + * user1 has only the read privilege on server SQOOP_SERVER_NAME to role1, + * so user1 can show connector, link and jobs. 
user1 can't update links or + * jobs. + */ + client = sqoopServerRunner.getSqoopClient(USER1); + try { + // show connector + assertTrue(client.getConnector("generic-jdbc-connector") != null); + assertTrue(client.getConnector("hdfs-connector") != null); + assertTrue(client.getConnectors().size() > 0); + // show link + assertTrue(client.getLink(hdfsLink.getPersistenceId()) != null); + assertTrue(client.getLink(rdbmsLink.getPersistenceId()) != null); + assertTrue(client.getLinks().size() == 2); + // show job + assertTrue(client.getJob(job1.getPersistenceId()) != null); + assertTrue(client.getJobs().size() == 1); + } catch (Exception e) { + fail("unexpected authorization exception happened"); + } + // user1 can't update links or jobs + try { + hdfsLink.setName("hdfs1_update_user1"); + client.updateLink(hdfsLink); + fail("expected authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + try { + job1.setName("job1_update_user1"); + client.updateJob(job1); + fail("expected authorization exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SecurityError.AUTH_0014.getMessage()); + } + + /** + * user2 has the write privilege on server SQOOP_SERVER_NAME (via role2). In order to update links and jobs, + * user2 also has the read privilege on connector all and link all. + * user2 can update links and jobs, but can't show jobs. + */ + client = sqoopServerRunner.getSqoopClient(USER2); + try { + // update link and job + hdfsLink.setName("hdfs1_update_user2"); + client.updateLink(hdfsLink); + job1.setName("job1_update_user2"); + client.updateJob(job1); + } catch (Exception e) { + fail("unexpected authorization exception happened"); + } + // user2 can't show jobs + assertTrue(client.getJobs().size() == 0); + + /** + * user3 has all privileges on server SQOOP_SERVER_NAME (via role3), + * so user3 can perform any operation on any Sqoop resource. + */ + client = sqoopServerRunner.getSqoopClient(USER3); + try { + // show connector + assertTrue(client.getConnector("generic-jdbc-connector") != null); + assertTrue(client.getConnector("hdfs-connector") != null); + assertTrue(client.getConnectors().size() > 0); + // show link + assertTrue(client.getLink(hdfsLink.getPersistenceId()) != null); + assertTrue(client.getLink(rdbmsLink.getPersistenceId()) != null); + assertTrue(client.getLinks().size() == 2); + // show job + assertTrue(client.getJob(job1.getPersistenceId()) != null); + assertTrue(client.getJobs().size() == 1); + // update link + hdfsLink.setName("hdfs1_update_user3"); + client.updateLink(hdfsLink); + // update job + job1.setName("job1_update_user3"); + client.updateJob(job1); + } catch (Exception e) { + fail("unexpected authorization exception happened"); + } + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestShowPrivilege.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestShowPrivilege.java new file mode 100644 index 000000000..0ccbf5d32 --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TestShowPrivilege.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.sqoop; + +import org.apache.sentry.core.model.sqoop.SqoopActionConstant; +import org.apache.sentry.sqoop.SentrySqoopError; +import org.apache.sqoop.client.SqoopClient; +import org.apache.sqoop.model.MPrincipal; +import org.apache.sqoop.model.MPrivilege; +import org.apache.sqoop.model.MResource; +import org.apache.sqoop.model.MRole; +import org.junit.Test; + +import com.google.common.collect.Lists; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class TestShowPrivilege extends AbstractSqoopSentryTestBase { + + @Test + public void testNotSupportShowOnUser() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MPrincipal user1 = new MPrincipal("not_support_user1", MPrincipal.TYPE.USER); + MResource resource1 = new MResource("all", MResource.TYPE.CONNECTOR); + try { + client.getPrivilegesByPrincipal(user1, resource1); + fail("expected not-supported exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SentrySqoopError.SHOW_PRIVILEGE_NOT_SUPPORTED_FOR_PRINCIPAL); + } + } + + @Test + public void testNotSupportShowOnGroup() throws Exception { + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MPrincipal group1 = new MPrincipal("not_support_group1", MPrincipal.TYPE.GROUP); + MResource resource1 = new MResource("all", MResource.TYPE.CONNECTOR); + try { + client.getPrivilegesByPrincipal(group1, resource1); + fail("expected not-supported exception was not thrown"); + } catch (Exception e) { + assertCausedMessage(e, SentrySqoopError.SHOW_PRIVILEGE_NOT_SUPPORTED_FOR_PRINCIPAL); + } + } + + @Test + public void testShowPrivileges() throws Exception { + /** + * user1 belongs to group group1 + * admin user grants role1 to group group1 + * admin user grants the read privilege on connector all to role1 + */ + SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER); + MRole role1 = new MRole(ROLE1); + MPrincipal group1Princ = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP); + MPrincipal role1Princ = new MPrincipal(ROLE1, MPrincipal.TYPE.ROLE); + MResource allConnector = new MResource(SqoopActionConstant.ALL, MResource.TYPE.CONNECTOR); + MPrivilege readPriv = new MPrivilege(allConnector, SqoopActionConstant.READ, false); + client.createRole(role1); + client.grantRole(Lists.newArrayList(role1), Lists.newArrayList(group1Princ)); + client.grantPrivilege(Lists.newArrayList(role1Princ), Lists.newArrayList(readPriv)); + + // user1 shows privileges on role1 + client = sqoopServerRunner.getSqoopClient(USER1); + assertTrue(client.getPrivilegesByPrincipal(role1Princ, allConnector).size() == 1); + + // user2 can't show privileges on role1, because user2 doesn't belong to role1 + client = sqoopServerRunner.getSqoopClient(USER2); + try { + client.getPrivilegesByPrincipal(role1Princ, allConnector); + fail("expected SentryAccessDeniedException was not thrown"); + } catch (Exception e) {
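+ // assertCausedMessage is defined in AbstractSqoopSentryTestBase (not shown in this diff); it is assumed to search the exception's cause chain for the given message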
assertCausedMessage(e, "SentryAccessDeniedException"); + } + } +} diff --git a/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TomcatSqoopRunner.java b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TomcatSqoopRunner.java new file mode 100644 index 000000000..cea9acc58 --- /dev/null +++ b/sentry-tests/sentry-tests-sqoop/src/test/java/org/apache/sentry/tests/e2e/sqoop/TomcatSqoopRunner.java @@ -0,0 +1,319 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.sqoop; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotSame; + +import java.io.File; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; +import org.apache.sqoop.client.SqoopClient; +import org.apache.sqoop.common.test.db.DatabaseProvider; +import org.apache.sqoop.common.test.db.DatabaseProviderFactory; +import org.apache.sqoop.common.test.db.TableName; +import org.apache.sqoop.common.test.utils.NetworkUtils; +import org.apache.sqoop.model.MConfigList; +import org.apache.sqoop.model.MJob; +import org.apache.sqoop.model.MLink; +import org.apache.sqoop.model.MPersistableEntity; +import org.apache.sqoop.test.minicluster.SqoopMiniCluster; +import org.apache.sqoop.validation.Status; +import org.codehaus.cargo.container.ContainerType; +import org.codehaus.cargo.container.InstalledLocalContainer; +import org.codehaus.cargo.container.configuration.ConfigurationType; +import org.codehaus.cargo.container.configuration.LocalConfiguration; +import org.codehaus.cargo.container.deployable.WAR; +import org.codehaus.cargo.container.installer.Installer; +import org.codehaus.cargo.container.installer.ZipURLInstaller; +import org.codehaus.cargo.container.property.ServletPropertySet; +import org.codehaus.cargo.container.tomcat.TomcatPropertySet; +import org.codehaus.cargo.generic.DefaultContainerFactory; +import org.codehaus.cargo.generic.configuration.DefaultConfigurationFactory; + +import com.google.common.base.Joiner; + +public class TomcatSqoopRunner { + private static final Logger LOG = Logger.getLogger(TomcatSqoopRunner.class); + private SqoopServerEnableSentry server; + private DatabaseProvider provider; + private String temporaryPath; + + public TomcatSqoopRunner(String temporaryPath, String serverName, String sentrySite) + throws Exception { + this.temporaryPath = temporaryPath; + this.server = new SqoopServerEnableSentry(temporaryPath, serverName, sentrySite); + this.provider = DatabaseProviderFactory.getProvider(System.getProperties()); + } + + public void start() throws Exception { + server.start(); + provider.start(); + } + + public void stop() throws Exception { + 
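+ // Shut down the Sqoop mini cluster first, then the backing database provider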
server.stop(); + provider.stop(); + } + + + /** + * Create link. + * + * With asserts to make sure that it was created correctly. + * @param client + * @param link + */ + public void saveLink(SqoopClient client, MLink link) { + assertEquals(Status.OK, client.saveLink(link)); + assertNotSame(MPersistableEntity.PERSISTANCE_ID_DEFAULT, link.getPersistenceId()); + } + + /** + * Update link. + * + * With asserts to make sure that it was updated correctly. + * @param client + * @param link + */ + public void updateLink(SqoopClient client, MLink link) { + assertEquals(Status.OK, client.updateLink(link)); + assertNotSame(MPersistableEntity.PERSISTANCE_ID_DEFAULT, link.getPersistenceId()); + } + + /** + * Create job. + * + * With asserts to make sure that it was created correctly. + * + * @param client + * @param job + */ + public void saveJob(SqoopClient client, MJob job) { + assertEquals(Status.OK, client.saveJob(job)); + assertNotSame(MPersistableEntity.PERSISTANCE_ID_DEFAULT, job.getPersistenceId()); + } + + /** + * Fill the HDFS link config with the mini cluster's configuration directory. + * + * @param link + */ + public void fillHdfsLink(MLink link) { + MConfigList configs = link.getConnectorLinkConfig(); + configs.getStringInput("linkConfig.confDir").setValue(server.getConfigurationPath()); + } + + /** + * Fill link config based on the currently active provider. + * + * @param link MLink object to fill + */ + public void fillRdbmsLinkConfig(MLink link) { + MConfigList configs = link.getConnectorLinkConfig(); + configs.getStringInput("linkConfig.jdbcDriver").setValue(provider.getJdbcDriver()); + configs.getStringInput("linkConfig.connectionString").setValue(provider.getConnectionUrl()); + configs.getStringInput("linkConfig.username").setValue(provider.getConnectionUsername()); + configs.getStringInput("linkConfig.password").setValue(provider.getConnectionPassword()); + } + + public void fillHdfsFromConfig(MJob job) { + MConfigList fromConfig = job.getFromJobConfig(); + fromConfig.getStringInput("fromJobConfig.inputDirectory").setValue(temporaryPath + "/output"); + } + + public void fillRdbmsToConfig(MJob job) { + MConfigList toConfig = job.getToJobConfig(); + toConfig.getStringInput("toJobConfig.tableName").setValue(provider.
+ escapeTableName(new TableName(getClass().getSimpleName()).getTableName())); + } + + /** + * Get a SqoopClient for a specific user. + * @param user + */ + public SqoopClient getSqoopClient(String user) { + setAuthenticationUser(user); + return new SqoopClient(server.getServerUrl()); + } + + /** + * Set the mock user for Sqoop simple authentication. + * @param user + */ + private void setAuthenticationUser(String user) { + System.setProperty("user.name", user); + } + + private static class SqoopServerEnableSentry extends SqoopMiniCluster { + private static final String WAR_PATH = "thirdparty/sqoop.war"; + private static final String TOMCAT_PATH = "thirdparty/apache-tomcat-6.0.36.zip"; + + private InstalledLocalContainer container = null; + private Integer port; + private Integer ajpPort; + private String sentrySite; + private String serverName; + + SqoopServerEnableSentry(String temporaryPath, String serverName, String sentrySite) + throws Exception { + super(temporaryPath); + this.serverName = serverName; + this.sentrySite = sentrySite; + // Random free ports + this.port = NetworkUtils.findAvailablePort(); + this.ajpPort = NetworkUtils.findAvailablePort(); + } + + @Override + public Map getSecurityConfiguration() { + Map properties = new HashMap(); + configureAuthentication(properties); + configureSentryAuthorization(properties); + return properties; + } + + private void configureAuthentication(Map properties) { + /** Simple authentication */ + properties.put("org.apache.sqoop.authentication.type", "SIMPLE"); + properties.put("org.apache.sqoop.authentication.handler", + "org.apache.sqoop.security.SimpleAuthenticationHandler"); + } + + private void configureSentryAuthorization(Map properties) { + properties.put("org.apache.sqoop.security.authorization.handler", + "org.apache.sentry.sqoop.authz.SentryAuthorizationHander"); + properties.put("org.apache.sqoop.security.authorization.access_controller", + "org.apache.sentry.sqoop.authz.SentryAccessController"); + properties.put("org.apache.sqoop.security.authorization.validator", + "org.apache.sentry.sqoop.authz.SentryAuthorizationValidator"); + properties.put("org.apache.sqoop.security.authorization.server_name", serverName); + properties.put("sentry.sqoop.site.url", sentrySite); + /** Put the Sentry-related jars on the classpath */ + List extraClassPath = new LinkedList(); + for (String jar : System.getProperty("java.class.path").split(":")) { + if ((jar.contains("sentry") || jar.contains("shiro-core") || jar.contains("libthrift")) + && jar.endsWith("jar")) { + extraClassPath.add(jar); + } + } + properties.put("org.apache.sqoop.classpath.extra", Joiner.on(":").join(extraClassPath)); + } + + @Override + public void start() throws Exception { + // Container has already been started + if (container != null) { + return; + } + prepareTemporaryPath(); + + // Source: http://cargo.codehaus.org/Functional+testing + String tomcatPath = getTemporaryPath() + "/tomcat"; + String extractPath = tomcatPath + "/extract"; + String confPath = tomcatPath + "/conf"; + + Installer installer = new ZipURLInstaller(new File(TOMCAT_PATH).toURI().toURL(), null, extractPath); + installer.install(); + + LocalConfiguration configuration = (LocalConfiguration) new DefaultConfigurationFactory() + .createConfiguration("tomcat6x", ContainerType.INSTALLED, ConfigurationType.STANDALONE, + confPath); + container = (InstalledLocalContainer) new DefaultContainerFactory().createContainer("tomcat6x", + ContainerType.INSTALLED, configuration); + + // Set home to our installed Tomcat instance
container.setHome(installer.getHome()); + + // Store Tomcat logs in a file, as they are quite handy for debugging + container.setOutput(getTemporaryPath() + "/log/tomcat.log"); + + // Propagate system properties to the container + Map map = new HashMap((Map) System.getProperties()); + container.setSystemProperties(map); + + // Propagate Hadoop jars to the container classpath + // In the real world, they would be installed manually by the user + List extraClassPath = new LinkedList(); + String[] classpath = System.getProperty("java.class.path").split(":"); + for (String jar : classpath) { + if (jar.contains("hadoop-") || // Hadoop jars + jar.contains("hive-") || // Hive jars + jar.contains("commons-") || // Apache Commons libraries + jar.contains("httpcore-") || // Apache HttpCore libraries + jar.contains("httpclient-") || // Apache HttpClient libraries + jar.contains("htrace-") || // htrace-core libraries, newly added in + // Hadoop 2.6.0 + jar.contains("zookeeper-") || // ZooKeeper libraries, newly added in + // Hadoop 2.6.0 + jar.contains("curator-") || // Curator libraries, newly added in Hadoop + // 2.6.0 + jar.contains("log4j-") || // Log4j + jar.contains("slf4j-") || // SLF4J + jar.contains("jackson-") || // Jackson + jar.contains("derby") || // Derby drivers + jar.contains("avro-") || // Avro + jar.contains("parquet-") || // Parquet + jar.contains("mysql") || // MySQL JDBC driver + jar.contains("postgre") || // PostgreSQL JDBC driver + jar.contains("oracle") || // Oracle driver + jar.contains("terajdbc") || // Teradata driver + jar.contains("tdgs") || // Teradata driver + jar.contains("nzjdbc") || // Netezza driver + jar.contains("sqljdbc") || // Microsoft SQL Server driver + jar.contains("libfb303") || // Facebook Thrift lib + jar.contains("datanucleus-") || // DataNucleus libs + jar.contains("google") // Google libraries (guava, ...) + ) { + extraClassPath.add(jar); + } + } + container.setExtraClasspath(extraClassPath.toArray(new String[extraClassPath.size()])); + + // Finally, deploy the Sqoop server war file + configuration.addDeployable(new WAR(WAR_PATH)); + configuration.setProperty(ServletPropertySet.PORT, port.toString()); + configuration.setProperty(TomcatPropertySet.AJP_PORT, ajpPort.toString()); + //configuration.setProperty(GeneralPropertySet.JVMARGS, "\"-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8006\""); + LOG.info("Tomcat extract path: " + extractPath); + LOG.info("Tomcat home path: " + installer.getHome()); + LOG.info("Tomcat config home path: " + confPath); + LOG.info("Starting Tomcat server on port " + port); + container.start(); + } + + @Override + public void stop() throws Exception { + if (container != null) { + container.stop(); + } + } + + /** + * Return the server URL. + */ + public String getServerUrl() { + // We're not doing any changes, so return the default URL + return "http://localhost:" + port + "/sqoop/"; + } + } +}
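The Sqoop e2e test classes above all drive Sentry authorization through the same client sequence: create a role, bind the role to a group, then attach privileges to the role. A minimal sketch of that pattern, for reference only (the role, group, and server names come from AbstractSqoopSentryTestBase, and the client is obtained as an admin user via TomcatSqoopRunner.getSqoopClient):

    // Sketch of the grant pattern repeated throughout these tests
    SqoopClient client = sqoopServerRunner.getSqoopClient(ADMIN_USER);
    MRole role = new MRole(ROLE1);
    MPrincipal group = new MPrincipal(GROUP1, MPrincipal.TYPE.GROUP);
    MResource server = new MResource(SQOOP_SERVER_NAME, MResource.TYPE.SERVER);
    MPrivilege read = new MPrivilege(server, SqoopActionConstant.READ, false);
    client.createRole(role);                        // 1. create the role
    client.grantRole(Lists.newArrayList(role),      // 2. bind the role to a group
        Lists.newArrayList(group));
    client.grantPrivilege(                          // 3. attach the privilege to the role
        Lists.newArrayList(new MPrincipal(role.getName(), MPrincipal.TYPE.ROLE)),
        Lists.newArrayList(read));

Whether a subsequent client call succeeds is then decided by the privileges reachable through the caller's group memberships, which is exactly what the user1/user2/user3 assertions in TestServerScopeEndToEnd exercise.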