@@ -73,10 +73,8 @@ public Configuration getConfiguration(Configuration confToClone) {
 
     @Test
     public void basicWriteAndReadBackTest() throws SQLException {
-        SparkConf sparkConf = new SparkConf().setMaster("local").setAppName("phoenix-test")
-                .set("spark.hadoopRDD.ignoreEmptySplits", "false");
-        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
-        SQLContext sqlContext = new SQLContext(jsc);
+
+        SparkSession spark = SparkUtil.getSparkSession();
         String tableName = generateUniqueName();
 
         try (Connection conn = DriverManager.getConnection(getUrl());
@@ -85,141 +83,122 @@ public void basicWriteAndReadBackTest() throws SQLException {
                 "CREATE TABLE " + tableName + " (id INTEGER PRIMARY KEY, v1 VARCHAR)");
         }
 
-        try (SparkSession spark = sqlContext.sparkSession()) {
+        StructType schema =
+                new StructType(new StructField[] {
+                        new StructField("id", DataTypes.IntegerType, false, Metadata.empty()),
+                        new StructField("v1", DataTypes.StringType, false, Metadata.empty()) });
 
-            StructType schema =
-                    new StructType(new StructField[] {
-                            new StructField("id", DataTypes.IntegerType, false, Metadata.empty()),
-                            new StructField("v1", DataTypes.StringType, false, Metadata.empty()) });
+        // Use old zkUrl
+        Dataset<Row> df1 =
+                spark.createDataFrame(
+                        Arrays.asList(RowFactory.create(1, "x")),
+                        schema);
 
-            // Use old zkUrl
-            Dataset<Row> df1 =
-                    spark.createDataFrame(
-                            Arrays.asList(RowFactory.create(1, "x")),
-                            schema);
+        df1.write().format("phoenix").mode(SaveMode.Overwrite)
+                .option("table", tableName)
+                .option(ZOOKEEPER_URL, getUrl())
+                .save();
+
+        // Use jdbcUrl
+        // In Phoenix 5.2+ getUrl() returns a JDBC URL, in earlier versions it returns a ZK
+        // quorum
+        String jdbcUrl = getUrl();
+        if (!jdbcUrl.startsWith(JDBC_PROTOCOL)) {
+            jdbcUrl = JDBC_PROTOCOL_ZK + JDBC_PROTOCOL_SEPARATOR + jdbcUrl;
+        }
+        Dataset<Row> df2 =
+                spark.createDataFrame(
+                        Arrays.asList(RowFactory.create(2, "x")),
+                        schema);
 
-            df1.write().format("phoenix").mode(SaveMode.Overwrite)
+        df2.write().format("phoenix").mode(SaveMode.Overwrite)
                 .option("table", tableName)
-                .option(ZOOKEEPER_URL, getUrl())
+                .option(JDBC_URL, jdbcUrl)
                 .save();
 
-            // Use jdbcUrl
-            // In Phoenix 5.2+ getUrl() returns a JDBC URL, in earlier versions it returns a ZK
-            // quorum
-            String jdbcUrl = getUrl();
-            if (!jdbcUrl.startsWith(JDBC_PROTOCOL)) {
-                jdbcUrl = JDBC_PROTOCOL_ZK + JDBC_PROTOCOL_SEPARATOR + jdbcUrl;
-            }
-            Dataset<Row> df2 =
-                    spark.createDataFrame(
-                            Arrays.asList(RowFactory.create(2, "x")),
-                            schema);
+        // Use default from hbase-site.xml
+        Dataset<Row> df3 =
+                spark.createDataFrame(
+                        Arrays.asList(RowFactory.create(3, "x")),
+                        schema);
 
-            df2.write().format("phoenix").mode(SaveMode.Overwrite)
-                .option("table", tableName)
-                .option(JDBC_URL, jdbcUrl)
-                .save();
+        df3.write().format("phoenix").mode(SaveMode.Overwrite)
+                .option("table", tableName)
+                .save();
 
-            // Use default from hbase-site.xml
-            Dataset<Row> df3 =
-                    spark.createDataFrame(
-                            Arrays.asList(RowFactory.create(3, "x")),
-                            schema);
+        try (Connection conn = DriverManager.getConnection(getUrl());
+                Statement stmt = conn.createStatement()) {
+            ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName);
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(1));
+            assertEquals("x", rs.getString(2));
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            assertEquals("x", rs.getString(2));
+            assertTrue(rs.next());
+            assertEquals(3, rs.getInt(1));
+            assertEquals("x", rs.getString(2));
+            assertFalse(rs.next());
+        }
 
-            df3.write().format("phoenix").mode(SaveMode.Overwrite)
+        Dataset df1Read = spark.read().format("phoenix")
                 .option("table", tableName)
-                .save();
-
-            try (Connection conn = DriverManager.getConnection(getUrl());
-                    Statement stmt = conn.createStatement()) {
-                ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName);
-                assertTrue(rs.next());
-                assertEquals(1, rs.getInt(1));
-                assertEquals("x", rs.getString(2));
-                assertTrue(rs.next());
-                assertEquals(2, rs.getInt(1));
-                assertEquals("x", rs.getString(2));
-                assertTrue(rs.next());
-                assertEquals(3, rs.getInt(1));
-                assertEquals("x", rs.getString(2));
-                assertFalse(rs.next());
-            }
-
-            Dataset df1Read = spark.read().format("phoenix")
-                .option("table", tableName)
-                .option(PhoenixDataSource.JDBC_URL, getUrl()).load();
+                .option(PhoenixDataSource.JDBC_URL, getUrl()).load();
 
-            assertEquals(3l, df1Read.count());
+        assertEquals(3l, df1Read.count());
 
-            // Use jdbcUrl
-            Dataset df2Read = spark.read().format("phoenix")
-                .option("table", tableName)
-                .option(PhoenixDataSource.JDBC_URL, jdbcUrl)
-                .load();
-
-            assertEquals(3l, df2Read.count());
+        // Use jdbcUrl
+        Dataset df2Read = spark.read().format("phoenix")
+                .option("table", tableName)
+                .option(PhoenixDataSource.JDBC_URL, jdbcUrl)
+                .load();
 
-            // Use default
-            Dataset df3Read = spark.read().format("phoenix")
-                .option("table", tableName)
-                .load();
+        assertEquals(3l, df2Read.count());
 
-            assertEquals(3l, df3Read.count());
+        // Use default
+        Dataset df3Read = spark.read().format("phoenix")
+                .option("table", tableName)
+                .load();
 
-        } finally {
-            jsc.stop();
-        }
+        assertEquals(3l, df3Read.count());
     }
 
     @Test
     public void lowerCaseWriteTest() throws SQLException {
-        SparkConf sparkConf = new SparkConf().setMaster("local").setAppName("phoenix-test")
-                .set("spark.hadoopRDD.ignoreEmptySplits", "false");
-        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
-        SQLContext sqlContext = new SQLContext(jsc);
+        SparkSession spark = SparkUtil.getSparkSession();
         String tableName = generateUniqueName();
 
         try (Connection conn = DriverManager.getConnection(getUrl());
                 Statement stmt = conn.createStatement()){
             stmt.executeUpdate("CREATE TABLE " + tableName + " (id INTEGER PRIMARY KEY, v1 VARCHAR, \"v1\" VARCHAR)");
         }
+        StructType schema = new StructType(new StructField[]{
+                new StructField("ID", DataTypes.IntegerType, false, Metadata.empty()),
+                new StructField("V1", DataTypes.StringType, false, Metadata.empty()),
+                new StructField("\"v1\"", DataTypes.StringType, false, Metadata.empty())
+        });
 
-        try (SparkSession spark = sqlContext.sparkSession()) {
-            //Doesn't help
-            spark.conf().set("spark.sql.caseSensitive", true);
-
-            StructType schema = new StructType(new StructField[]{
-                    new StructField("ID", DataTypes.IntegerType, false, Metadata.empty()),
-                    new StructField("V1", DataTypes.StringType, false, Metadata.empty()),
-                    new StructField("\"v1\"", DataTypes.StringType, false, Metadata.empty())
-            });
-
-            Dataset<Row> df = spark.createDataFrame(
-                    Arrays.asList(
-                            RowFactory.create(1, "x", "y")),
-                    schema);
-
-            df.write()
-                .format("phoenix")
-                .mode(SaveMode.Overwrite)
-                .option("table", tableName)
-                .option(PhoenixDataSource.SKIP_NORMALIZING_IDENTIFIER, "true")
-                .option(JDBC_URL, getUrl())
-                .save();
-
-            try (Connection conn = DriverManager.getConnection(getUrl());
-                    Statement stmt = conn.createStatement()) {
-                ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName);
-                assertTrue(rs.next());
-                assertEquals(1, rs.getInt(1));
-                assertEquals("x", rs.getString(2));
-                assertEquals("y", rs.getString(3));
-                assertFalse(rs.next());
-            }
+        Dataset<Row> df = spark.createDataFrame(
+                Arrays.asList(
+                        RowFactory.create(1, "x", "y")),
+                schema);
 
+        df.write()
+                .format("phoenix")
+                .mode(SaveMode.Overwrite)
+                .option("table", tableName)
+                .option(PhoenixDataSource.SKIP_NORMALIZING_IDENTIFIER, "true")
+                .option(JDBC_URL, getUrl())
+                .save();
 
-        } finally {
-            jsc.stop();
+        try (Connection conn = DriverManager.getConnection(getUrl());
+                Statement stmt = conn.createStatement()) {
+            ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName);
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(1));
+            assertEquals("x", rs.getString(2));
+            assertEquals("y", rs.getString(3));
+            assertFalse(rs.next());
         }
     }
 
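Both tests now obtain their session from SparkUtil.getSparkSession() rather than building a SparkConf, JavaSparkContext, and SQLContext per test. The helper itself is not part of this hunk; below is a minimal sketch of what such a helper might look like, assuming it simply wraps SparkSession.builder().getOrCreate() with the same settings the removed SparkConf code passed. The actual SparkUtil in the repository may differ.

    // Hypothetical sketch of the shared-session helper the new tests call.
    // Assumption: it reuses one local SparkSession across tests, carrying over
    // the settings the removed per-test SparkConf setup used.
    import org.apache.spark.sql.SparkSession;

    public final class SparkUtil {

        private SparkUtil() {
        }

        public static SparkSession getSparkSession() {
            // getOrCreate() returns the already-active session if one exists,
            // so every test in the JVM shares a single local Spark context.
            return SparkSession.builder()
                    .master("local")
                    .appName("phoenix-test")
                    .config("spark.hadoopRDD.ignoreEmptySplits", "false")
                    .getOrCreate();
        }
    }

Because getOrCreate() hands back a shared session, the per-test finally { jsc.stop(); } blocks become unnecessary, which is consistent with their removal in both tests above.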