Commit c60851d: Code Refactoring
rahulbhatia023 committed Dec 20, 2023
1 parent 41f88cd commit c60851d
Showing 3 changed files with 44 additions and 44 deletions.
16 changes: 8 additions & 8 deletions build.sbt
@@ -20,7 +20,7 @@ ThisBuild / resolvers ++= Seq(
 
 ThisBuild / organization := "com.clairvoyant"
 
-ThisBuild / version := "3.1.0"
+ThisBuild / version := "3.1.1"
 
 // ----- PUBLISH TO GITHUB PACKAGES ----- //
 
@@ -77,9 +77,12 @@ ThisBuild / wartremoverErrors ++= Warts.allBut(
 // ----- TOOL VERSIONS ----- //
 
 val catsVersion = "2.10.0"
-val dataScalaxyReaderVersion = "1.1.0"
+val dataScalaxyReaderVersion = "2.0.0"
 val dataScalaxyTestUtilVersion = "1.0.0"
 val dataScalaxyTransformerVersion = "1.2.0"
+val dataScalaxyWriterAWSVersion = "2.0.0"
+val dataScalaxyWriterGCPVersion = "2.0.0"
+val dataScalaxyWriterLocalFileSystemVersion = "2.0.0"
 val googleCloudStorageVersion = "2.30.1"
 val jsonPathVersion = "2.8.0"
 val jwtCoreVersion = "9.4.5"
@@ -91,9 +94,6 @@ val sparkMLLibVersion = "3.5.0"
 val sttpVersion = "3.9.1"
 val testContainersScalaVersion = "0.41.0"
 val wireMockVersion = "3.0.1"
-val writerAWSVersion = "1.1.0"
-val writerGCPVersion = "1.1.0"
-val writerLocalFileSystemVersion = "1.0.0"
 val zioConfigVersion = "4.0.0-RC16"
 
 // ----- TOOL DEPENDENCIES ----- //
@@ -115,9 +115,9 @@ val dataScalaxyTransformerDependencies = Seq(
 )
 
 val dataScalaxyWriterDependencies = Seq(
-  "com.clairvoyant.data.scalaxy" %% "writer-local-file-system" % writerLocalFileSystemVersion,
-  "com.clairvoyant.data.scalaxy" %% "writer-aws" % writerAWSVersion,
-  "com.clairvoyant.data.scalaxy" %% "writer-gcp" % writerGCPVersion
+  "com.clairvoyant.data.scalaxy" %% "writer-local-file-system" % dataScalaxyWriterLocalFileSystemVersion,
+  "com.clairvoyant.data.scalaxy" %% "writer-aws" % dataScalaxyWriterAWSVersion,
+  "com.clairvoyant.data.scalaxy" %% "writer-gcp" % dataScalaxyWriterGCPVersion
 )
 
 val googleCloudStorageDependencies = Seq("com.google.cloud" % "google-cloud-storage" % googleCloudStorageVersion)
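Two things happen in build.sbt: the writer version vals gain the full dataScalaxy prefix so they match their artifact names, and the reader plus all three writers move to 2.0.0 together, in step with the call-site changes below. For orientation, the %% operator appends the Scala binary version to the artifact name at resolution time; a minimal sketch, using only coordinates visible in this diff:

// build.sbt sketch: %% expands "writer-aws" to writer-aws_3 on a Scala 3 build.
val dataScalaxyWriterAWSVersion = "2.0.0"

libraryDependencies +=
  "com.clairvoyant.data.scalaxy" %% "writer-aws" % dataScalaxyWriterAWSVersion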
@@ -17,35 +17,35 @@ case class Text(
   override def read(restonomerResponseBody: Seq[String])(using sparkSession: SparkSession): DataFrame =
     textFormat match {
       case csvTextFormat: CSVTextFormat =>
-        TextToDataFrameReader
-          .read[CSVTextFormat](
+        TextToDataFrameReader[CSVTextFormat]
+          .read(
             text = restonomerResponseBody,
             textFormat = csvTextFormat,
             originalSchema = None,
             adaptSchemaColumns = identity
           )
 
       case jsonTextFormat: JSONTextFormat =>
-        TextToDataFrameReader
-          .read[JSONTextFormat](
+        TextToDataFrameReader[JSONTextFormat]
+          .read(
             text = restonomerResponseBody,
             textFormat = jsonTextFormat,
             originalSchema = None,
             adaptSchemaColumns = identity
           )
 
       case xmlTextFormat: XMLTextFormat =>
-        TextToDataFrameReader
-          .read[XMLTextFormat](
+        TextToDataFrameReader[XMLTextFormat]
+          .read(
             text = restonomerResponseBody,
             textFormat = xmlTextFormat,
             originalSchema = None,
             adaptSchemaColumns = identity
           )
 
       case htmlTableTextFomat: HTMLTableTextFormat =>
-        TextToDataFrameReader
-          .read[HTMLTableTextFormat](
+        TextToDataFrameReader[HTMLTableTextFormat]
+          .read(
             text = restonomerResponseBody,
             textFormat = htmlTableTextFomat,
             originalSchema = None,
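This hunk is the whole refactor in miniature: the format type parameter moves off the read method and onto the reader itself, so TextToDataFrameReader[CSVTextFormat].read(...) replaces TextToDataFrameReader.read[CSVTextFormat](...). A self-contained Scala 3 sketch of that shape change follows; the Sketch-suffixed names are toy stand-ins, not the actual data-scalaxy-reader 2.0.0 API:

trait TextFormatSketch
final case class CsvFormatSketch(header: Boolean = true) extends TextFormatSketch

// 1.x shape: the format type parameter lives on the read method.
object ReaderV1Sketch:
  def read[T <: TextFormatSketch](text: Seq[String], textFormat: T): Seq[String] =
    text // real parsing elided in this sketch

// 2.x shape: apply[T] returns a reader already specialised to the format,
// so read itself takes no type arguments at the call site.
final class ReaderV2Sketch[T <: TextFormatSketch]:
  def read(text: Seq[String], textFormat: T): Seq[String] =
    text // real parsing elided in this sketch

object ReaderV2Sketch:
  def apply[T <: TextFormatSketch]: ReaderV2Sketch[T] = new ReaderV2Sketch[T]

@main def readerShapeDemo(): Unit =
  val body = Seq("id,name", "1,restonomer")
  val v1 = ReaderV1Sketch.read[CsvFormatSketch](body, CsvFormatSketch()) // old call shape
  val v2 = ReaderV2Sketch[CsvFormatSketch].read(body, CsvFormatSketch()) // new call shape
  assert(v1 == v2) // same behaviour either way: a pure call-shape refactor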
@@ -30,32 +30,32 @@ case class LocalFileSystem(
   def persist(restonomerResponseDF: DataFrame)(using sparkSession: SparkSession): Unit =
     fileFormat match {
       case csvFileFormat: LocalCSVFileFormat =>
-        DataFrameToLocalFileSystemWriter
-          .write[LocalCSVFileFormat](
+        DataFrameToLocalFileSystemWriter[LocalCSVFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = csvFileFormat,
             path = filePath
           )
 
       case jsonFileFormat: LocalJSONFileFormat =>
-        DataFrameToLocalFileSystemWriter
-          .write[LocalJSONFileFormat](
+        DataFrameToLocalFileSystemWriter[LocalJSONFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = jsonFileFormat,
             path = filePath
           )
 
       case xmlFileFormat: LocalXMLFileFormat =>
-        DataFrameToLocalFileSystemWriter
-          .write[LocalXMLFileFormat](
+        DataFrameToLocalFileSystemWriter[LocalXMLFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = xmlFileFormat,
             path = filePath
           )
 
       case parquetFileFormat: LocalParquetFileFormat =>
-        DataFrameToLocalFileSystemWriter
-          .write[LocalParquetFileFormat](
+        DataFrameToLocalFileSystemWriter[LocalParquetFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = parquetFileFormat,
             path = filePath
@@ -72,35 +72,35 @@ case class S3Bucket(
   override def persist(restonomerResponseDF: DataFrame)(using sparkSession: SparkSession): Unit =
     fileFormat match {
       case csvFileFormat: S3CSVFileFormat =>
-        DataFrameToS3BucketWriter
-          .write[S3CSVFileFormat](
+        DataFrameToS3BucketWriter[S3CSVFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = csvFileFormat,
             bucketName = bucketName,
             path = filePath
           )
 
       case jsonFileFormat: S3JSONFileFormat =>
-        DataFrameToS3BucketWriter
-          .write[S3JSONFileFormat](
+        DataFrameToS3BucketWriter[S3JSONFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = jsonFileFormat,
             bucketName = bucketName,
             path = filePath
           )
 
       case xmlFileFormat: S3XMLFileFormat =>
-        DataFrameToS3BucketWriter
-          .write[S3XMLFileFormat](
+        DataFrameToS3BucketWriter[S3XMLFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = xmlFileFormat,
             bucketName = bucketName,
             path = filePath
           )
 
       case parquetFileFormat: S3ParquetFileFormat =>
-        DataFrameToS3BucketWriter
-          .write[S3ParquetFileFormat](
+        DataFrameToS3BucketWriter[S3ParquetFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = parquetFileFormat,
             bucketName = bucketName,
@@ -128,35 +128,35 @@ case class GCSBucket(

     fileFormat match {
       case csvFileFormat: GCSCSVFileFormat =>
-        DataFrameToGCSBucketWriter
-          .write[GCSCSVFileFormat](
+        DataFrameToGCSBucketWriter[GCSCSVFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = csvFileFormat,
             bucketName = bucketName,
             path = filePath
           )
 
       case jsonFileFormat: GCSJSONFileFormat =>
-        DataFrameToGCSBucketWriter
-          .write[GCSJSONFileFormat](
+        DataFrameToGCSBucketWriter[GCSJSONFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = jsonFileFormat,
             bucketName = bucketName,
             path = filePath
           )
 
       case xmlFileFormat: GCSXMLFileFormat =>
-        DataFrameToGCSBucketWriter
-          .write[GCSXMLFileFormat](
+        DataFrameToGCSBucketWriter[GCSXMLFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = xmlFileFormat,
             bucketName = bucketName,
             path = filePath
           )
 
       case parquetFileFormat: GCSParquetFileFormat =>
-        DataFrameToGCSBucketWriter
-          .write[GCSParquetFileFormat](
+        DataFrameToGCSBucketWriter[GCSParquetFileFormat]
+          .write(
             dataFrame = restonomerResponseDF,
             fileFormat = parquetFileFormat,
             bucketName = bucketName,
@@ -188,8 +188,8 @@ case class BigQuery(

     writerType match {
       case directBigQueryWriterType: DirectBigQueryWriterType =>
-        DataFrameToBigQueryWriter
-          .write[DirectBigQueryWriterType](
+        DataFrameToBigQueryWriter[DirectBigQueryWriterType]
+          .write(
             dataFrame = restonomerResponseDF,
             table = table,
             dataset = dataset,
@@ -200,8 +200,8 @@
           )
 
       case indirectBigQueryWriterType: IndirectBigQueryWriterType =>
-        DataFrameToBigQueryWriter
-          .write[IndirectBigQueryWriterType](
+        DataFrameToBigQueryWriter[IndirectBigQueryWriterType]
+          .write(
             dataFrame = restonomerResponseDF,
             table = table,
             dataset = dataset,
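The writer files repeat the same move for every sink (local file system, S3, GCS, BigQuery): each persist method still pattern-matches on the configured format, and the case pattern narrows the scrutinee so the explicit type argument on the writer agrees with the matched format. A toy Scala 3 sketch of that call shape, again with hypothetical Sketch-suffixed names rather than the data-scalaxy writer API:

sealed trait FileFormatSketch
final case class CsvFileFormatSketch(delimiter: String = ",") extends FileFormatSketch
final case class JsonFileFormatSketch(multiLine: Boolean = false) extends FileFormatSketch

final class FileWriterSketch[T <: FileFormatSketch]:
  def write(rows: Seq[String], fileFormat: T, path: String): Unit =
    println(s"writing ${rows.size} rows as $fileFormat to $path") // real I/O elided

object FileWriterSketch:
  def apply[T <: FileFormatSketch]: FileWriterSketch[T] = new FileWriterSketch[T]

// Shape of the persist methods above: the match narrows fileFormat to the
// concrete case type, keeping the explicit type argument consistent with it.
def persistSketch(rows: Seq[String], fileFormat: FileFormatSketch, path: String): Unit =
  fileFormat match
    case csv: CsvFileFormatSketch =>
      FileWriterSketch[CsvFileFormatSketch].write(rows, csv, path)
    case json: JsonFileFormatSketch =>
      FileWriterSketch[JsonFileFormatSketch].write(rows, json, path)

@main def writerShapeDemo(): Unit =
  persistSketch(Seq("a", "b"), CsvFileFormatSketch(), "/tmp/out.csv")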
