CSV

To "connect" to a CSV file, is possible to import the CSVConnector class from the hamana.connector.file.csv module. As a shortcut, the CSVConnector class is also available in the hamana.connector.file module as CSV.

import hamana as hm

# connect
customers_csv = hm.connector.file.CSV("customers.csv")
customers = customers_csv.execute()

# perform operations
# ...

hamana.connector.file.csv

CSVConnector

CSVConnector(
    file_path: str | Path,
    dialect: type[csv.Dialect] | None = None,
    has_header: bool | None = None,
    columns: list[Column] | None = None,
    encoding: str = getencoding(),
)

Class representing the connector to a CSV file.

Note that initializing the object does not read the CSV file; the class only performs checks on the file and extracts metadata.

To process the CSV file, use the methods execute() or to_sqlite().

Example:

import hamana as hm

csv_file = hm.connector.file.CSV('path/to/file.csv')
query = csv_file.execute()

print(query.result.head())
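
Since initialization only gathers metadata, the inferred properties can also be inspected before any data is extracted. A minimal sketch (assuming a local customers.csv file):

import hamana as hm

csv_file = hm.connector.file.CSV("customers.csv")

# initialization only inspects the file: the dialect, header flag and
# columns are inferred without loading the data
print(csv_file.file_name)
print(csv_file.has_header)
print([column.name for column in csv_file.columns])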

Parameters:

  • file_path (str | Path), required.
    Path to the CSV file.

  • dialect (type[csv.Dialect] | None), default None.
    Dialect of the CSV file. A dialect is a class that defines the parameters for reading and writing CSV files, such as the delimiter, quotechar, and quoting. Commonly used dialects are csv.excel (for CSV files generated by Excel), csv.excel_tab (for tab-delimited files generated by Excel), and csv.unix_dialect (for Unix-style CSV files). If None, the class tries to infer the dialect using the csv.Sniffer.sniff() method.

  • has_header (bool | None), default None.
    Flag to indicate if the CSV file has a header. If None, the class tries to infer it; note that this inference could lead to false positives.

  • columns (list[Column] | None), default None.
    List of columns in the CSV file. If columns are provided, make sure to list all of them. By default, the class tries to infer the columns directly from the file. If no header is available, the columns are named column_1, column_2, and so on. Column data types are inferred automatically by taking a sample of 1000 rows from the file, converting it into a DataFrame, and using the DataFrame's data types.

  • encoding (str), default getencoding().
    Encoding to use when reading the file. By default, the class uses the system encoding returned by the locale.getencoding() method.
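
When inference is not reliable for a given file, the dialect, header flag, and encoding can be passed explicitly. A minimal sketch using only the parameters documented above (the file path is hypothetical):

import csv

import hamana as hm

# explicitly declare an Excel-style dialect, a header row and UTF-8 encoding
orders_csv = hm.connector.file.CSV(
    "orders.csv",
    dialect = csv.excel,
    has_header = True,
    encoding = "utf-8",
)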
Source code in src/hamana/connector/file/csv.py, lines 87-136
def __init__(
    self,
    file_path: str | Path,
    dialect: type[csv.Dialect] | None = None,
    has_header: bool | None = None,
    columns: list[Column] | None = None,
    encoding: str = getencoding()
) -> None:
    logger.debug("start")

    self.file_path = Path(file_path)
    logger.debug(f"file_path: {self.file_path}")

    self.encoding = encoding
    logger.debug(f"encoding: {self.encoding}")

    # check file existence
    if not self.file_path.exists():
        error_msg = f"File not found: {self.file_path}"
        logger.error(error_msg)
        raise FileNotFoundError(error_msg)

    # set file name
    self.file_name = self.file_path.name
    logger.debug(f"file_name: {self.file_name}")

    # set dialect
    infer_dialect = self._infer_dialect()
    if dialect is None:
        logger.info("dialect is not provided, trying to infer..")
        self.dialect = infer_dialect
    else:
        # compare dialect with inferred dialect
        self._compare_dialects(dialect, infer_dialect)
        self.dialect = dialect

    # set has_header
    if has_header is None:
        logger.info("has_header is not provided, trying to infer..")
        has_header = self._check_has_header()
    self.has_header = has_header
    logger.debug(f"has_header: {self.has_header}")

    # set columns
    infer_columns = self._infer_columns()
    self.columns = self._compute_columns(infer_columns, columns)
    logger.debug(f"columns: {[column.name for column in self.columns]}")

    logger.debug("end")
    return

dialect instance-attribute

dialect: type[csv.Dialect]

Dialect of the CSV file.

file_path instance-attribute

file_path: Path = Path(file_path)

Path and name of the CSV file.

encoding instance-attribute

encoding: str = encoding

Encoding to use during the reading process of the file.

file_name instance-attribute

file_name: str = self.file_path.name

Name of the CSV file.

has_header instance-attribute

has_header: bool = has_header

Flag to indicate if the CSV file has a header.

columns instance-attribute

columns: list[Column] = self._compute_columns(
    infer_columns, columns
)

List of columns in the CSV file.

execute

execute() -> Query

Function used to extract data from the CSV file.

Returns:

  • Query: the function automatically creates a Query object, executes the extraction, and returns the object populated with the resulting rows.
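
The extracted rows are exposed as a pandas DataFrame on the result attribute of the returned Query, as in the class-level example above. A short sketch (assuming a local customers.csv file):

import hamana as hm

query = hm.connector.file.CSV("customers.csv").execute()

# the extracted rows are available as a pandas DataFrame,
# with data types adjusted according to the inferred columns
df = query.result
print(df.dtypes)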

Source code in src/hamana/connector/file/csv.py, lines 138-176
def execute(self) -> Query:
    """
        Function used to extract data from the CSV file.

        Returns:
            The function automatically creates a `Query` object, 
                executes the extraction and returns the object created
                with the resulting rows.
    """
    logger.debug("start")

    # set query
    query = Query(
        query = f"SELECT * FROM '{self.file_name}'",
        columns = self.columns
    )
    logger.info(f"query created: {query.query}")

    # read CSV file
    logger.debug("reading CSV file")
    df_result = pd.read_csv(
        filepath_or_buffer = self.file_path,
        dialect = self.dialect, # type: ignore
        header = 0 if self.has_header else None,
        names = [column.name for column in self.columns],
        encoding = self.encoding,
        dtype = "object"
    )
    logger.info(f"data extracted, rows: {df_result.shape[0]}, columns: {df_result.shape[1]}")

    # adjust columns
    logger.debug("adjusting columns and data types")
    df_result = query.adjust_df(df_result)

    # set result
    query.result = df_result

    logger.debug("end")
    return query

batch_execute

batch_execute(
    batch_size: int,
) -> Generator[list[list], None, None]

Function used to extract data from the CSV file and return the results in batches. This approach is used to avoid memory issues when dealing with large datasets.

Note that the returned rows are not adjusted in terms of data types; they are provided as raw values.

Parameters:

  • batch_size (int), required.
    Size of the batch to return.

Returns:

  • Generator[list[list], None, None]: a generator that yields the results in batches.
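
A minimal usage sketch (assuming a local customers.csv file; handle_rows is a hypothetical downstream handler):

import hamana as hm

csv_file = hm.connector.file.CSV("customers.csv")

for batch in csv_file.batch_execute(batch_size = 5_000):
    # each batch is a list of raw rows; no data type adjustment is applied
    handle_rows(batch)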

Source code in src/hamana/connector/file/csv.py, lines 178-236
def batch_execute(self, batch_size: int) -> Generator[list[list], None, None]:
    """
        Function used to extract data from the CSV file and return the results in batches. 
        This approach is used to avoid memory issues when dealing with large datasets.

        Observe that the returned data are not adjusted in terms of data types, but 
        provided as raw data.

        Parameters:
            batch_size: size of the batch to return.

        Returns:
            Generator used to return the results in batches.
    """
    logger.debug("start")
    logger.debug(f"batch size: {batch_size}")

    # open file
    dt_start = datetime.now()
    logger.debug(f"open file {self.file_path}")
    with open(self.file_path, "r", newline = "", encoding = self.encoding) as file:
        reader = csv.reader(file, dialect = self.dialect)

        # skip header
        if self.has_header:
            logger.debug("header skipped")
            next(reader)

        # read rows
        batch = []
        row_count = 0
        batch_count = 1

        try:
            for row in reader:
                batch.append(row)
                row_count += 1
                if len(batch) == batch_size:
                    if batch_count % 100 == 0:
                        logger.info(f"batch {batch_count} read")
                    yield batch

                    # reset batch
                    batch = []
                    batch_count += 1
        except UnicodeDecodeError as e:
            err_msg = f"ERROR: parsing {row_count + 1} row."
            logger.error(err_msg)
            logger.exception(e)
            raise CSVDecodeRowError(err_msg)

        # yield remaining rows
        if len(batch) > 0:
            yield batch

    dt_end = datetime.now()
    logger.info(f"file read, elapsed time: {dt_end - dt_start}")
    logger.info(f"{row_count} rows processed ({batch_count} batches)")
    logger.debug("end")

to_sqlite

to_sqlite(
    table_name: str,
    raw_insert: bool = False,
    batch_size: int = 10000,
    mode: SQLiteDataImportMode = SQLiteDataImportMode.REPLACE,
) -> None

This function is used to extract data from the CSV file and insert it into the hamana internal database (HamanaConnector).

The hamana db is a SQLite database; for this reason, the bool, datetime, and timestamp data types are not supported. If some columns are defined with these data types, the method can perform an automatic conversion to a SQLite data type.

The conversions are:

  • bool columns are mapped to INTEGER data type, with the values True and False converted to 1 and 0.
  • datetime columns are mapped to REAL data type, with the values converted to a float number using the following format: YYYYMMDD.HHmmss. Observe that the integer part represents the date in the format YYYYMMDD, while the decimal part represents the time component in the format HHmmss.

By default, the method performs the automatic datatype conversion; set the raw_insert parameter to True to skip this conversion and improve INSERT efficiency.
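
For illustration, the datetime encoding described above can be reproduced with the sketch below; this follows the documented format and is not the library's actual conversion routine:

from datetime import datetime

def to_sqlite_real(value: datetime) -> float:
    # encodes e.g. 2024-05-03 14:30:05 as 20240503.143005
    return float(value.strftime("%Y%m%d")) + float(value.strftime("%H%M%S")) / 1_000_000

print(to_sqlite_real(datetime(2024, 5, 3, 14, 30, 5)))  # 20240503.143005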

Parameters:

  • table_name (str), required.
    Name of the table to insert the data into. By assumption, the table name is converted to uppercase.

  • raw_insert (bool), default False.
    Flag that disables the datatype conversion during the INSERT process when set to True.

  • batch_size (int), default 10000.
    Size of the batch used during the insert process.

  • mode (SQLiteDataImportMode), default SQLiteDataImportMode.REPLACE.
    Mode used to import the data into the database.
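
A minimal usage sketch (assuming a local customers.csv file and that the hamana internal database has already been initialized elsewhere):

import hamana as hm

csv_file = hm.connector.file.CSV("customers.csv")

# insert the CSV contents into table CUSTOMERS (the name is uppercased),
# replacing the table if it already exists
csv_file.to_sqlite("customers", batch_size = 5_000)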
Source code in src/hamana/connector/file/csv.py, lines 238-367
def to_sqlite(
    self,
    table_name: str,
    raw_insert: bool = False,
    batch_size: int = 10_000,
    mode: SQLiteDataImportMode = SQLiteDataImportMode.REPLACE
) -> None:
    """
        This function is used to extract data from the CSV file and 
        insert it into the `hamana` internal database (`HamanaConnector`).

        The `hamana` db is a SQLite database, for this reason 
        `bool`, `datetime` and `timestamp` data types are not supported.
        If some of the columns are defined with these data types, 
        then the method could perform an automatic conversion to 
        a SQLite data type.

        The conversions are:

        - `bool` columns are mapped to `INTEGER` data type, with the values 
            `True` and `False` converted to `1` and `0`.
        - `datetime` columns are mapped to `REAL` data type, with the values 
            converted to a float number using the following format: `YYYYMMDD.HHmmss`.
            Observe that the integer part represents the date in the format `YYYYMMDD`,
            while the decimal part represents the time component in the format `HHmmss`.

        By default, the method performs the automatic datatype 
        conversion. However, use the parameter `raw_insert` to 
        **avoid** this conversion and improve the INSERT efficiency. 

        Parameters:
            table_name: name of the table to insert the data.
                By assumption, the table's name is converted to uppercase.
            raw_insert: bool value to disable/activate the datatype 
                conversion during the INSERT process. By default, it is 
                set to `False`.
            batch_size: size of the batch used during the inserting process.
            mode: mode of importing the data into the database.
    """
    logger.debug("start")

    table_name_upper = table_name.upper()
    insert_query: str = ""
    column_names: list[str] = []
    query = Query(
        query = f"SELECT * FROM '{self.file_name}'",
        columns = self.columns
    )

    # import internal database
    from ..db.hamana import HamanaConnector
    logger.debug("imported internal database")

    # get instance
    hamana_db = HamanaConnector.get_instance()
    hamana_connection = hamana_db.get_connection()
    logger.debug("internal database instance obtained")

    # check table existence
    query_check_table = Query(
        query = """SELECT COUNT(1) AS flag_exists FROM sqlite_master WHERE type = 'table' AND name = :table_name""",
        params = {"table_name": table_name_upper},
        columns = [
            BooleanColumn(order = 0, name = "flag_exists", true_value = 1, false_value = 0)
        ]
    )
    hamana_db.execute(query_check_table)
    logger.debug("table check query executed")

    flag_table_exists = False
    if query_check_table.result is not None:
        flag_table_exists = query_check_table.result["flag_exists"].values[0]
    logger.info(f"table exists: {flag_table_exists}")

    # block insert if mode is fail and table exists
    if flag_table_exists and mode == SQLiteDataImportMode.FAIL:
        logger.error(f"table {table_name_upper} already exists")
        raise TableAlreadyExists(table_name_upper)

    # execute extraction
    logger.info(f"extracting data, batch size: {batch_size}")
    flag_first_batch = True
    hamana_cursor = hamana_connection.cursor()
    for raw_batch in self.batch_execute(batch_size):

        if flag_first_batch:
            logger.info("generating insert query")
            insert_query = query.get_insert_query(table_name_upper)
            column_names = query.get_column_names()

            # create table
            if not flag_table_exists or mode == SQLiteDataImportMode.REPLACE:

                # drop if exists (for replace)
                if flag_table_exists:
                    logger.info(f"drop table {table_name_upper}")
                    hamana_cursor.execute(f"DROP TABLE {table_name_upper}")
                    hamana_connection.commit()
                    logger.debug("table dropped")

                logger.info(f"creating table {table_name_upper}")
                hamana_cursor.execute(query.get_create_query(table_name_upper))
                hamana_connection.commit()
                logger.debug("table created")

            # set flag
            flag_first_batch = False

        # adjust data types
        if raw_insert:
            # no data type conversion
            hamana_cursor.executemany(insert_query, raw_batch)
            hamana_connection.commit()
        else:
            # create temporary query
            query_temp = Query(query = query.query, columns = query.columns)

            # assign result (adjust data types)
            df_temp = pd.DataFrame(raw_batch, columns = column_names)
            df_temp = query_temp.adjust_df(df_temp)
            query_temp.result = df_temp

            # insert into table
            query_temp.to_sqlite(table_name_upper, SQLiteDataImportMode.APPEND)

    logger.info(f"data inserted into table {table_name_upper}")
    hamana_cursor.close()

    logger.debug("end")
    return