# database_api.py
import duckdb
import pandas as pd
import pyarrow as pa
from pathlib import Path
import tempfile
import os
from typing import Optional, List, Dict, Any, Union, Iterator, Tuple

# --- Custom Exceptions ---
class DatabaseAPIError(Exception):
    """Base exception for our custom API."""
    pass

class QueryError(DatabaseAPIError):
    """Exception raised for errors during query execution."""
    pass

# --- Helper function to format COPY options ---
def _format_copy_options(options: Optional[Dict[str, Any]]) -> str:
    if not options:
        return ""
    opts_parts = []
    for k, v in options.items():
        key_upper = k.upper()
        if isinstance(v, bool):
            value_repr = str(v).upper()
        elif isinstance(v, (int, float)):
            value_repr = str(v)
        elif isinstance(v, str):
            escaped_v = v.replace("'", "''")
            value_repr = f"'{escaped_v}'"
        else:
            value_repr = repr(v)
        opts_parts.append(f"{key_upper} {value_repr}")

    opts_str = ", ".join(opts_parts)
    return f"WITH ({opts_str})"

# --- Main DatabaseAPI Class ---
class DatabaseAPI:
    def __init__(self,
                 db_path: Union[str, Path] = ":memory:",
                 read_only: bool = False,
                 config: Optional[Dict[str, str]] = None):
        self._db_path = str(db_path)
        self._config = config or {}
        self._read_only = read_only
        self._conn: Optional[duckdb.DuckDBPyConnection] = None
        try:
            self._conn = duckdb.connect(
                database=self._db_path,
                read_only=self._read_only,
                config=self._config
            )
            print(f"Connected to DuckDB database at '{self._db_path}'")
        except duckdb.Error as e:
            print(f"Failed to connect to DuckDB: {e}")
            raise DatabaseAPIError(f"Failed to connect to DuckDB: {e}") from e

    def _ensure_connection(self):
        if self._conn is None:
            raise DatabaseAPIError("Database connection is not established or has been closed.")
        try:
            self._conn.execute("SELECT 1")
        except (duckdb.ConnectionException, RuntimeError) as e:
            if "Connection has already been closed" in str(e) or "connection closed" in str(e).lower():
                self._conn = None
                raise DatabaseAPIError("Database connection is closed.") from e
            else:
                raise DatabaseAPIError(f"Database connection error: {e}") from e

    # --- Basic Query Methods ---
    def execute_sql(self, sql: str, parameters: Optional[List[Any]] = None) -> None:
        self._ensure_connection()
        print(f"Executing SQL: {sql}")
        try:
            self._conn.execute(sql, parameters)
        except duckdb.Error as e:
            print(f"Error executing SQL: {e}")
            raise QueryError(f"Error executing SQL: {e}") from e

    def query_sql(self, sql: str, parameters: Optional[List[Any]] = None) -> duckdb.DuckDBPyRelation:
        self._ensure_connection()
        print(f"Querying SQL: {sql}")
        try:
            return self._conn.sql(sql, params=parameters)
        except duckdb.Error as e:
            print(f"Error querying SQL: {e}")
            raise QueryError(f"Error querying SQL: {e}") from e

    def query_df(self, sql: str, parameters: Optional[List[Any]] = None) -> pd.DataFrame:
        self._ensure_connection()
        print(f"Querying SQL to DataFrame: {sql}")
        try:
            return self._conn.execute(sql, parameters).df()
        except ImportError:
            print("Pandas library is required for DataFrame operations.")
            raise
        except duckdb.Error as e:
            print(f"Error querying SQL to DataFrame: {e}")
            raise QueryError(f"Error querying SQL to DataFrame: {e}") from e

    def query_arrow(self, sql: str, parameters: Optional[List[Any]] = None) -> pa.Table:
        self._ensure_connection()
        print(f"Querying SQL to Arrow Table: {sql}")
        try:
            return self._conn.execute(sql, parameters).arrow()
        except ImportError:
            print("PyArrow library is required for Arrow operations.")
            raise
        except duckdb.Error as e:
            print(f"Error querying SQL to Arrow Table: {e}")
            raise QueryError(f"Error querying SQL to Arrow Table: {e}") from e

    def query_fetchall(self, sql: str, parameters: Optional[List[Any]] = None) -> List[Tuple[Any, ...]]:
        self._ensure_connection()
        print(f"Querying SQL and fetching all: {sql}")
        try:
            return self._conn.execute(sql, parameters).fetchall()
        except duckdb.Error as e:
            print(f"Error querying SQL: {e}")
            raise QueryError(f"Error querying SQL: {e}") from e

    def query_fetchone(self, sql: str, parameters: Optional[List[Any]] = None) -> Optional[Tuple[Any, ...]]:
        self._ensure_connection()
        print(f"Querying SQL and fetching one: {sql}")
        try:
            return self._conn.execute(sql, parameters).fetchone()
        except duckdb.Error as e:
            print(f"Error querying SQL: {e}")
            raise QueryError(f"Error querying SQL: {e}") from e

    # --- Registration Methods ---
    def register_df(self, name: str, df: pd.DataFrame):
        self._ensure_connection()
        print(f"Registering DataFrame as '{name}'")
        try:
            self._conn.register(name, df)
        except duckdb.Error as e:
            print(f"Error registering DataFrame: {e}")
            raise QueryError(f"Error registering DataFrame: {e}") from e

    def unregister_df(self, name: str):
        self._ensure_connection()
        print(f"Unregistering virtual table '{name}'")
        try:
            self._conn.unregister(name)
        except duckdb.Error as e:
            if "not found" in str(e).lower():
                 print(f"Warning: Virtual table '{name}' not found for unregistering.")
            else:
                print(f"Error unregistering virtual table: {e}")
                raise QueryError(f"Error unregistering virtual table: {e}") from e

    # --- Extension Methods ---
    def install_extension(self, extension_name: str, force_install: bool = False):
        self._ensure_connection()
        print(f"Installing extension: {extension_name}")
        try:
            self._conn.install_extension(extension_name, force_install=force_install)
        except duckdb.Error as e:
            print(f"Error installing extension '{extension_name}': {e}")
            raise DatabaseAPIError(f"Error installing extension '{extension_name}': {e}") from e

    def load_extension(self, extension_name: str):
        self._ensure_connection()
        print(f"Loading extension: {extension_name}")
        try:
            self._conn.load_extension(extension_name)
        # Catch specific DuckDB errors that indicate failure but aren't API errors
        except (duckdb.IOException, duckdb.CatalogException) as load_err:
             print(f"Error loading extension '{extension_name}': {load_err}")
             raise QueryError(f"Error loading extension '{extension_name}': {load_err}") from load_err
        except duckdb.Error as e: # Catch other DuckDB errors
            print(f"Unexpected DuckDB error loading extension '{extension_name}': {e}")
            raise DatabaseAPIError(f"Unexpected DuckDB error loading extension '{extension_name}': {e}") from e

    # --- Export Methods ---
    def export_database(self, directory_path: Union[str, Path]):
        self._ensure_connection()
        path_str = str(directory_path)
        if not os.path.isdir(path_str):
            try:
                os.makedirs(path_str)
                print(f"Created export directory: {path_str}")
            except OSError as e:
                raise DatabaseAPIError(f"Could not create export directory '{path_str}': {e}") from e
        print(f"Exporting database to directory: {path_str}")
        sql = f"EXPORT DATABASE '{path_str}' (FORMAT CSV)"
        try:
            self._conn.execute(sql)
            print("Database export completed successfully.")
        except duckdb.Error as e:
            print(f"Error exporting database: {e}")
            raise DatabaseAPIError(f"Error exporting database: {e}") from e

    def _export_data(self,
                      source: str,
                      output_path: Union[str, Path],
                      file_format: str,
                      options: Optional[Dict[str, Any]] = None):
        self._ensure_connection()
        path_str = str(output_path)
        options_str = _format_copy_options(options)
        source_safe = source.strip()
        # Wrap query text in parentheses; double-quote bare table names. A name
        # containing a space is treated as a query here, so quoted identifiers
        # with embedded spaces would need more robust detection.
        if ' ' in source_safe or source_safe.upper().startswith(('SELECT', 'WITH', 'VALUES')):
            copy_source = f"({source_safe})"
        else:
            copy_source = f'"{source_safe}"'

        sql = f"COPY {copy_source} TO '{path_str}' {options_str}"
        print(f"Exporting data to {path_str} (Format: {file_format}) with options: {options or {}}")
        try:
            self._conn.execute(sql)
            print("Data export completed successfully.")
        except duckdb.Error as e:
            print(f"Error exporting data: {e}")
            raise QueryError(f"Error exporting data to {file_format}: {e}") from e

    # --- Format-Specific Export Wrappers ---
    def export_data_to_csv(self,
                           source: str,
                           output_path: Union[str, Path],
                           options: Optional[Dict[str, Any]] = None):
        csv_options = options.copy() if options else {}
        csv_options['FORMAT'] = 'CSV'
        if 'HEADER' not in {k.upper() for k in csv_options}:
            csv_options['HEADER'] = True
        self._export_data(source, output_path, "CSV", csv_options)

    def export_data_to_parquet(self,
                             source: str,
                             output_path: Union[str, Path],
                             options: Optional[Dict[str, Any]] = None):
        parquet_options = options.copy() if options else {}
        parquet_options['FORMAT'] = 'PARQUET'
        self._export_data(source, output_path, "Parquet", parquet_options)

    def export_data_to_json(self,
                            source: str,
                            output_path: Union[str, Path],
                            array_format: bool = True,
                            options: Optional[Dict[str, Any]] = None):
        json_options = options.copy() if options else {}
        json_options['FORMAT'] = 'JSON'
        if 'ARRAY' not in {k.upper() for k in json_options}:
            json_options['ARRAY'] = array_format
        self._export_data(source, output_path, "JSON", json_options)

    def export_data_to_jsonl(self,
                             source: str,
                             output_path: Union[str, Path],
                             options: Optional[Dict[str, Any]] = None):
        self.export_data_to_json(source, output_path, array_format=False, options=options)
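
    # Illustrative call (path hypothetical): with ARRAY false the output is one
    # JSON object per line (NDJSON) rather than a single JSON array.
    #   api.export_data_to_jsonl("SELECT * FROM sales", "sales.jsonl")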


    # --- Streaming Read Methods ---

    def stream_query_df(self,
                        sql: str,
                        parameters: Optional[List[Any]] = None,
                        vectors_per_chunk: int = 1
                       ) -> Iterator[pd.DataFrame]:
        self._ensure_connection()
        print(f"Streaming DataFrame query (vectors per chunk {vectors_per_chunk}): {sql}")
        try:
            result_set = self._conn.execute(sql, parameters)
            while True:
                chunk_df = result_set.fetch_df_chunk(vectors_per_chunk)
                if chunk_df.empty:
                    break
                yield chunk_df
        except ImportError:
             print("Pandas library is required for DataFrame streaming.")
             raise
        except duckdb.Error as e:
            print(f"Error streaming DataFrame query: {e}")
            raise QueryError(f"Error streaming DataFrame query: {e}") from e

    def stream_query_arrow(self,
                           sql: str,
                           parameters: Optional[List[Any]] = None,
                           batch_size: int = 1000000
                          ) -> Iterator[pa.RecordBatch]:
        """
        Executes a SQL query and streams the results as Arrow RecordBatches.
        Useful for processing large results iteratively in Python without
        loading the entire result set into memory.

        Args:
            sql: The SQL query to execute.
            parameters: Optional list of parameters for prepared statements.
            batch_size: The approximate number of rows per Arrow RecordBatch.

        Yields:
            pyarrow.RecordBatch: Chunks of the result set.

        Raises:
            QueryError: If the query execution or fetching fails.
            ImportError: If pyarrow is not installed.
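
        Example (illustrative; the table name is hypothetical):
            for batch in api.stream_query_arrow("SELECT * FROM events"):
                ...  # process each pyarrow.RecordBatch as it arrives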
        """
        self._ensure_connection()
        print(f"Streaming Arrow query (batch size {batch_size}): {sql}")
        record_batch_reader = None
        try:
            # execute() returns a result object that supports streaming fetch;
            # fetch_record_batch() wraps it in a pyarrow RecordBatchReader.
            result_set = self._conn.execute(sql, parameters)
            record_batch_reader = result_set.fetch_record_batch(batch_size)
            for batch in record_batch_reader:
                yield batch
        except ImportError:
            print("PyArrow library is required for Arrow streaming.")
            raise
        except duckdb.Error as e:
            print(f"Error streaming Arrow query: {e}")
            raise QueryError(f"Error streaming Arrow query: {e}") from e
        finally:
            # Neither the reader nor DuckDB's result object exposes an explicit
            # close(); dropping the reference lets the GC reclaim resources.
            del record_batch_reader

    # --- Resource Management Methods ---
    def close(self):
        if self._conn:
            conn_id = id(self._conn)
            print(f"Closing connection to '{self._db_path}' (ID: {conn_id})")
            try:
                self._conn.close()
            except duckdb.Error as e:
                print(f"Error closing DuckDB connection (ID: {conn_id}): {e}")
            finally:
                self._conn = None
        else:
            print("Connection already closed or never opened.")

    def __enter__(self):
        self._ensure_connection()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def __del__(self):
        if self._conn:
            print(f"ResourceWarning: DatabaseAPI for '{self._db_path}' was not explicitly closed. Closing now in __del__.")
            try:
                self.close()
            except Exception as e:
                print(f"Exception during implicit close in __del__: {e}")
                self._conn = None


# --- Example Usage ---
if __name__ == "__main__":
    temp_dir_obj = tempfile.TemporaryDirectory()
    temp_dir = temp_dir_obj.name
    print(f"\n--- Using temporary directory: {temp_dir} ---")
    db_file = Path(temp_dir) / "export_test.db"
    try:
        with DatabaseAPI(db_path=db_file) as db_api:
            db_api.execute_sql("CREATE OR REPLACE TABLE products(id INTEGER, name VARCHAR, price DECIMAL(8,2))")
            db_api.execute_sql("INSERT INTO products VALUES (101, 'Gadget', 19.99), (102, 'Widget', 35.00), (103, 'Thing''amajig', 9.50)")
            db_api.execute_sql("CREATE OR REPLACE TABLE sales(product_id INTEGER, sale_date DATE, quantity INTEGER)")
            db_api.execute_sql("INSERT INTO sales VALUES (101, '2023-10-26', 5), (102, '2023-10-26', 2), (101, '2023-10-27', 3)")
            export_dir = Path(temp_dir) / "exported_db"
            db_api.export_database(export_dir)
            csv_path = Path(temp_dir) / "products_export.csv"
            db_api.export_data_to_csv('products', csv_path, options={'HEADER': True})
            parquet_path = Path(temp_dir) / "high_value_products.parquet"
            db_api.export_data_to_parquet("SELECT * FROM products WHERE price > 20", parquet_path, options={'COMPRESSION': 'SNAPPY'})
            json_path = Path(temp_dir) / "sales.json"
            db_api.export_data_to_json("SELECT * FROM sales", json_path, array_format=True)
            jsonl_path = Path(temp_dir) / "sales.jsonl"
            db_api.export_data_to_jsonl("SELECT * FROM sales ORDER BY sale_date", jsonl_path)

        with DatabaseAPI() as db_api:
            db_api.execute_sql("CREATE TABLE large_range AS SELECT range AS id, range % 100 AS category FROM range(1000)")
            for batch in db_api.stream_query_arrow("SELECT * FROM large_range", batch_size=200):
                pass
            for df_chunk in db_api.stream_query_df("SELECT * FROM large_range", vectors_per_chunk=1):
                pass
    finally:
        temp_dir_obj.cleanup()
        print(f"\n--- Cleaned up temporary directory: {temp_dir} ---")