From f86e59c8828075985ba6e2606da972ca74a277bb Mon Sep 17 00:00:00 2001 From: sizhky <1992chinna@gmail.com> Date: Thu, 9 May 2024 13:21:04 +0530 Subject: [PATCH] 0.532 --- changelog.md | 5 + conda/torch_snippets/meta.yaml | 6 +- nbs/adapters.ipynb | 81 ++- nbs/bokeh_plotting.ipynb | 23 + nbs/bounding_boxes.ipynb | 171 ++++- nbs/charts.ipynb | 932 ++++++++++++++++++++++------ nbs/config.ipynb | 35 ++ nbs/decorators.ipynb | 53 ++ nbs/imgaug_loader.ipynb | 118 ++++ nbs/markups.ipynb | 560 ++++++----------- nbs/paths.ipynb | 20 +- scripts.ipynb | 725 ++++++---------------- settings.ini | 2 +- torch_snippets.egg-info/PKG-INFO | 3 +- torch_snippets.egg-info/SOURCES.txt | 2 + torch_snippets/__init__.py | 2 +- torch_snippets/_modidx.py | 9 + torch_snippets/adapters.py | 79 ++- torch_snippets/bb_utils.py | 171 ++++- torch_snippets/bokeh_loader.py | 23 + torch_snippets/charts.py | 475 +++++++++++++- torch_snippets/decorators.py | 43 ++ torch_snippets/imgaug_loader.py | 118 ++++ torch_snippets/loader.py | 13 +- torch_snippets/markup.py | 134 ++-- torch_snippets/markup2.py | 24 +- torch_snippets/paths.py | 20 +- torch_snippets/scp.py | 94 +++ torch_snippets/sklegos.py | 20 +- torch_snippets/trainer/config.py | 22 + 30 files changed, 2817 insertions(+), 1166 deletions(-) create mode 100644 torch_snippets/scp.py diff --git a/changelog.md b/changelog.md index f5a415d..48f8fc7 100644 --- a/changelog.md +++ b/changelog.md @@ -1,9 +1,14 @@ # Changelog +#### 0.531 +๐ AD `__contains__` can do a nested `in` 'x.y.z' in AD(x={'y': {'z': 10}}) == True + #### 0.530 โ ๏ธ Stop using rich's print and revert back to builtin print ๐ Decouple AD and torch ๐ Add a new chart - spider / radar +๐ Add scp client with download upload functionality + #### 0.529 ๐งน change code to remove future warnings in text_utils diff --git a/conda/torch_snippets/meta.yaml b/conda/torch_snippets/meta.yaml index 6d9b3b0..e8bcc32 100644 --- a/conda/torch_snippets/meta.yaml +++ b/conda/torch_snippets/meta.yaml @@ -1,9 +1,9 @@ package: name: torch_snippets - version: '0.528' + version: '0.530' source: - sha256: 06f5ca72a5cefe10bbf448c71886b15d0e5ec59a199528ff248c2a0918de32cd - url: https://files.pythonhosted.org/packages/fc/f6/0beee4f16392f42a8c5a23948e09549617479540e724e40b8e291b8c2b06/torch_snippets-0.528.tar.gz + sha256: b0c5de8108d4ec175ec40b73dcfd0e8cec29c0e83171cec8aaeb865f9200f696 + url: https://files.pythonhosted.org/packages/33/29/ec172cc92aaf82303a25784f9470114ed76acfd52b734a1a9cfd93d20c64/torch_snippets-0.530.tar.gz about: description: "# Utilities for simple needs\n\n\n\n## torch snippets does a lot of\ \ default importing for you\nWhether it is numpy, pandas, matplotlib or the useful\ diff --git a/nbs/adapters.ipynb b/nbs/adapters.ipynb index 983627a..be6c00a 100644 --- a/nbs/adapters.ipynb +++ b/nbs/adapters.ipynb @@ -51,6 +51,14 @@ "\n", "\n", "def b64_2_np(input: str) -> np.ndarray:\n", + " \"\"\"Converts a base64 encoded image to a NumPy array.\n", + "\n", + " Args:\n", + " input (str): The base64 encoded image.\n", + "\n", + " Returns:\n", + " np.ndarray: The NumPy array representation of the image in RGB format.\n", + " \"\"\"\n", " input = bytes(input, \"utf-8\")\n", " input = base64.b64decode(input)\n", " img_nparr = np.frombuffer(input, np.uint8)\n", @@ -99,6 +107,17 @@ "def _process(\n", " df: pd.DataFrame, label_column=\"readable_label\", default_label=\"Background\"\n", "):\n", + " \"\"\"\n", + " Process the given DataFrame and convert it into a list of records.\n", + "\n", + " Args:\n", + " df (pd.DataFrame): The input 
DataFrame to be processed.\n", + " label_column (str, optional): The column name for the label. Defaults to \"readable_label\".\n", + " default_label (str, optional): The default label value. Defaults to \"Background\".\n", + "\n", + " Returns:\n", + " list: A list of records, where each record is a dictionary representing a row in the DataFrame.\n", + " \"\"\"\n", " df[\"@xbr\"] = df[\"X\"]\n", " df[\"@xtl\"] = df[\"x\"]\n", " df[\"@ybr\"] = df[\"Y\"]\n", @@ -148,6 +167,22 @@ " default_label=\"Background\",\n", " extension=\"jpg\",\n", "):\n", + " \"\"\"\n", + " Convert CSV annotations to CVAT XML format.\n", + "\n", + " Args:\n", + " images_folder (str): Path to the folder containing the images.\n", + " csvs_folder (str): Path to the folder containing the CSV annotations.\n", + " xml_output_file (str): Path to the output XML file.\n", + " items (list, optional): List of items to process. If None, all items will be processed. Defaults to None.\n", + " parquet (bool, optional): Whether the annotations are stored in Parquet format. Defaults to False.\n", + " relative_df (bool, optional): Whether the bounding box coordinates in the CSV are relative to the image size. Defaults to True.\n", + " default_label (str, optional): Default label for the bounding boxes. Defaults to \"Background\".\n", + " extension (str, optional): Image file extension. Defaults to \"jpg\".\n", + "\n", + " Returns:\n", + " None\n", + " \"\"\"\n", " images_folder, csvs_folder = [P(_) for _ in [images_folder, csvs_folder]]\n", " data = AttrDict({\"annotations\": {\"image\": []}})\n", " if items is None:\n", @@ -174,6 +209,16 @@ "\n", "\n", "def _get_attribute_columns(column):\n", + " \"\"\"\n", + " Get attribute columns from a given column.\n", + "\n", + " Args:\n", + " column (pandas.Series): The input column.\n", + "\n", + " Returns:\n", + " set: A set of attribute columns extracted from the input column.\n", + " \"\"\"\n", + "\n", " def _get_columns_from_row(item):\n", " if item != item:\n", " return []\n", @@ -187,6 +232,16 @@ "\n", "\n", "def _get_attribute_data(item, column_name):\n", + " \"\"\"\n", + " Retrieves the attribute data for a given item and column name.\n", + "\n", + " Parameters:\n", + " - item: The item to retrieve the attribute data from.\n", + " - column_name: The name of the column to retrieve the attribute data for.\n", + "\n", + " Returns:\n", + " - The attribute data for the given item and column name, or np.nan if not found.\n", + " \"\"\"\n", " if item != item:\n", " return item\n", " if isinstance(item, dict):\n", @@ -203,6 +258,15 @@ "\n", "\n", "def _cvat_ann_2_csv(ann):\n", + " \"\"\"\n", + " Convert CVAT annotation to a pandas DataFrame in CSV format.\n", + "\n", + " Args:\n", + " ann (dict): CVAT annotation dictionary.\n", + "\n", + " Returns:\n", + " pandas.DataFrame: DataFrame containing the converted annotation data in CSV format.\n", + " \"\"\"\n", " if \"box\" not in ann:\n", " return pd.DataFrame()\n", " if isinstance(ann.box, AttrDict):\n", @@ -236,8 +300,21 @@ "\n", "\n", "def cvat_2_csvs(xmlfile, csvs_folder):\n", + " \"\"\"\n", + " Convert CVAT XML annotations to CSV files.\n", + "\n", + " Args:\n", + " xmlfile (str): Path to the CVAT XML file.\n", + " csvs_folder (str): Path to the folder where the CSV files will be saved.\n", + "\n", + " Returns:\n", + " None\n", + " \"\"\"\n", " data = read_xml(xmlfile)\n", - " for item in data.annotations.image:\n", + " items = data.annotations.image\n", + " if not isinstance(items, list):\n", + " items = [items]\n", + " for item in 
items:\n", " try:\n", " df = _cvat_ann_2_csv(item)\n", " save_at = f'{csvs_folder}/{stem(item[\"@name\"])}.csv'\n", @@ -271,7 +348,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.15" + "version": "3.8.undefined" }, "orig_nbformat": 4, "vscode": { diff --git a/nbs/bokeh_plotting.ipynb b/nbs/bokeh_plotting.ipynb index 86450ef..cc70852 100644 --- a/nbs/bokeh_plotting.ipynb +++ b/nbs/bokeh_plotting.ipynb @@ -65,6 +65,18 @@ "# | export\n", "# | hide\n", "def parse_sz(size):\n", + " \"\"\"\n", + " Parses the size argument and returns a tuple of width and height.\n", + "\n", + " Args:\n", + " size (int or tuple): The size argument to be parsed.\n", + "\n", + " Returns:\n", + " tuple: A tuple of width and height.\n", + "\n", + " Raises:\n", + " NotImplementedError: If the size argument is not an int or a tuple of length 2.\n", + " \"\"\"\n", " if isinstance(size, int):\n", " return size, size\n", " elif isinstance(size, tuple):\n", @@ -74,6 +86,17 @@ "\n", "\n", "def get_bplot(sz=500, **kwargs):\n", + " \"\"\"\n", + " Create a Bokeh plot with specified size and tools.\n", + "\n", + " Parameters:\n", + " - sz (int): Size of the plot in pixels.\n", + " - **kwargs: Additional keyword arguments for customizing the plot.\n", + "\n", + " Returns:\n", + " - plot (bokeh.plotting.Figure): Bokeh plot object.\n", + "\n", + " \"\"\"\n", " h, w = parse_sz(sz)\n", " output_notebook()\n", " plot = figure(\n", diff --git a/nbs/bounding_boxes.ipynb b/nbs/bounding_boxes.ipynb index a03f815..f0021e9 100644 --- a/nbs/bounding_boxes.ipynb +++ b/nbs/bounding_boxes.ipynb @@ -254,6 +254,15 @@ "# | export\n", "# | hide\n", "def df2bbs(df):\n", + " \"\"\"\n", + " Convert a DataFrame to bounding boxes.\n", + "\n", + " Parameters:\n", + " df (pd.DataFrame): The DataFrame to convert.\n", + "\n", + " Returns:\n", + " list: A list of bounding boxes.\n", + " \"\"\"\n", " if \"bb\" in df.columns:\n", " try:\n", " return bbfy(df[\"bb\"].values.tolist())\n", @@ -263,19 +272,56 @@ "\n", "\n", "def bbs2df(bbs):\n", + " \"\"\"\n", + " Convert bounding boxes to a DataFrame.\n", + "\n", + " Parameters:\n", + " bbs (list): The bounding boxes to convert.\n", + "\n", + " Returns:\n", + " pd.DataFrame: A DataFrame representing the bounding boxes.\n", + " \"\"\"\n", " bbs = [list(bb) for bb in bbs]\n", " return pd.DataFrame(bbs, columns=[\"x\", \"y\", \"X\", \"Y\"])\n", "\n", "\n", "def bbfy(bbs):\n", + " \"\"\"\n", + " Convert bounding boxes to BB objects.\n", + "\n", + " Parameters:\n", + " bbs (list): The bounding boxes to convert.\n", + "\n", + " Returns:\n", + " list: A list of BB objects.\n", + " \"\"\"\n", " return [BB(bb) for bb in bbs]\n", "\n", "\n", "def jitter(bbs, noise):\n", + " \"\"\"\n", + " Add noise to bounding boxes. 
Useful when you have a lot of overlapping boxes.\n", + "\n", + " Parameters:\n", + " bbs (list): The bounding boxes to add noise to.\n", + " noise (float): The amount of noise to add.\n", + "\n", + " Returns:\n", + " list: A list of bounding boxes with added noise.\n", + " \"\"\"\n", " return [BB(bb).jitter(noise) for bb in bbs]\n", "\n", "\n", "def compute_eps(eps):\n", + " \"\"\"\n", + " Compute epsilon values for bounding box manipulation.\n", + "\n", + " Parameters:\n", + " eps (float or tuple): The epsilon value(s) to compute.\n", + "\n", + " Returns:\n", + " tuple: A tuple of epsilon values.\n", + " \"\"\"\n", " if isinstance(eps, tuple):\n", " if len(eps) == 4:\n", " epsx, epsy, epsX, epsY = eps\n", @@ -288,7 +334,16 @@ "\n", "\n", "def enlarge_bbs(bbs, eps=0.2):\n", - " \"enlarge all `bbs` by `eps` fraction (i.e., eps*100 percent)\"\n", + " \"\"\"\n", + " Enlarge bounding boxes by a certain fraction.\n", + "\n", + " Parameters:\n", + " bbs (list): The bounding boxes to enlarge.\n", + " eps (float, optional): The fraction to enlarge by. Defaults to 0.2.\n", + "\n", + " Returns:\n", + " list: A list of enlarged bounding boxes.\n", + " \"\"\"\n", " bbs = bbfy(bbs)\n", " epsx, epsy, epsX, epsY = compute_eps(eps)\n", " bbs = bbfy(bbs)\n", @@ -300,7 +355,16 @@ "\n", "\n", "def shrink_bbs(bbs, eps=0.2):\n", - " \"shrink all `bbs` by `eps` fraction (i.e., eps*100 percent)\"\n", + " \"\"\"\n", + " Shrink bounding boxes by a certain fraction.\n", + "\n", + " Parameters:\n", + " bbs (list): The bounding boxes to shrink.\n", + " eps (float, optional): The fraction to shrink by. Defaults to 0.2.\n", + "\n", + " Returns:\n", + " list: A list of shrunk bounding boxes.\n", + " \"\"\"\n", " bbs = bbfy(bbs)\n", " epsx, epsy, epsX, epsY = compute_eps(eps)\n", " bbs = bbfy(bbs)\n", @@ -320,6 +384,17 @@ "# | export\n", "# | hide\n", "def iou(bboxes1, bboxes2):\n", + " \"\"\"\n", + " Calculates the Intersection over Union (IoU) between two sets of bounding boxes.\n", + "\n", + " Args:\n", + " bboxes1 (list or numpy array): The first set of bounding boxes in the format [x, y, X, Y].\n", + " bboxes2 (list or numpy array): The second set of bounding boxes in the format [x, y, X, Y].\n", + "\n", + " Returns:\n", + " numpy array: The IoU between each pair of bounding boxes.\n", + "\n", + " \"\"\"\n", " bboxes1 = np.array(bboxes1)\n", " bboxes2 = np.array(bboxes2)\n", " x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1)\n", @@ -336,6 +411,17 @@ "\n", "\n", "def compute_distance_matrix(bboxes1, bboxes2):\n", + " \"\"\"\n", + " Compute the distance matrix between two sets of bounding boxes.\n", + "\n", + " Parameters:\n", + " - bboxes1 (list): List of bounding boxes in the format [x, y, X, Y].\n", + " - bboxes2 (list): List of bounding boxes in the format [x, y, X, Y].\n", + "\n", + " Returns:\n", + " - distance_matrix (ndarray): 2D array containing the Euclidean distances between all pairs of bounding boxes.\n", + " \"\"\"\n", + "\n", " # Convert the bounding box lists to NumPy arrays\n", " bboxes1 = np.array(bboxes1)\n", " bboxes2 = np.array(bboxes2)\n", @@ -354,7 +440,17 @@ "\n", "\n", "def compute_distances(df1, df2, shrink_factors=(1, 1)):\n", - " \"\"\"Return euclidean distance mxn matrix for all boxes from df1 with all boxes from df2\"\"\"\n", + " \"\"\"\n", + " Compute the Euclidean distance matrix between bounding boxes in df1 and df2.\n", + "\n", + " Parameters:\n", + " - df1 (DataFrame): The first DataFrame containing bounding boxes.\n", + " - df2 (DataFrame): The second DataFrame containing bounding 
boxes.\n", + " - shrink_factors (tuple, optional): The shrink factors to apply to the bounding boxes. Default is (1, 1).\n", + "\n", + " Returns:\n", + " - distances (ndarray): The Euclidean distance matrix between the bounding boxes in df1 and df2.\n", + " \"\"\"\n", " sx, sy = shrink_factors\n", " bbs1 = np.array(df2bbs(df1)) / np.array([sx, sy, sx, sy])\n", " bbs2 = np.array(df2bbs(df2)) / np.array([sx, sy, sx, sy])\n", @@ -372,7 +468,18 @@ "# | export\n", "# | hide\n", "def split_bb_to_xyXY(df):\n", - " \"convert bb column to separate x,y,X,Y columns\"\n", + " \"\"\"\n", + " Convert the 'bb' column in the DataFrame to separate 'x', 'y', 'X', 'Y' columns.\n", + "\n", + " Args:\n", + " df (pd.DataFrame): The DataFrame containing the bounding box information.\n", + "\n", + " Returns:\n", + " pd.DataFrame: The DataFrame with separate 'x', 'y', 'X', 'Y' columns.\n", + "\n", + " Raises:\n", + " AssertionError: If the input is not a DataFrame or if the 'bb' column is missing.\n", + " \"\"\"\n", " df = df.copy()\n", " assert isinstance(df, pd.DataFrame)\n", " if all([item in df.columns for item in \"xyXY\"]):\n", @@ -391,7 +498,18 @@ "\n", "\n", "def combine_xyXY_to_bb(df):\n", - " \"combine `x,y,X,Y` to `bb` column\"\n", + " \"\"\"\n", + " Combine `x`, `y`, `X`, `Y` columns into a single `bb` column.\n", + "\n", + " Args:\n", + " df (pandas.DataFrame): The input DataFrame containing `x`, `y`, `X`, `Y` columns.\n", + "\n", + " Returns:\n", + " pandas.DataFrame: The modified DataFrame with the `bb` column.\n", + "\n", + " Raises:\n", + " AssertionError: If any of the columns `x`, `y`, `X`, `Y` are missing in the DataFrame.\n", + " \"\"\"\n", " df = df.copy()\n", " assert all(\n", " [item in df.columns for item in \"xyXY\"]\n", @@ -402,16 +520,47 @@ "\n", "\n", "def is_absolute(df):\n", + " \"\"\"\n", + " Check if the bounding boxes in the given DataFrame are absolute.\n", + "\n", + " Args:\n", + " df (pandas.DataFrame): The DataFrame containing bounding box coordinates.\n", + "\n", + " Returns:\n", + " bool: True if the maximum value of the bounding box coordinates is greater than 1.1, False otherwise.\n", + " \"\"\"\n", " bbs = df2bbs(df)\n", " bbs = np.array(bbs)\n", " return bbs.max() > 1.1\n", "\n", "\n", "def is_relative(df):\n", + " \"\"\"\n", + " Check if the bounding box coordinates in the DataFrame are relative.\n", + "\n", + " Args:\n", + " df (pandas.DataFrame): The DataFrame containing bounding box coordinates.\n", + "\n", + " Returns:\n", + " bool: True if the bounding box coordinates are relative, False otherwise.\n", + " \"\"\"\n", " return not is_absolute(df)\n", "\n", "\n", "def to_relative(df, height, width, force=False):\n", + " \"\"\"\n", + " Converts bounding box coordinates in a DataFrame to relative coordinates.\n", + "\n", + " Args:\n", + " df (pandas.DataFrame): The DataFrame containing bounding box coordinates.\n", + " height (int): The height of the image.\n", + " width (int): The width of the image.\n", + " force (bool, optional): If True, forces conversion even if the coordinates are already relative.\n", + " Defaults to False.\n", + "\n", + " Returns:\n", + " pandas.DataFrame: The DataFrame with bounding box coordinates converted to relative coordinates.\n", + " \"\"\"\n", " if not force and is_relative(df):\n", " return df\n", " df = df.copy()\n", @@ -430,6 +579,18 @@ "\n", "\n", "def to_absolute(df, height, width, force=False):\n", + " \"\"\"\n", + " Converts bounding box coordinates from relative to absolute values.\n", + "\n", + " Args:\n", + " df 
(pandas.DataFrame): The DataFrame containing the bounding box coordinates.\n", + " height (int): The height of the image.\n", + " width (int): The width of the image.\n", + " force (bool, optional): If True, forces the conversion even if the coordinates are already in absolute values. Defaults to False.\n", + "\n", + " Returns:\n", + " pandas.DataFrame: The DataFrame with the bounding box coordinates converted to absolute values.\n", + " \"\"\"\n", " if not force and is_absolute(df):\n", " return df\n", " df = df.copy()\n", diff --git a/nbs/charts.ipynb b/nbs/charts.ipynb index afe00d5..eb9656e 100644 --- a/nbs/charts.ipynb +++ b/nbs/charts.ipynb @@ -53,31 +53,34 @@ } }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/altair/utils/core.py:317: FutureWarning: iteritems is deprecated and will be removed in a future version. Use .items instead.\n", - " for col_name, dtype in df.dtypes.iteritems():\n" - ] - }, { "data": { "text/html": [ "\n", - "
\n", + "\n", + "\n", "" ], "text/plain": [ @@ -166,6 +169,33 @@ "# | export\n", "# | hide\n", "def confusion_matrix(df=None, truth=None, pred=None, mapping=None, save_to=None):\n", + " \"\"\"\n", + " Generate a confusion matrix chart based on the given DataFrame or truth/prediction arrays.\n", + "\n", + " Parameters:\n", + " - df (DataFrame, optional): The input DataFrame containing the truth and prediction columns. If not provided, the truth and prediction arrays must be provided separately.\n", + " - truth (array-like, optional): The array-like object containing the true labels.\n", + " - pred (array-like, optional): The array-like object containing the predicted labels.\n", + " - mapping (dict, optional): A dictionary mapping the labels to their corresponding names.\n", + " - save_to (str, optional): The file path to save the chart in HTML format.\n", + "\n", + " Returns:\n", + " - chart (altair.Chart): The confusion matrix chart.\n", + "\n", + " Note:\n", + " - If `df` is not provided, `truth` and `pred` must be provided separately.\n", + " - If `save_to` is provided, the chart will be saved at the specified file path in HTML format.\n", + " - If `mapping` is provided, the labels in the chart will be replaced with their corresponding names.\n", + "\n", + " Example usage:\n", + " ```\n", + " # Generate confusion matrix from DataFrame\n", + " confusion_matrix(df=my_df, mapping=my_mapping, save_to=\"confusion_matrix.html\")\n", + "\n", + " # Generate confusion matrix from separate truth and prediction arrays\n", + " confusion_matrix(truth=my_truth, pred=my_pred, mapping=my_mapping)\n", + " ```\n", + " \"\"\"\n", " if df is None:\n", " df = pd.DataFrame({\"truth\": truth, \"pred\": pred})\n", " truth = \"truth\"\n", @@ -233,63 +263,50 @@ }, "outputs": [ { - "data": { - "text/html": [ - "precision recall f1-score support\n", - "\n", - " 0 0.25 0.25 0.25 250150\n", - " 1 0.25 0.25 0.25 250245\n", - " 2 0.25 0.25 0.25 249836\n", - " 3 0.25 0.25 0.25 249769\n", - "\n", - " accuracy 0.25 1000000\n", - " macro avg 0.25 0.25 0.25 1000000\n", - "weighted avg 0.25 0.25 0.25 1000000\n", - "\n", - "\n" - ], - "text/plain": [ - " precision recall f1-score support\n", - "\n", - " \u001b[1;36m0\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m250150\u001b[0m\n", - " \u001b[1;36m1\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m250245\u001b[0m\n", - " \u001b[1;36m2\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m249836\u001b[0m\n", - " \u001b[1;36m3\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m249769\u001b[0m\n", - "\n", - " accuracy \u001b[1;36m0.25\u001b[0m \u001b[1;36m1000000\u001b[0m\n", - " macro avg \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m1000000\u001b[0m\n", - "weighted avg \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m1000000\u001b[0m\n", - "\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/altair/utils/core.py:317: FutureWarning: iteritems is deprecated and will be removed in a future version. 
Use .items instead.\n", - " for col_name, dtype in df.dtypes.iteritems():\n" + " precision recall f1-score support\n", + "\n", + " 0 0.25 0.25 0.25 250150\n", + " 1 0.25 0.25 0.25 250245\n", + " 2 0.25 0.25 0.25 249836\n", + " 3 0.25 0.25 0.25 249769\n", + "\n", + " accuracy 0.25 1000000\n", + " macro avg 0.25 0.25 0.25 1000000\n", + "weighted avg 0.25 0.25 0.25 1000000\n", + "\n" ] }, { "data": { "text/html": [ "\n", - "\n", + "\n", + "\n", "" ], "text/plain": [ @@ -371,75 +388,56 @@ }, "outputs": [ { - "data": { - "text/html": [ - "
precision recall f1-score support\n", - "\n", - " 0 0.13 0.14 0.13 92\n", - " 1 0.08 0.09 0.08 101\n", - " 2 0.13 0.12 0.13 107\n", - " 3 0.06 0.06 0.06 105\n", - " 4 0.12 0.11 0.11 94\n", - " 5 0.12 0.09 0.10 115\n", - " 6 0.08 0.10 0.09 88\n", - " 7 0.08 0.07 0.08 113\n", - " 8 0.09 0.09 0.09 99\n", - " 9 0.12 0.15 0.13 86\n", - "\n", - " accuracy 0.10 1000\n", - " macro avg 0.10 0.10 0.10 1000\n", - "weighted avg 0.10 0.10 0.10 1000\n", - "\n", - "\n" - ], - "text/plain": [ - " precision recall f1-score support\n", - "\n", - " \u001b[1;36m0\u001b[0m \u001b[1;36m0.13\u001b[0m \u001b[1;36m0.14\u001b[0m \u001b[1;36m0.13\u001b[0m \u001b[1;36m92\u001b[0m\n", - " \u001b[1;36m1\u001b[0m \u001b[1;36m0.08\u001b[0m \u001b[1;36m0.09\u001b[0m \u001b[1;36m0.08\u001b[0m \u001b[1;36m101\u001b[0m\n", - " \u001b[1;36m2\u001b[0m \u001b[1;36m0.13\u001b[0m \u001b[1;36m0.12\u001b[0m \u001b[1;36m0.13\u001b[0m \u001b[1;36m107\u001b[0m\n", - " \u001b[1;36m3\u001b[0m \u001b[1;36m0.06\u001b[0m \u001b[1;36m0.06\u001b[0m \u001b[1;36m0.06\u001b[0m \u001b[1;36m105\u001b[0m\n", - " \u001b[1;36m4\u001b[0m \u001b[1;36m0.12\u001b[0m \u001b[1;36m0.11\u001b[0m \u001b[1;36m0.11\u001b[0m \u001b[1;36m94\u001b[0m\n", - " \u001b[1;36m5\u001b[0m \u001b[1;36m0.12\u001b[0m \u001b[1;36m0.09\u001b[0m \u001b[1;36m0.10\u001b[0m \u001b[1;36m115\u001b[0m\n", - " \u001b[1;36m6\u001b[0m \u001b[1;36m0.08\u001b[0m \u001b[1;36m0.10\u001b[0m \u001b[1;36m0.09\u001b[0m \u001b[1;36m88\u001b[0m\n", - " \u001b[1;36m7\u001b[0m \u001b[1;36m0.08\u001b[0m \u001b[1;36m0.07\u001b[0m \u001b[1;36m0.08\u001b[0m \u001b[1;36m113\u001b[0m\n", - " \u001b[1;36m8\u001b[0m \u001b[1;36m0.09\u001b[0m \u001b[1;36m0.09\u001b[0m \u001b[1;36m0.09\u001b[0m \u001b[1;36m99\u001b[0m\n", - " \u001b[1;36m9\u001b[0m \u001b[1;36m0.12\u001b[0m \u001b[1;36m0.15\u001b[0m \u001b[1;36m0.13\u001b[0m \u001b[1;36m86\u001b[0m\n", - "\n", - " accuracy \u001b[1;36m0.10\u001b[0m \u001b[1;36m1000\u001b[0m\n", - " macro avg \u001b[1;36m0.10\u001b[0m \u001b[1;36m0.10\u001b[0m \u001b[1;36m0.10\u001b[0m \u001b[1;36m1000\u001b[0m\n", - "weighted avg \u001b[1;36m0.10\u001b[0m \u001b[1;36m0.10\u001b[0m \u001b[1;36m0.10\u001b[0m \u001b[1;36m1000\u001b[0m\n", - "\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/altair/utils/core.py:317: FutureWarning: iteritems is deprecated and will be removed in a future version. Use .items instead.\n", - " for col_name, dtype in df.dtypes.iteritems():\n" + " precision recall f1-score support\n", + "\n", + " 0 0.13 0.14 0.13 92\n", + " 1 0.08 0.09 0.08 101\n", + " 2 0.13 0.12 0.13 107\n", + " 3 0.06 0.06 0.06 105\n", + " 4 0.12 0.11 0.11 94\n", + " 5 0.12 0.09 0.10 115\n", + " 6 0.08 0.10 0.09 88\n", + " 7 0.08 0.07 0.08 113\n", + " 8 0.09 0.09 0.09 99\n", + " 9 0.12 0.15 0.13 86\n", + "\n", + " accuracy 0.10 1000\n", + " macro avg 0.10 0.10 0.10 1000\n", + "weighted avg 0.10 0.10 0.10 1000\n", + "\n" ] }, { "data": { "text/html": [ "\n", - "\n", + "\n", + "\n", "" ], "text/plain": [ @@ -515,63 +513,50 @@ "metadata": {}, "outputs": [ { - "data": { - "text/html": [ - "
precision recall f1-score support\n", - "\n", - " a 0.25 0.29 0.27 229\n", - " b 0.28 0.29 0.28 256\n", - " c 0.27 0.24 0.26 267\n", - " d 0.26 0.25 0.25 248\n", - "\n", - " accuracy 0.27 1000\n", - " macro avg 0.26 0.27 0.26 1000\n", - "weighted avg 0.27 0.27 0.26 1000\n", - "\n", - "\n" - ], - "text/plain": [ - " precision recall f1-score support\n", - "\n", - " a \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.29\u001b[0m \u001b[1;36m0.27\u001b[0m \u001b[1;36m229\u001b[0m\n", - " b \u001b[1;36m0.28\u001b[0m \u001b[1;36m0.29\u001b[0m \u001b[1;36m0.28\u001b[0m \u001b[1;36m256\u001b[0m\n", - " c \u001b[1;36m0.27\u001b[0m \u001b[1;36m0.24\u001b[0m \u001b[1;36m0.26\u001b[0m \u001b[1;36m267\u001b[0m\n", - " d \u001b[1;36m0.26\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m0.25\u001b[0m \u001b[1;36m248\u001b[0m\n", - "\n", - " accuracy \u001b[1;36m0.27\u001b[0m \u001b[1;36m1000\u001b[0m\n", - " macro avg \u001b[1;36m0.26\u001b[0m \u001b[1;36m0.27\u001b[0m \u001b[1;36m0.26\u001b[0m \u001b[1;36m1000\u001b[0m\n", - "weighted avg \u001b[1;36m0.27\u001b[0m \u001b[1;36m0.27\u001b[0m \u001b[1;36m0.26\u001b[0m \u001b[1;36m1000\u001b[0m\n", - "\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/altair/utils/core.py:317: FutureWarning: iteritems is deprecated and will be removed in a future version. Use .items instead.\n", - " for col_name, dtype in df.dtypes.iteritems():\n" + " precision recall f1-score support\n", + "\n", + " a 0.25 0.29 0.27 229\n", + " b 0.28 0.29 0.28 256\n", + " c 0.27 0.24 0.26 267\n", + " d 0.26 0.25 0.25 248\n", + "\n", + " accuracy 0.27 1000\n", + " macro avg 0.26 0.27 0.26 1000\n", + "weighted avg 0.27 0.27 0.26 1000\n", + "\n" ] }, { "data": { "text/html": [ "\n", - "\n", + "\n", + "\n", "" ], "text/plain": [ @@ -635,25 +620,81 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "# | export\n", - "def spider(df, *, id_column, title=None, max_values=None, padding=1.25):\n", + "def spider(\n", + " df,\n", + " *,\n", + " id_column=None,\n", + " title=None,\n", + " max_values=None,\n", + " padding=1.25,\n", + " global_scale=False,\n", + " ax=None,\n", + " sz=10,\n", + "):\n", + " \"\"\"\n", + " Plot a spider chart based on the given dataframe.\n", + "\n", + " Parameters:\n", + " - df: pandas DataFrame\n", + " The input dataframe containing the data to be plotted.\n", + " - id_column: str, optional\n", + " The column name to be used as the identifier for each data point. If not provided, the index of the dataframe will be used.\n", + " - title: str, optional\n", + " The title of the spider chart.\n", + " - max_values: dict, optional\n", + " A dictionary specifying the maximum values for each category. If not provided, the maximum values will be calculated based on the data.\n", + " - padding: float, optional\n", + " The padding factor to be applied when calculating the maximum values. Default is 1.25.\n", + " - global_scale: bool or float, optional\n", + " If False, each category will have its own maximum value. If True, a single maximum value will be used for all categories. If a float value is provided, it will be used as the maximum value for all categories.\n", + " - ax: matplotlib Axes, optional\n", + " The axes on which to plot the spider chart. 
If not provided, a new figure and axes will be created.\n", + " - sz: float, optional\n", + " The size of the figure (both width and height) in inches. Default is 10.\n", + "\n", + " Returns:\n", + " - None\n", + "\n", + " Example usage:\n", + " spider(df, id_column='model', title='Spider Chart', max_values={'category1': 10, 'category2': 20}, padding=1.5)\n", + " \"\"\"\n", + " if id_column is None:\n", + " df = df.copy().reset_index(names=\"index\")\n", + " id_column = \"index\"\n", + " df = df.sort_values(id_column, ascending=True)\n", + " df = df[sorted(df.columns)]\n", " categories = df.dtypes[(df.dtypes == \"float\") | (df.dtypes == \"int\")].index.tolist()\n", " data = df[categories].to_dict(orient=\"list\")\n", - " ids = df[id_column].tolist()\n", + " ids = sorted(df[id_column].tolist())\n", " if max_values is None:\n", - " max_values = {key: padding * max(value) for key, value in data.items()}\n", + " if not global_scale:\n", + " max_values = {key: padding * max(value) for key, value in data.items()}\n", + " else:\n", + " if isinstance(global_scale, bool):\n", + " max_value = np.array(list(data.values())).max()\n", + " elif isinstance(global_scale, (int, float)):\n", + " max_value = global_scale\n", + " padding = 1.0\n", + " max_values = {key: padding * max_value for key, _ in data.items()}\n", " normalized_data = {\n", " key: np.array(value) / max_values[key] for key, value in data.items()\n", " }\n", + "\n", " num_vars = len(data.keys())\n", " tiks = list(data.keys())\n", " tiks += tiks[:1]\n", " angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist() + [0]\n", - " fig, ax = plt.subplots(figsize=(8, 8), subplot_kw=dict(polar=True))\n", + "\n", + " if ax is None:\n", + " fig, ax = plt.subplots(figsize=(sz, sz), subplot_kw=dict(polar=True))\n", + " _show = True\n", + " else:\n", + " _show = False\n", " for i, model_name in enumerate(ids):\n", " values = [normalized_data[key][i] for key in data.keys()]\n", " actual_values = [data[key][i] for key in data.keys()]\n", @@ -671,7 +712,8 @@ " ax.legend(loc=\"upper right\", bbox_to_anchor=(0.1, 0.1))\n", " if title is not None:\n", " plt.suptitle(title)\n", - " plt.show()\n", + " if _show:\n", + " plt.show()\n", "\n", "\n", "radar = spider" @@ -679,7 +721,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -705,14 +747,556 @@ " \"c3\": [1e5, 2e5, 3.5e5, 8e4, 5e4],\n", " \"c4\": [9, 12, 5, 2, 0.2],\n", " \"test\": [1, 1, 1, 1, 5],\n", - " }\n", + " },\n", + " index=[*\"abcde\"],\n", " ),\n", - " id_column=\"x\",\n", " title=\"Sample Spider\",\n", " padding=1.1,\n", ")" ] }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "# | export\n", + "# Top-level altair configuration\n", + "def upsetaltair_top_level_configuration(\n", + " base, legend_orient=\"top-left\", legend_symbol_size=30\n", + "):\n", + " \"\"\"\n", + " Configure the top-level settings for an UpSet plot in Altair.\n", + "\n", + " Parameters:\n", + " - base: The base chart to configure.\n", + " - legend_orient: The orientation of the legend. Default is \"top-left\".\n", + " - legend_symbol_size: The size of the legend symbols. 
Default is 30.\n", + "\n", + " Returns:\n", + " - The configured chart.\n", + "\n", + " \"\"\"\n", + " return (\n", + " base.configure_view(stroke=None)\n", + " .configure_title(\n", + " fontSize=18, fontWeight=400, anchor=\"start\", subtitlePadding=10\n", + " )\n", + " .configure_axis(\n", + " labelFontSize=14,\n", + " labelFontWeight=300,\n", + " titleFontSize=16,\n", + " titleFontWeight=400,\n", + " titlePadding=10,\n", + " )\n", + " .configure_legend(\n", + " titleFontSize=16,\n", + " titleFontWeight=400,\n", + " labelFontSize=14,\n", + " labelFontWeight=300,\n", + " padding=20,\n", + " orient=legend_orient,\n", + " symbolType=\"circle\",\n", + " symbolSize=legend_symbol_size,\n", + " )\n", + " .configure_concat(spacing=0)\n", + " )\n", + "\n", + "\n", + "def UpSetAltair(\n", + " data=None,\n", + " title=\"\",\n", + " subtitle=\"\",\n", + " sets=None,\n", + " abbre=None,\n", + " sort_by=\"frequency\",\n", + " sort_order=\"ascending\",\n", + " width=1200,\n", + " height=700,\n", + " height_ratio=0.6,\n", + " horizontal_bar_chart_width=300,\n", + " color_range=[\"#55A8DB\", \"#3070B5\", \"#30363F\", \"#F1AD60\", \"#DF6234\", \"#BDC6CA\"],\n", + " highlight_color=\"#EA4667\",\n", + " glyph_size=200,\n", + " set_label_bg_size=1000,\n", + " line_connection_size=2,\n", + " horizontal_bar_size=20,\n", + " vertical_bar_label_size=16,\n", + " vertical_bar_padding=20,\n", + "):\n", + " \"\"\"This function generates Altair-based interactive UpSet plots.\n", + "\n", + " Parameters:\n", + " - data (pandas.DataFrame): Tabular data containing the membership of each element (row) in\n", + " exclusive intersecting sets (column).\n", + " - sets (list): List of set names of interest to show in the UpSet plots.\n", + " This list reflects the order of sets to be shown in the plots as well.\n", + " - abbre (list): Abbreviated set names.\n", + " - sort_by (str): \"frequency\" or \"degree\"\n", + " - sort_order (str): \"ascending\" or \"descending\"\n", + " - width (int): Vertical size of the UpSet plot.\n", + " - height (int): Horizontal size of the UpSet plot.\n", + " - height_ratio (float): Ratio of height between upper and under views, ranges from 0 to 1.\n", + " - horizontal_bar_chart_width (int): Width of horizontal bar chart on the bottom-right.\n", + " - color_range (list): Color to encode sets.\n", + " - highlight_color (str): Color to encode intersecting sets upon mouse hover.\n", + " - glyph_size (int): Size of UpSet glyph (โฌค).\n", + " - set_label_bg_size (int): Size of label background in the horizontal bar chart.\n", + " - line_connection_size (int): width of lines in matrix view.\n", + " - horizontal_bar_size (int): Height of bars in the horizontal bar chart.\n", + " - vertical_bar_label_size (int): Font size of texts in the vertical bar chart on the top.\n", + " - vertical_bar_padding (int): Gap between a pair of bars in the vertical bar charts.\n", + " \"\"\"\n", + "\n", + " if (data is None) or (sets is None):\n", + " print(\"No data and/or a list of sets are provided\")\n", + " return\n", + " if (height_ratio < 0) or (1 < height_ratio):\n", + " print(\"height_ratio set to 0.5\")\n", + " height_ratio = 0.5\n", + " if len(sets) != len(abbre):\n", + " abbre = None\n", + " print(\n", + " \"Dropping the `abbre` list because the lengths of `sets` and `abbre` are not identical.\"\n", + " )\n", + "\n", + " \"\"\"\n", + " Data Preprocessing\n", + " \"\"\"\n", + " data[\"count\"] = 0\n", + " data = data[sets + [\"count\"]]\n", + " data = data.groupby(sets).count().reset_index()\n", + "\n", + " 
data[\"intersection_id\"] = data.index\n", + " data[\"degree\"] = data[sets].sum(axis=1)\n", + " data = data.sort_values(\n", + " by=[\"count\"], ascending=True if sort_order == \"ascending\" else False\n", + " )\n", + "\n", + " data = pd.melt(data, id_vars=[\"intersection_id\", \"count\", \"degree\"])\n", + " data = data.rename(columns={\"variable\": \"set\", \"value\": \"is_intersect\"})\n", + "\n", + " if abbre == None:\n", + " abbre = sets\n", + "\n", + " set_to_abbre = pd.DataFrame(\n", + " [[sets[i], abbre[i]] for i in range(len(sets))], columns=[\"set\", \"set_abbre\"]\n", + " )\n", + " set_to_order = pd.DataFrame(\n", + " [[sets[i], 1 + sets.index(sets[i])] for i in range(len(sets))],\n", + " columns=[\"set\", \"set_order\"],\n", + " )\n", + "\n", + " degree_calculation = \"\"\n", + " for s in sets:\n", + " degree_calculation += f\"(isDefined(datum['{s}']) ? datum['{s}'] : 0)\"\n", + " if sets[-1] != s:\n", + " degree_calculation += \"+\"\n", + "\n", + " \"\"\"\n", + " Selections\n", + " \"\"\"\n", + " legend_selection = alt.selection_multi(fields=[\"set\"], bind=\"legend\")\n", + " color_selection = alt.selection_single(fields=[\"intersection_id\"], on=\"mouseover\")\n", + " opacity_selection = alt.selection_single(fields=[\"intersection_id\"])\n", + "\n", + " \"\"\"\n", + " Styles\n", + " \"\"\"\n", + " vertical_bar_chart_height = height * height_ratio\n", + " matrix_height = height - vertical_bar_chart_height\n", + " matrix_width = width - horizontal_bar_chart_width\n", + "\n", + " vertical_bar_size = min(\n", + " 30,\n", + " width / len(data[\"intersection_id\"].unique().tolist()) - vertical_bar_padding,\n", + " )\n", + "\n", + " main_color = \"#3A3A3A\"\n", + " brush_opacity = alt.condition(~opacity_selection, alt.value(1), alt.value(0.6))\n", + " brush_color = alt.condition(\n", + " ~color_selection, alt.value(main_color), alt.value(highlight_color)\n", + " )\n", + "\n", + " is_show_horizontal_bar_label_bg = len(abbre[0]) <= 2\n", + " horizontal_bar_label_bg_color = (\n", + " \"white\" if is_show_horizontal_bar_label_bg else \"black\"\n", + " )\n", + "\n", + " x_sort = alt.Sort(\n", + " field=\"count\" if sort_by == \"frequency\" else \"degree\", order=sort_order\n", + " )\n", + " tooltip = [\n", + " alt.Tooltip(\"max(count):Q\", title=\"Cardinality\"),\n", + " alt.Tooltip(\"degree:Q\", title=\"Degree\"),\n", + " ]\n", + "\n", + " \"\"\"\n", + " Plots\n", + " \"\"\"\n", + " # To use native interactivity in Altair, we are using the data transformation functions\n", + " # supported in Altair.\n", + " base = (\n", + " alt.Chart(data)\n", + " .transform_filter(legend_selection)\n", + " .transform_pivot(\n", + " # Right before this operation, columns should be:\n", + " # `count`, `set`, `is_intersect`, (`intersection_id`, `degree`, `set_order`, `set_abbre`)\n", + " # where (fields with brackets) should be dropped and recalculated later.\n", + " \"set\",\n", + " op=\"max\",\n", + " groupby=[\"intersection_id\", \"count\"],\n", + " value=\"is_intersect\",\n", + " )\n", + " .transform_aggregate(\n", + " # count, set1, set2, ...\n", + " count=\"sum(count)\",\n", + " groupby=sets,\n", + " )\n", + " .transform_calculate(\n", + " # count, set1, set2, ...\n", + " degree=degree_calculation\n", + " )\n", + " .transform_filter(\n", + " # count, set1, set2, ..., degree\n", + " alt.datum[\"degree\"]\n", + " != 0\n", + " )\n", + " .transform_window(\n", + " # count, set1, set2, ..., degree\n", + " intersection_id=\"row_number()\",\n", + " frame=[None, None],\n", + " )\n", + " 
.transform_fold(\n", + " # count, set1, set2, ..., degree, intersection_id\n", + " sets,\n", + " as_=[\"set\", \"is_intersect\"],\n", + " )\n", + " .transform_lookup(\n", + " # count, set, is_intersect, degree, intersection_id\n", + " lookup=\"set\",\n", + " from_=alt.LookupData(set_to_abbre, \"set\", [\"set_abbre\"]),\n", + " )\n", + " .transform_lookup(\n", + " # count, set, is_intersect, degree, intersection_id, set_abbre\n", + " lookup=\"set\",\n", + " from_=alt.LookupData(set_to_order, \"set\", [\"set_order\"]),\n", + " )\n", + " .transform_filter(\n", + " # Make sure to remove the filtered sets.\n", + " legend_selection\n", + " )\n", + " .transform_window(\n", + " # count, set, is_intersect, degree, intersection_id, set_abbre\n", + " set_order=\"distinct(set)\",\n", + " frame=[None, 0],\n", + " sort=[{\"field\": \"set_order\"}],\n", + " )\n", + " )\n", + " # Now, we have data in the following format:\n", + " # count, set, is_intersect, degree, intersection_id, set_abbre\n", + "\n", + " # Cardinality by intersecting sets (vertical bar chart)\n", + " vertical_bar = (\n", + " base.mark_bar(color=main_color, size=vertical_bar_size)\n", + " .encode(\n", + " x=alt.X(\n", + " \"intersection_id:N\",\n", + " axis=alt.Axis(grid=False, labels=False, ticks=False, domain=True),\n", + " sort=x_sort,\n", + " title=None,\n", + " ),\n", + " y=alt.Y(\n", + " \"max(count):Q\",\n", + " axis=alt.Axis(grid=False, tickCount=3, orient=\"right\"),\n", + " title=\"Intersection Size\",\n", + " ),\n", + " color=brush_color,\n", + " tooltip=tooltip,\n", + " )\n", + " .properties(width=matrix_width, height=vertical_bar_chart_height)\n", + " )\n", + "\n", + " vertical_bar_text = vertical_bar.mark_text(\n", + " color=main_color, dy=-10, size=vertical_bar_label_size\n", + " ).encode(text=alt.Text(\"count:Q\", format=\".0f\"))\n", + "\n", + " vertical_bar_chart = (vertical_bar + vertical_bar_text).add_selection(\n", + " color_selection\n", + " )\n", + "\n", + " # UpSet glyph view (matrix view)\n", + " circle_bg = (\n", + " vertical_bar.mark_circle(size=glyph_size, opacity=1)\n", + " .encode(\n", + " x=alt.X(\n", + " \"intersection_id:N\",\n", + " axis=alt.Axis(grid=False, labels=False, ticks=False, domain=False),\n", + " sort=x_sort,\n", + " title=None,\n", + " ),\n", + " y=alt.Y(\n", + " \"set_order:N\",\n", + " axis=alt.Axis(grid=False, labels=False, ticks=False, domain=False),\n", + " title=None,\n", + " ),\n", + " color=alt.value(\"#E6E6E6\"),\n", + " )\n", + " .properties(height=matrix_height)\n", + " )\n", + "\n", + " rect_bg = (\n", + " circle_bg.mark_rect()\n", + " .transform_filter(alt.datum[\"set_order\"] % 2 == 1)\n", + " .encode(color=alt.value(\"#F7F7F7\"))\n", + " )\n", + "\n", + " circle = circle_bg.transform_filter(alt.datum[\"is_intersect\"] == 1).encode(\n", + " color=brush_color\n", + " )\n", + "\n", + " line_connection = (\n", + " vertical_bar.mark_bar(size=line_connection_size, color=main_color)\n", + " .transform_filter(alt.datum[\"is_intersect\"] == 1)\n", + " .encode(y=alt.Y(\"min(set_order):N\"), y2=alt.Y2(\"max(set_order):N\"))\n", + " )\n", + "\n", + " matrix_view = (\n", + " circle + rect_bg + circle_bg + line_connection + circle\n", + " ).add_selection(\n", + " # Duplicate `circle` is to properly show tooltips.\n", + " color_selection\n", + " )\n", + "\n", + " # Cardinality by sets (horizontal bar chart)\n", + " horizontal_bar_label_bg = base.mark_circle(size=set_label_bg_size).encode(\n", + " y=alt.Y(\n", + " \"set_order:N\",\n", + " axis=alt.Axis(grid=False, labels=False, ticks=False, 
domain=False),\n", + " title=None,\n", + " ),\n", + " color=alt.Color(\n", + " \"set:N\", scale=alt.Scale(domain=sets, range=color_range), title=None\n", + " ),\n", + " opacity=alt.value(1),\n", + " )\n", + " horizontal_bar_label = horizontal_bar_label_bg.mark_text(\n", + " align=(\"center\" if is_show_horizontal_bar_label_bg else \"center\")\n", + " ).encode(\n", + " text=alt.Text(\"set_abbre:N\"), color=alt.value(horizontal_bar_label_bg_color)\n", + " )\n", + " horizontal_bar_axis = (\n", + " (horizontal_bar_label_bg + horizontal_bar_label)\n", + " if is_show_horizontal_bar_label_bg\n", + " else horizontal_bar_label\n", + " )\n", + "\n", + " horizontal_bar = (\n", + " horizontal_bar_label_bg.mark_bar(size=horizontal_bar_size)\n", + " .transform_filter(alt.datum[\"is_intersect\"] == 1)\n", + " .encode(\n", + " x=alt.X(\n", + " \"sum(count):Q\", axis=alt.Axis(grid=False, tickCount=3), title=\"Set Size\"\n", + " )\n", + " )\n", + " .properties(width=horizontal_bar_chart_width)\n", + " )\n", + "\n", + " # Concat Plots\n", + " upsetaltair = alt.vconcat(\n", + " vertical_bar_chart,\n", + " alt.hconcat(\n", + " matrix_view,\n", + " horizontal_bar_axis,\n", + " horizontal_bar, # horizontal bar chart\n", + " spacing=5,\n", + " ).resolve_scale(y=\"shared\"),\n", + " spacing=20,\n", + " ).add_selection(legend_selection)\n", + "\n", + " # Apply top-level configuration\n", + " upsetaltair = upsetaltair_top_level_configuration(\n", + " upsetaltair, legend_orient=\"top\", legend_symbol_size=set_label_bg_size / 2.0\n", + " ).properties(\n", + " title={\n", + " \"text\": title,\n", + " \"subtitle\": subtitle,\n", + " \"fontSize\": 20,\n", + " \"fontWeight\": 500,\n", + " \"subtitleColor\": main_color,\n", + " \"subtitleFontSize\": 14,\n", + " }\n", + " )\n", + "\n", + " return upsetaltair" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + " | truth | \n", + "pred | \n", + "
---|---|---|
0 | \n", + "c | \n", + "d | \n", + "
1 | \n", + "c | \n", + "c | \n", + "
2 | \n", + "d | \n", + "d | \n", + "
3 | \n", + "c | \n", + "a | \n", + "
4 | \n", + "d | \n", + "c | \n", + "
... | \n", + "... | \n", + "... | \n", + "
995 | \n", + "c | \n", + "c | \n", + "
996 | \n", + "a | \n", + "c | \n", + "
997 | \n", + "b | \n", + "a | \n", + "
998 | \n", + "b | \n", + "c | \n", + "
999 | \n", + "a | \n", + "d | \n", + "
1000 rows ร 2 columns
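The truth/pred preview above is the shape of input that the `confusion_matrix` docstring added in this patch describes. A minimal usage sketch that mirrors the docstring's own examples; the frame below is illustrative only, and the import path assumes the function is exported from the charts module defined in this notebook:

import pandas as pd
from torch_snippets.charts import confusion_matrix   # assumed export of the function defined above

df = pd.DataFrame({"truth": ["c", "c", "d", "c", "d"],
                   "pred":  ["d", "c", "d", "a", "c"]})

chart = confusion_matrix(df=df, truth="truth", pred="pred")    # DataFrame form, column names passed explicitly
chart = confusion_matrix(truth=df["truth"], pred=df["pred"],
                         save_to="confusion_matrix.html")      # separate label arrays, optionally saved as HTML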
\n", + "<class '__main__.AttrDict'>\n", - "\n" - ], - "text/plain": [ - "\u001b[1m<\u001b[0m\u001b[1;95mclass\u001b[0m\u001b[39m \u001b[0m\u001b[32m'__main__.AttrDict'\u001b[0m\u001b[1m>\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
<class 'dict'>\n", - "\n" - ], - "text/plain": [ - "\u001b[1m<\u001b[0m\u001b[1;95mclass\u001b[0m\u001b[39m \u001b[0m\u001b[32m'dict'\u001b[0m\u001b[1m>\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
{\n", - " \"a\": 1,\n", - " \"b\": [\n", - " {\n", - " \"c\": 2,\n", - " \"d\": 4\n", - " },\n", - " {\n", - " \"e\": 3\n", - " }\n", - " ]\n", - "}\n", - "\n" - ], - "text/plain": [ - "\u001b[1m{\u001b[0m\n", - " \u001b[32m\"a\"\u001b[0m: \u001b[1;36m1\u001b[0m,\n", - " \u001b[32m\"b\"\u001b[0m: \u001b[1m[\u001b[0m\n", - " \u001b[1m{\u001b[0m\n", - " \u001b[32m\"c\"\u001b[0m: \u001b[1;36m2\u001b[0m,\n", - " \u001b[32m\"d\"\u001b[0m: \u001b[1;36m4\u001b[0m\n", - " \u001b[1m}\u001b[0m,\n", - " \u001b[1m{\u001b[0m\n", - " \u001b[32m\"e\"\u001b[0m: \u001b[1;36m3\u001b[0m\n", - " \u001b[1m}\u001b[0m\n", - " \u001b[1m]\u001b[0m\n", - "\u001b[1m}\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "
{\n", - " \"abc\": {\n", - " \"b\": 10\n", - " },\n", - " \"d\": {\n", - " \"e\": {\n", - " \"f\": [\n", - " 2,\n", - " {\n", - " \"abc\": \"pqrs\"\n", - " },\n", - " 2.234\n", - " ],\n", - " \"g\": 11\n", - " }\n", - " }\n", - "}\n", - "\n" - ], - "text/plain": [ - "\u001b[1m{\u001b[0m\n", - " \u001b[32m\"abc\"\u001b[0m: \u001b[1m{\u001b[0m\n", - " \u001b[32m\"b\"\u001b[0m: \u001b[1;36m10\u001b[0m\n", - " \u001b[1m}\u001b[0m,\n", - " \u001b[32m\"d\"\u001b[0m: \u001b[1m{\u001b[0m\n", - " \u001b[32m\"e\"\u001b[0m: \u001b[1m{\u001b[0m\n", - " \u001b[32m\"f\"\u001b[0m: \u001b[1m[\u001b[0m\n", - " \u001b[1;36m2\u001b[0m,\n", - " \u001b[1m{\u001b[0m\n", - " \u001b[32m\"abc\"\u001b[0m: \u001b[32m\"pqrs\"\u001b[0m\n", - " \u001b[1m}\u001b[0m,\n", - " \u001b[1;36m2.234\u001b[0m\n", - " \u001b[1m]\u001b[0m,\n", - " \u001b[32m\"g\"\u001b[0m: \u001b[1;36m11\u001b[0m\n", - " \u001b[1m}\u001b[0m\n", - " \u001b[1m}\u001b[0m\n", - "\u001b[1m}\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
True\n",
- "
\n"
- ],
- "text/plain": [
- "\u001b[3;92mTrue\u001b[0m\n"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{\n",
+ " \"abc\": {\n",
+ " \"b\": 10\n",
+ " },\n",
+ " \"d\": {\n",
+ " \"e\": {\n",
+ " \"f\": [\n",
+ " 2,\n",
+ " {\n",
+ " \"abc\": \"pqrs\"\n",
+ " },\n",
+ " 2.234\n",
+ " ],\n",
+ " \"g\": 11\n",
+ " }\n",
+ " }\n",
+ "}\n",
+ "True\n"
+ ]
}
],
"source": [
@@ -720,7 +599,7 @@
},
{
"cell_type": "code",
- "execution_count": 22,
+ "execution_count": 8,
"id": "ebeef579",
"metadata": {},
"outputs": [],
@@ -764,48 +643,35 @@
},
{
"cell_type": "code",
- "execution_count": 24,
+ "execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
- "[12/08/23 11:23:15] INFO loaded 8 lines 2422220380.py::5\" target=\"_blank\"><module>:5\n", + "[05/07/24 13:41:06] INFO loaded 8 lines 1840608379.py::6\" target=\"_blank\"><module>:6\n", "\n" ], "text/plain": [ - "\u001b[2;36m[12/08/23 11:23:15]\u001b[0m\u001b[2;36m \u001b[0m\u001b[2;33mINFO \u001b[0m loaded \u001b[1;36m8\u001b[0m lines \u001b]8;id=350045;file:///var/folders/cp/1fbgq2n922j8ztdsq6551vldkr5sdy/T/ipykernel_10481/2422220380.py:5\u001b\\\u001b[2m2422220380.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=671896;file:///var/folders/cp/1fbgq2n922j8ztdsq6551vldkr5sdy/T/ipykernel_10481/2422220380.py:5#:5\u001b\\\u001b[2m :5\u001b[0m\u001b]8;;\u001b\\\n" + "\u001b[2;36m[05/07/24 13:41:06]\u001b[0m\u001b[2;36m \u001b[0m\u001b[2;33mINFO \u001b[0m loaded \u001b[1;36m8\u001b[0m lines \u001b]8;id=904087;file:///var/folders/cp/1fbgq2n922j8ztdsq6551vldkr5sdy/T/ipykernel_43425/1840608379.py:6\u001b\\\u001b[2m1840608379.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=61923;file:///var/folders/cp/1fbgq2n922j8ztdsq6551vldkr5sdy/T/ipykernel_43425/1840608379.py:6# :6\u001b\\\u001b[2m :6\u001b[0m\u001b]8;;\u001b\\\n" ] }, "metadata": {}, "output_type": "display_data" }, { - "data": { - "text/html": [ - " {\n", - "\"a\": [\n", - "1.0,\n", - "2.0,\n", - "3.0\n", - "],\n", - "\"b\": \"hello\"\n", - "}\n", - "\n" - ], - "text/plain": [ - "\u001b[1m{\u001b[0m\n", - "\u001b[32m\"a\"\u001b[0m: \u001b[1m[\u001b[0m\n", - "\u001b[1;36m1.0\u001b[0m,\n", - "\u001b[1;36m2.0\u001b[0m,\n", - "\u001b[1;36m3.0\u001b[0m\n", - "\u001b[1m]\u001b[0m,\n", - "\u001b[32m\"b\"\u001b[0m: \u001b[32m\"hello\"\u001b[0m\n", - "\u001b[1m}\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + "\"a\": [\n", + "1.0,\n", + "2.0,\n", + "3.0\n", + "],\n", + "\"b\": \"hello\"\n", + "}\n" + ] } ], "source": [ @@ -821,24 +687,18 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "id": "2f9a8810-5782-4bcc-a34f-33f04c855ada", "metadata": { "tags": [] }, "outputs": [ { - "data": { - "text/html": [ - "/tmp/test.json\n", - "\n" - ], - "text/plain": [ - "\u001b[35m/tmp/\u001b[0m\u001b[95mtest.json\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "/tmp/test.json\n" + ] }, { "data": { @@ -846,7 +706,7 @@ "[1, {'1': 1, '2': 2}, 3]" ] }, - "execution_count": 9, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -864,7 +724,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "id": "7dde0198", "metadata": {}, "outputs": [], @@ -886,7 +746,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "id": "02f27598-c32e-46e3-a2ca-1d74914c300c", "metadata": {}, "outputs": [], @@ -907,7 +767,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "id": "2befdc24", "metadata": {}, "outputs": [], @@ -932,7 +792,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "id": "6b8f7bb4-beeb-4d81-95df-c7534b5327b9", "metadata": {}, "outputs": [], @@ -944,7 +804,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -953,7 +813,7 @@ "{'abc': {'b': 10, 'c': 11}, 'd': {'e': {'f': [2, {'abc': 'pqrs'}, 2.234]}}}" ] }, - "execution_count": 17, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -968,29 +828,17 @@ "metadata": {}, 
"outputs": [ { - "data": { - "text/html": [ - "abc\n", - " b - int\n", - " c - int\n", - "d\n", - " e\n", - " f - L\n", - "\n", - "\n" - ], - "text/plain": [ - "abc\n", - " b - int\n", - " c - int\n", - "d\n", - " e\n", - " f - L\n", - "\n" - ] - }, - "metadata": {}, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "abc\n", + "\tb - int\n", + "\tc - int\n", + "d\n", + "\te\n", + "\t\tf - L\n", + "\n" + ] } ], "source": [ diff --git a/nbs/paths.ipynb b/nbs/paths.ipynb index ad3b614..c2a0f20 100644 --- a/nbs/paths.ipynb +++ b/nbs/paths.ipynb @@ -919,20 +919,20 @@ "print_folder_summary = lambda x: print(folder_summary(x))\n", "\n", "\n", - "def tree(folder_path, *additional_flags):\n", - " import subprocess\n", + "def tree(directory):\n", " from builtins import print\n", "\n", + " # Construct the shell command\n", + " shell_command = f\"tree \\\"{directory}\\\" --filelimit=20 | sed 's/โ/ /g; s/โ/ /g; s/|/ /g; s/`/ /g; s/-/โ/g; s/โ/ /g; s/+/ /g'\"\n", + " # Execute the shell command\n", " try:\n", - " # Construct the command by combining \"tree\" with the folder path and additional flags\n", - " command = [\"tree\", folder_path]\n", - " command.extend(additional_flags)\n", - "\n", - " # Run the command and capture the output\n", - " result = subprocess.check_output(command, universal_newlines=True)\n", - " return print(result)\n", + " result = subprocess.run(\n", + " shell_command, shell=True, capture_output=True, text=True\n", + " )\n", + " # Print the output\n", + " print(result.stdout)\n", " except subprocess.CalledProcessError as e:\n", - " return f\"Error: {e}\"" + " print(f\"Error executing command: {e}\")" ] }, { diff --git a/scripts.ipynb b/scripts.ipynb index 0fcc071..3177efd 100644 --- a/scripts.ipynb +++ b/scripts.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 3, + "execution_count": 1, "metadata": {}, "outputs": [ { @@ -10,14 +10,13 @@ "output_type": "stream", "text": [ "\u001b[1mreformatted torch_snippets/load_defaults.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/decorators.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/bokeh_loader.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/inspector.py\u001b[0m\n", + "\u001b[1mreformatted torch_snippets/decorators.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/misc.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/charts.py\u001b[0m\n", + "\u001b[1mreformatted torch_snippets/inspector.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/registry.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/ipython.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/pdf_loader.py\u001b[0m\n", + "\u001b[1mreformatted torch_snippets/ipython.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/imgaug_loader.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/trainer/config.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/logger.py\u001b[0m\n", @@ -25,132 +24,134 @@ "\u001b[1mreformatted torch_snippets/interactive_show.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/sklegos.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/trainer/capsule.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/markup.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/paths.py\u001b[0m\n", + "\u001b[1mreformatted torch_snippets/markup.py\u001b[0m\n", + "\u001b[1mreformatted torch_snippets/charts.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/bb_utils.py\u001b[0m\n", "\u001b[1mreformatted torch_snippets/_modidx.py\u001b[0m\n", "\n", 
"\u001b[1mAll done! โจ ๐ฐ โจ\u001b[0m\n", - "\u001b[34m\u001b[1m20 files \u001b[0m\u001b[1mreformatted\u001b[0m, \u001b[34m39 files \u001b[0mleft unchanged.\n", + "\u001b[34m\u001b[1m20 files \u001b[0m\u001b[1mreformatted\u001b[0m, \u001b[34m40 files \u001b[0mleft unchanged.\n", "Obtaining file:///Users/yeshwanth.y/code/torch_snippets\n", " Preparing metadata (setup.py) ... \u001b[?25ldone\n", - "\u001b[?25hRequirement already satisfied: fastcore in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (1.5.27)\n", - "Requirement already satisfied: matplotlib in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (3.6.2)\n", - "Requirement already satisfied: Pillow in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (9.3.0)\n", - "Requirement already satisfied: altair in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (4.2.0)\n", - "Requirement already satisfied: dill in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (0.3.6)\n", - "Requirement already satisfied: ipython in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (8.7.0)\n", - "Requirement already satisfied: loguru in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (0.7.0)\n", - "Requirement already satisfied: numpy in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (1.23.5)\n", - "Requirement already satisfied: pandas in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (1.5.2)\n", - "Requirement already satisfied: tqdm in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (4.64.1)\n", - "Requirement already satisfied: rich in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (12.6.0)\n", - "Requirement already satisfied: PyYAML in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (6.0)\n", - "Requirement already satisfied: catalogue in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (2.0.8)\n", - "Requirement already satisfied: confection in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (0.0.3)\n", - "Requirement already satisfied: pydantic in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (1.10.2)\n", - "Requirement already satisfied: typing in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (3.7.4.3)\n", - "Requirement already satisfied: srsly in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (2.4.5)\n", - "Requirement already satisfied: typing_extensions in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (4.9.0)\n", - "Requirement already satisfied: wasabi in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (1.1.0)\n", - "Requirement already satisfied: jsonlines in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (3.1.0)\n", - "Requirement already satisfied: imgaug>=0.4.0 in 
/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (0.4.0)\n", - "Requirement already satisfied: xmltodict in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (0.13.0)\n", - "Requirement already satisfied: fuzzywuzzy in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (0.18.0)\n", - "Requirement already satisfied: scikit-learn in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (1.2.0)\n", - "Requirement already satisfied: nltk in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (3.8)\n", - "Requirement already satisfied: python-Levenshtein in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (0.20.8)\n", - "Requirement already satisfied: pre-commit in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (3.0.4)\n", - "Requirement already satisfied: pymupdf in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (1.23.25)\n", - "Requirement already satisfied: nbconvert in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (6.5.4)\n", - "Requirement already satisfied: nbformat in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (5.7.0)\n", - "Requirement already satisfied: icecream in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.530) (2.1.3)\n", - "Requirement already satisfied: opencv-python in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.530) (4.6.0.66)\n", - "Requirement already satisfied: imageio in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.530) (2.22.4)\n", - "Requirement already satisfied: scipy in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.530) (1.9.3)\n", - "Requirement already satisfied: Shapely in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.530) (2.0.0)\n", - "Requirement already satisfied: scikit-image>=0.14.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.530) (0.19.3)\n", - "Requirement already satisfied: six in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.530) (1.16.0)\n", - "Requirement already satisfied: entrypoints in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from altair->torch-snippets==0.530) (0.4)\n", - "Requirement already satisfied: toolz in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from altair->torch-snippets==0.530) (0.12.0)\n", - "Requirement already satisfied: jinja2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from altair->torch-snippets==0.530) (3.1.2)\n", - "Requirement already satisfied: jsonschema>=3.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from altair->torch-snippets==0.530) (4.16.0)\n", - "Requirement already satisfied: python-dateutil>=2.8.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pandas->torch-snippets==0.530) (2.8.2)\n", - "Requirement already 
satisfied: pytz>=2020.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pandas->torch-snippets==0.530) (2022.1)\n", - "Requirement already satisfied: pip in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from fastcore->torch-snippets==0.530) (22.3.1)\n", - "Requirement already satisfied: packaging in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from fastcore->torch-snippets==0.530) (21.3)\n", - "Requirement already satisfied: colorama>=0.3.9 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from icecream->torch-snippets==0.530) (0.4.6)\n", - "Requirement already satisfied: asttokens>=2.0.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from icecream->torch-snippets==0.530) (2.0.5)\n", - "Requirement already satisfied: executing>=0.3.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from icecream->torch-snippets==0.530) (0.8.3)\n", - "Requirement already satisfied: pygments>=2.2.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from icecream->torch-snippets==0.530) (2.11.2)\n", - "Requirement already satisfied: matplotlib-inline in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (0.1.6)\n", - "Requirement already satisfied: traitlets>=5 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (5.1.1)\n", - "Requirement already satisfied: appnope in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (0.1.2)\n", - "Requirement already satisfied: stack-data in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (0.2.0)\n", - "Requirement already satisfied: pexpect>4.3 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (4.8.0)\n", - "Requirement already satisfied: decorator in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (5.1.1)\n", - "Requirement already satisfied: jedi>=0.16 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (0.18.1)\n", - "Requirement already satisfied: backcall in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (0.2.0)\n", - "Requirement already satisfied: prompt-toolkit<3.1.0,>=3.0.11 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (3.0.20)\n", - "Requirement already satisfied: pickleshare in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.530) (0.7.5)\n", - "Requirement already satisfied: attrs>=19.2.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jsonlines->torch-snippets==0.530) (22.1.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.530) (4.38.0)\n", - "Requirement already satisfied: cycler>=0.10 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.530) (0.11.0)\n", - "Requirement already satisfied: pyparsing>=2.2.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.530) (3.0.9)\n", - "Requirement already 
satisfied: contourpy>=1.0.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.530) (1.0.6)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.530) (1.4.4)\n", - "Requirement already satisfied: jupyterlab-pygments in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (0.1.2)\n", - "Requirement already satisfied: defusedxml in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (0.7.1)\n", - "Requirement already satisfied: nbclient>=0.5.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (0.5.13)\n", - "Requirement already satisfied: jupyter-core>=4.7 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (4.11.2)\n", - "Requirement already satisfied: mistune<2,>=0.8.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (0.8.4)\n", - "Requirement already satisfied: bleach in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (4.1.0)\n", - "Requirement already satisfied: beautifulsoup4 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (4.11.1)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (2.1.1)\n", - "Requirement already satisfied: pandocfilters>=1.4.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (1.5.0)\n", - "Requirement already satisfied: tinycss2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (1.2.1)\n", - "Requirement already satisfied: lxml in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.530) (4.9.1)\n", - "Requirement already satisfied: fastjsonschema in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbformat->torch-snippets==0.530) (2.16.2)\n", - "Requirement already satisfied: click in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nltk->torch-snippets==0.530) (8.1.3)\n", - "Requirement already satisfied: joblib in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nltk->torch-snippets==0.530) (1.2.0)\n", - "Requirement already satisfied: regex>=2021.8.3 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nltk->torch-snippets==0.530) (2022.10.31)\n", - "Requirement already satisfied: identify>=1.0.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pre-commit->torch-snippets==0.530) (2.5.17)\n", - "Requirement already satisfied: virtualenv>=20.10.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pre-commit->torch-snippets==0.530) (20.19.0)\n", - "Requirement already satisfied: nodeenv>=0.11.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pre-commit->torch-snippets==0.530) (1.7.0)\n", - "Requirement already satisfied: cfgv>=2.0.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pre-commit->torch-snippets==0.530) (3.3.1)\n", 
- "Requirement already satisfied: PyMuPDFb==1.23.22 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pymupdf->torch-snippets==0.530) (1.23.22)\n", - "Requirement already satisfied: Levenshtein==0.20.8 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from python-Levenshtein->torch-snippets==0.530) (0.20.8)\n", - "Requirement already satisfied: rapidfuzz<3.0.0,>=2.3.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from Levenshtein==0.20.8->python-Levenshtein->torch-snippets==0.530) (2.13.6)\n", - "Requirement already satisfied: commonmark<0.10.0,>=0.9.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from rich->torch-snippets==0.530) (0.9.1)\n", - "Requirement already satisfied: threadpoolctl>=2.0.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from scikit-learn->torch-snippets==0.530) (3.1.0)\n", - "Requirement already satisfied: parso<0.9.0,>=0.8.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jedi>=0.16->ipython->torch-snippets==0.530) (0.8.3)\n", - "Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jsonschema>=3.0->altair->torch-snippets==0.530) (0.18.0)\n", - "Requirement already satisfied: importlib-resources>=1.4.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jsonschema>=3.0->altair->torch-snippets==0.530) (5.2.0)\n", - "Requirement already satisfied: pkgutil-resolve-name>=1.3.10 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jsonschema>=3.0->altair->torch-snippets==0.530) (1.3.10)\n", - "Requirement already satisfied: jupyter-client>=6.1.5 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbclient>=0.5.0->nbconvert->torch-snippets==0.530) (7.4.7)\n", - "Requirement already satisfied: nest-asyncio in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbclient>=0.5.0->nbconvert->torch-snippets==0.530) (1.5.5)\n", - "Requirement already satisfied: setuptools in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nodeenv>=0.11.1->pre-commit->torch-snippets==0.530) (65.5.0)\n", - "Requirement already satisfied: ptyprocess>=0.5 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pexpect>4.3->ipython->torch-snippets==0.530) (0.7.0)\n", - "Requirement already satisfied: wcwidth in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from prompt-toolkit<3.1.0,>=3.0.11->ipython->torch-snippets==0.530) (0.2.5)\n", - "Requirement already satisfied: PyWavelets>=1.1.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from scikit-image>=0.14.2->imgaug>=0.4.0->torch-snippets==0.530) (1.4.1)\n", - "Requirement already satisfied: networkx>=2.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from scikit-image>=0.14.2->imgaug>=0.4.0->torch-snippets==0.530) (2.8.8)\n", - "Requirement already satisfied: tifffile>=2019.7.26 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from scikit-image>=0.14.2->imgaug>=0.4.0->torch-snippets==0.530) (2022.10.10)\n", - "Requirement already satisfied: filelock<4,>=3.4.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from virtualenv>=20.10.0->pre-commit->torch-snippets==0.530) (3.8.2)\n", - "Requirement already satisfied: platformdirs<4,>=2.4 in 
/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from virtualenv>=20.10.0->pre-commit->torch-snippets==0.530) (2.6.0)\n", - "Requirement already satisfied: distlib<1,>=0.3.6 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from virtualenv>=20.10.0->pre-commit->torch-snippets==0.530) (0.3.6)\n", - "Requirement already satisfied: soupsieve>1.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from beautifulsoup4->nbconvert->torch-snippets==0.530) (2.3.2.post1)\n", - "Requirement already satisfied: webencodings in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from bleach->nbconvert->torch-snippets==0.530) (0.5.1)\n", - "Requirement already satisfied: pure-eval in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from stack-data->ipython->torch-snippets==0.530) (0.2.2)\n", - "Requirement already satisfied: zipp>=3.1.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from importlib-resources>=1.4.0->jsonschema>=3.0->altair->torch-snippets==0.530) (3.8.0)\n", - "Requirement already satisfied: pyzmq>=23.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jupyter-client>=6.1.5->nbclient>=0.5.0->nbconvert->torch-snippets==0.530) (23.2.0)\n", - "Requirement already satisfied: tornado>=6.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jupyter-client>=6.1.5->nbclient>=0.5.0->nbconvert->torch-snippets==0.530) (6.2)\n", + "\u001b[?25hRequirement already satisfied: fastcore in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (1.5.27)\n", + "Requirement already satisfied: matplotlib in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (3.6.2)\n", + "Requirement already satisfied: Pillow in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (9.3.0)\n", + "Requirement already satisfied: altair in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (5.2.0)\n", + "Requirement already satisfied: dill in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (0.3.6)\n", + "Requirement already satisfied: ipython in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (8.7.0)\n", + "Requirement already satisfied: loguru in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (0.7.0)\n", + "Requirement already satisfied: numpy in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (1.23.5)\n", + "Requirement already satisfied: pandas in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (1.5.2)\n", + "Requirement already satisfied: tqdm in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (4.65.2)\n", + "Requirement already satisfied: rich in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (13.4.2)\n", + "Requirement already satisfied: PyYAML in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (6.0)\n", + "Requirement already satisfied: catalogue in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (2.0.8)\n", + "Requirement already satisfied: confection in 
/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (0.0.3)\n", + "Requirement already satisfied: pydantic in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (1.10.2)\n", + "Requirement already satisfied: typing in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (3.7.4.3)\n", + "Requirement already satisfied: srsly in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (2.4.5)\n", + "Requirement already satisfied: typing_extensions in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (4.9.0)\n", + "Requirement already satisfied: wasabi in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (1.1.0)\n", + "Requirement already satisfied: jsonlines in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (3.1.0)\n", + "Requirement already satisfied: imgaug>=0.4.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (0.4.0)\n", + "Requirement already satisfied: xmltodict in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (0.13.0)\n", + "Requirement already satisfied: fuzzywuzzy in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (0.18.0)\n", + "Requirement already satisfied: scikit-learn in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (1.2.0)\n", + "Requirement already satisfied: nltk in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (3.8)\n", + "Requirement already satisfied: python-Levenshtein in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (0.20.8)\n", + "Requirement already satisfied: pre-commit in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (3.0.4)\n", + "Requirement already satisfied: pymupdf in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (1.23.25)\n", + "Requirement already satisfied: nbconvert in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (6.5.4)\n", + "Requirement already satisfied: nbformat in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (5.7.0)\n", + "Requirement already satisfied: icecream in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from torch-snippets==0.531) (2.1.3)\n", + "Requirement already satisfied: opencv-python in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.531) (4.6.0.66)\n", + "Requirement already satisfied: scikit-image>=0.14.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.531) (0.19.3)\n", + "Requirement already satisfied: Shapely in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.531) (2.0.0)\n", + "Requirement already satisfied: scipy in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.531) (1.9.3)\n", + "Requirement already satisfied: six in 
/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.531) (1.16.0)\n", + "Requirement already satisfied: imageio in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from imgaug>=0.4.0->torch-snippets==0.531) (2.22.4)\n", + "Requirement already satisfied: jinja2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from altair->torch-snippets==0.531) (3.1.2)\n", + "Requirement already satisfied: packaging in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from altair->torch-snippets==0.531) (21.3)\n", + "Requirement already satisfied: toolz in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from altair->torch-snippets==0.531) (0.12.0)\n", + "Requirement already satisfied: jsonschema>=3.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from altair->torch-snippets==0.531) (4.16.0)\n", + "Requirement already satisfied: pytz>=2020.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pandas->torch-snippets==0.531) (2023.4)\n", + "Requirement already satisfied: python-dateutil>=2.8.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pandas->torch-snippets==0.531) (2.8.2)\n", + "Requirement already satisfied: pip in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from fastcore->torch-snippets==0.531) (22.3.1)\n", + "Requirement already satisfied: asttokens>=2.0.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from icecream->torch-snippets==0.531) (2.0.5)\n", + "Requirement already satisfied: colorama>=0.3.9 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from icecream->torch-snippets==0.531) (0.4.6)\n", + "Requirement already satisfied: executing>=0.3.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from icecream->torch-snippets==0.531) (0.8.3)\n", + "Requirement already satisfied: pygments>=2.2.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from icecream->torch-snippets==0.531) (2.17.2)\n", + "Requirement already satisfied: stack-data in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (0.2.0)\n", + "Requirement already satisfied: traitlets>=5 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (5.1.1)\n", + "Requirement already satisfied: prompt-toolkit<3.1.0,>=3.0.11 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (3.0.20)\n", + "Requirement already satisfied: backcall in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (0.2.0)\n", + "Requirement already satisfied: decorator in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (5.1.1)\n", + "Requirement already satisfied: jedi>=0.16 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (0.18.1)\n", + "Requirement already satisfied: pickleshare in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (0.7.5)\n", + "Requirement already satisfied: matplotlib-inline in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (0.1.6)\n", + "Requirement already satisfied: appnope in 
/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (0.1.2)\n", + "Requirement already satisfied: pexpect>4.3 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from ipython->torch-snippets==0.531) (4.8.0)\n", + "Requirement already satisfied: attrs>=19.2.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jsonlines->torch-snippets==0.531) (22.1.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.531) (1.0.6)\n", + "Requirement already satisfied: pyparsing>=2.2.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.531) (3.0.9)\n", + "Requirement already satisfied: cycler>=0.10 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.531) (0.11.0)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.531) (1.4.4)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from matplotlib->torch-snippets==0.531) (4.38.0)\n", + "Requirement already satisfied: pandocfilters>=1.4.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (1.5.0)\n", + "Requirement already satisfied: mistune<2,>=0.8.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (0.8.4)\n", + "Requirement already satisfied: defusedxml in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (0.7.1)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (2.1.1)\n", + "Requirement already satisfied: jupyterlab-pygments in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (0.1.2)\n", + "Requirement already satisfied: entrypoints>=0.2.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (0.4)\n", + "Requirement already satisfied: nbclient>=0.5.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (0.5.13)\n", + "Requirement already satisfied: beautifulsoup4 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (4.11.1)\n", + "Requirement already satisfied: jupyter-core>=4.7 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (4.11.2)\n", + "Requirement already satisfied: tinycss2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (1.2.1)\n", + "Requirement already satisfied: lxml in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (4.9.1)\n", + "Requirement already satisfied: bleach in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbconvert->torch-snippets==0.531) (4.1.0)\n", + "Requirement already satisfied: fastjsonschema in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbformat->torch-snippets==0.531) (2.16.2)\n", + 
"Requirement already satisfied: joblib in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nltk->torch-snippets==0.531) (1.2.0)\n", + "Requirement already satisfied: click in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nltk->torch-snippets==0.531) (8.1.3)\n", + "Requirement already satisfied: regex>=2021.8.3 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nltk->torch-snippets==0.531) (2022.10.31)\n", + "Requirement already satisfied: identify>=1.0.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pre-commit->torch-snippets==0.531) (2.5.17)\n", + "Requirement already satisfied: virtualenv>=20.10.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pre-commit->torch-snippets==0.531) (20.19.0)\n", + "Requirement already satisfied: cfgv>=2.0.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pre-commit->torch-snippets==0.531) (3.3.1)\n", + "Requirement already satisfied: nodeenv>=0.11.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pre-commit->torch-snippets==0.531) (1.7.0)\n", + "Requirement already satisfied: PyMuPDFb==1.23.22 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pymupdf->torch-snippets==0.531) (1.23.22)\n", + "Requirement already satisfied: Levenshtein==0.20.8 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from python-Levenshtein->torch-snippets==0.531) (0.20.8)\n", + "Requirement already satisfied: rapidfuzz<3.0.0,>=2.3.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from Levenshtein==0.20.8->python-Levenshtein->torch-snippets==0.531) (2.13.6)\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from rich->torch-snippets==0.531) (3.0.0)\n", + "Requirement already satisfied: threadpoolctl>=2.0.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from scikit-learn->torch-snippets==0.531) (3.1.0)\n", + "Requirement already satisfied: parso<0.9.0,>=0.8.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jedi>=0.16->ipython->torch-snippets==0.531) (0.8.3)\n", + "Requirement already satisfied: pkgutil-resolve-name>=1.3.10 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jsonschema>=3.0->altair->torch-snippets==0.531) (1.3.10)\n", + "Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jsonschema>=3.0->altair->torch-snippets==0.531) (0.18.0)\n", + "Requirement already satisfied: importlib-resources>=1.4.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jsonschema>=3.0->altair->torch-snippets==0.531) (5.2.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from markdown-it-py>=2.2.0->rich->torch-snippets==0.531) (0.1.2)\n", + "Requirement already satisfied: jupyter-client>=6.1.5 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbclient>=0.5.0->nbconvert->torch-snippets==0.531) (7.4.7)\n", + "Requirement already satisfied: nest-asyncio in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nbclient>=0.5.0->nbconvert->torch-snippets==0.531) (1.5.5)\n", + "Requirement already satisfied: setuptools in 
/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from nodeenv>=0.11.1->pre-commit->torch-snippets==0.531) (60.2.0)\n", + "Requirement already satisfied: ptyprocess>=0.5 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from pexpect>4.3->ipython->torch-snippets==0.531) (0.7.0)\n", + "Requirement already satisfied: wcwidth in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from prompt-toolkit<3.1.0,>=3.0.11->ipython->torch-snippets==0.531) (0.2.5)\n", + "Requirement already satisfied: networkx>=2.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from scikit-image>=0.14.2->imgaug>=0.4.0->torch-snippets==0.531) (2.8.8)\n", + "Requirement already satisfied: PyWavelets>=1.1.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from scikit-image>=0.14.2->imgaug>=0.4.0->torch-snippets==0.531) (1.4.1)\n", + "Requirement already satisfied: tifffile>=2019.7.26 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from scikit-image>=0.14.2->imgaug>=0.4.0->torch-snippets==0.531) (2022.10.10)\n", + "Requirement already satisfied: filelock<4,>=3.4.1 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from virtualenv>=20.10.0->pre-commit->torch-snippets==0.531) (3.8.2)\n", + "Requirement already satisfied: platformdirs<4,>=2.4 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from virtualenv>=20.10.0->pre-commit->torch-snippets==0.531) (2.6.0)\n", + "Requirement already satisfied: distlib<1,>=0.3.6 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from virtualenv>=20.10.0->pre-commit->torch-snippets==0.531) (0.3.6)\n", + "Requirement already satisfied: soupsieve>1.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from beautifulsoup4->nbconvert->torch-snippets==0.531) (2.3.2.post1)\n", + "Requirement already satisfied: webencodings in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from bleach->nbconvert->torch-snippets==0.531) (0.5.1)\n", + "Requirement already satisfied: pure-eval in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from stack-data->ipython->torch-snippets==0.531) (0.2.2)\n", + "Requirement already satisfied: zipp>=3.1.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from importlib-resources>=1.4.0->jsonschema>=3.0->altair->torch-snippets==0.531) (3.8.0)\n", + "Requirement already satisfied: tornado>=6.2 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jupyter-client>=6.1.5->nbclient>=0.5.0->nbconvert->torch-snippets==0.531) (6.2)\n", + "Requirement already satisfied: pyzmq>=23.0 in /Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages (from jupyter-client>=6.1.5->nbclient>=0.5.0->nbconvert->torch-snippets==0.531) (23.2.0)\n", "Installing collected packages: torch-snippets\n", " Attempting uninstall: torch-snippets\n", " Found existing installation: torch-snippets 0.530\n", " Uninstalling torch-snippets-0.530:\n", " Successfully uninstalled torch-snippets-0.530\n", " Running setup.py develop for torch-snippets\n", - "Successfully installed torch-snippets-0.530\n", + "Successfully installed torch-snippets-0.531\n", "Note: you may need to restart the kernel to use updated packages.\n" ] } @@ -164,17 +165,17 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/html": [ - "[03/28/24 18:08:37] INFO loaded 25 lines 
4135304039.py::3\" target=\"_blank\"><module>:3\n", + "[05/07/24 13:43:30] INFO loaded 25 lines 4135304039.py::3\" target=\"_blank\"><module>:3\n", "\n" ], "text/plain": [ - "\u001b[2;36m[03/28/24 18:08:37]\u001b[0m\u001b[2;36m \u001b[0m\u001b[2;33mINFO \u001b[0m loaded \u001b[1;36m25\u001b[0m lines \u001b]8;id=150266;file:///var/folders/cp/1fbgq2n922j8ztdsq6551vldkr5sdy/T/ipykernel_21368/4135304039.py:3\u001b\\\u001b[2m4135304039.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=359469;file:///var/folders/cp/1fbgq2n922j8ztdsq6551vldkr5sdy/T/ipykernel_21368/4135304039.py:3#:3\u001b\\\u001b[2m :3\u001b[0m\u001b]8;;\u001b\\\n" + "\u001b[2;36m[05/07/24 13:43:30]\u001b[0m\u001b[2;36m \u001b[0m\u001b[2;33mINFO \u001b[0m loaded \u001b[1;36m25\u001b[0m lines \u001b]8;id=411935;file:///var/folders/cp/1fbgq2n922j8ztdsq6551vldkr5sdy/T/ipykernel_46357/4135304039.py:3\u001b\\\u001b[2m4135304039.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=803738;file:///var/folders/cp/1fbgq2n922j8ztdsq6551vldkr5sdy/T/ipykernel_46357/4135304039.py:3# :3\u001b\\\u001b[2m :3\u001b[0m\u001b]8;;\u001b\\\n" ] }, "metadata": {}, @@ -183,10 +184,10 @@ { "data": { "text/plain": [ - "'0.530'" + "'0.531'" ] }, - "execution_count": 4, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -207,79 +208,71 @@ "name": "stdout", "output_type": "stream", "text": [ - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/setuptools/dist.py:771: UserWarning: Usage of dash-separated 'description-file' will not be supported in future versions. Please use the underscore name 'description_file' instead\n", + "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/setuptools/dist.py:723: UserWarning: Usage of dash-separated 'description-file' will not be supported in future versions. 
Please use the underscore name 'description_file' instead\n", " warnings.warn(\n", "running sdist\n", "running egg_info\n", - "writing torch_snippets.egg-info/PKG-INFO\n", - "writing dependency_links to torch_snippets.egg-info/dependency_links.txt\n", - "writing requirements to torch_snippets.egg-info/requires.txt\n", - "writing top-level names to torch_snippets.egg-info/top_level.txt\n", - "reading manifest file 'torch_snippets.egg-info/SOURCES.txt'\n", - "reading manifest template 'MANIFEST.in'\n", "warning: no files found matching 'CONTRIBUTING.md'\n", "warning: no previously-included files matching '__pycache__' found under directory '*'\n", - "adding license file 'LICENSE'\n", - "adding license file 'LICENSE.txt'\n", "writing manifest file 'torch_snippets.egg-info/SOURCES.txt'\n", "running check\n", - "creating torch_snippets-0.530\n", - "creating torch_snippets-0.530/torch_snippets\n", - "creating torch_snippets-0.530/torch_snippets.egg-info\n", - "creating torch_snippets-0.530/torch_snippets/thinc_parser\n", - "creating torch_snippets-0.530/torch_snippets/trainer\n", - "copying files to torch_snippets-0.530...\n", - "copying LICENSE -> torch_snippets-0.530\n", - "copying LICENSE.txt -> torch_snippets-0.530\n", - "copying MANIFEST.in -> torch_snippets-0.530\n", - "copying README.md -> torch_snippets-0.530\n", - "copying settings.ini -> torch_snippets-0.530\n", - "copying setup.cfg -> torch_snippets-0.530\n", - "copying setup.py -> torch_snippets-0.530\n", - "copying torch_snippets/__init__.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/_modidx.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/_nbdev.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/adapters.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/bb_utils.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/bokeh_loader.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/charts.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/dates.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/decorators.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/fastcores.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/icecream.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/imgaug_loader.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/inspector.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/interactive_show.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/ipython.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/load_defaults.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/loader.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/logger.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/markup.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/markup2.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/misc.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/paths.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/pdf_loader.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/registry.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/sklegos.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/text_utils.py 
-> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/torch_loader.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets/video.py -> torch_snippets-0.530/torch_snippets\n", - "copying torch_snippets.egg-info/PKG-INFO -> torch_snippets-0.530/torch_snippets.egg-info\n", - "copying torch_snippets.egg-info/SOURCES.txt -> torch_snippets-0.530/torch_snippets.egg-info\n", - "copying torch_snippets.egg-info/dependency_links.txt -> torch_snippets-0.530/torch_snippets.egg-info\n", - "copying torch_snippets.egg-info/not-zip-safe -> torch_snippets-0.530/torch_snippets.egg-info\n", - "copying torch_snippets.egg-info/requires.txt -> torch_snippets-0.530/torch_snippets.egg-info\n", - "copying torch_snippets.egg-info/top_level.txt -> torch_snippets-0.530/torch_snippets.egg-info\n", - "copying torch_snippets/thinc_parser/__init__.py -> torch_snippets-0.530/torch_snippets/thinc_parser\n", - "copying torch_snippets/thinc_parser/parser.py -> torch_snippets-0.530/torch_snippets/thinc_parser\n", - "copying torch_snippets/trainer/__init__.py -> torch_snippets-0.530/torch_snippets/trainer\n", - "copying torch_snippets/trainer/capsule.py -> torch_snippets-0.530/torch_snippets/trainer\n", - "copying torch_snippets/trainer/config.py -> torch_snippets-0.530/torch_snippets/trainer\n", - "copying torch_snippets/trainer/hooks.py -> torch_snippets-0.530/torch_snippets/trainer\n", - "Writing torch_snippets-0.530/setup.cfg\n", + "creating torch_snippets-0.531\n", + "creating torch_snippets-0.531/torch_snippets\n", + "creating torch_snippets-0.531/torch_snippets.egg-info\n", + "creating torch_snippets-0.531/torch_snippets/thinc_parser\n", + "creating torch_snippets-0.531/torch_snippets/trainer\n", + "copying LICENSE -> torch_snippets-0.531\n", + "copying LICENSE.txt -> torch_snippets-0.531\n", + "copying MANIFEST.in -> torch_snippets-0.531\n", + "copying README.md -> torch_snippets-0.531\n", + "copying settings.ini -> torch_snippets-0.531\n", + "copying setup.cfg -> torch_snippets-0.531\n", + "copying setup.py -> torch_snippets-0.531\n", + "copying torch_snippets/__init__.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/_modidx.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/_nbdev.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/adapters.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/bb_utils.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/bokeh_loader.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/charts.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/dates.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/decorators.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/fastcores.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/icecream.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/imgaug_loader.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/inspector.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/interactive_show.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/ipython.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/load_defaults.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/loader.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/logger.py -> torch_snippets-0.531/torch_snippets\n", + "copying 
torch_snippets/markup.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/markup2.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/misc.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/paths.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/pdf_loader.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/registry.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/scp.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/sklegos.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/text_utils.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/torch_loader.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets/video.py -> torch_snippets-0.531/torch_snippets\n", + "copying torch_snippets.egg-info/PKG-INFO -> torch_snippets-0.531/torch_snippets.egg-info\n", + "copying torch_snippets.egg-info/SOURCES.txt -> torch_snippets-0.531/torch_snippets.egg-info\n", + "copying torch_snippets.egg-info/dependency_links.txt -> torch_snippets-0.531/torch_snippets.egg-info\n", + "copying torch_snippets.egg-info/entry_points.txt -> torch_snippets-0.531/torch_snippets.egg-info\n", + "copying torch_snippets.egg-info/not-zip-safe -> torch_snippets-0.531/torch_snippets.egg-info\n", + "copying torch_snippets.egg-info/requires.txt -> torch_snippets-0.531/torch_snippets.egg-info\n", + "copying torch_snippets.egg-info/top_level.txt -> torch_snippets-0.531/torch_snippets.egg-info\n", + "copying torch_snippets/thinc_parser/__init__.py -> torch_snippets-0.531/torch_snippets/thinc_parser\n", + "copying torch_snippets/thinc_parser/parser.py -> torch_snippets-0.531/torch_snippets/thinc_parser\n", + "copying torch_snippets/trainer/__init__.py -> torch_snippets-0.531/torch_snippets/trainer\n", + "copying torch_snippets/trainer/capsule.py -> torch_snippets-0.531/torch_snippets/trainer\n", + "copying torch_snippets/trainer/config.py -> torch_snippets-0.531/torch_snippets/trainer\n", + "copying torch_snippets/trainer/hooks.py -> torch_snippets-0.531/torch_snippets/trainer\n", "creating dist\n", "Creating tar archive\n", - "removing 'torch_snippets-0.530' (and everything under it)\n", + "removing 'torch_snippets-0.531' (and everything under it)\n", "running bdist_wheel\n", "running build\n", "running build_py\n", @@ -311,6 +304,7 @@ "copying torch_snippets/dates.py -> build/lib/torch_snippets\n", "copying torch_snippets/bokeh_loader.py -> build/lib/torch_snippets\n", "copying torch_snippets/video.py -> build/lib/torch_snippets\n", + "copying torch_snippets/scp.py -> build/lib/torch_snippets\n", "copying torch_snippets/bb_utils.py -> build/lib/torch_snippets\n", "copying torch_snippets/adapters.py -> build/lib/torch_snippets\n", "copying torch_snippets/decorators.py -> build/lib/torch_snippets\n", @@ -324,125 +318,24 @@ "copying torch_snippets/trainer/__init__.py -> build/lib/torch_snippets/trainer\n", "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", " warnings.warn(\n", - "installing to build/bdist.macosx-10.9-x86_64/wheel\n", "running install\n", "running install_lib\n", "creating build/bdist.macosx-10.9-x86_64\n", "creating build/bdist.macosx-10.9-x86_64/wheel\n", "creating build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/misc.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/load_defaults.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/text_utils.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/_nbdev.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/paths.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/charts.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/pdf_loader.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/interactive_show.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/registry.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/markup2.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/_modidx.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/inspector.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/__init__.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/torch_loader.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/logger.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/markup.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/fastcores.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/sklegos.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/ipython.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/loader.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/imgaug_loader.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/icecream.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/dates.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", "creating build/bdist.macosx-10.9-x86_64/wheel/torch_snippets/thinc_parser\n", - "copying build/lib/torch_snippets/thinc_parser/__init__.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets/thinc_parser\n", - "copying build/lib/torch_snippets/thinc_parser/parser.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets/thinc_parser\n", - "copying build/lib/torch_snippets/bokeh_loader.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/video.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", "creating build/bdist.macosx-10.9-x86_64/wheel/torch_snippets/trainer\n", - "copying build/lib/torch_snippets/trainer/hooks.py -> 
build/bdist.macosx-10.9-x86_64/wheel/torch_snippets/trainer\n", - "copying build/lib/torch_snippets/trainer/capsule.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets/trainer\n", - "copying build/lib/torch_snippets/trainer/config.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets/trainer\n", - "copying build/lib/torch_snippets/trainer/__init__.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets/trainer\n", - "copying build/lib/torch_snippets/bb_utils.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/adapters.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", - "copying build/lib/torch_snippets/decorators.py -> build/bdist.macosx-10.9-x86_64/wheel/torch_snippets\n", "running install_egg_info\n", - "Copying torch_snippets.egg-info to build/bdist.macosx-10.9-x86_64/wheel/torch_snippets-0.530-py3.8.egg-info\n", + "Copying torch_snippets.egg-info to build/bdist.macosx-10.9-x86_64/wheel/torch_snippets-0.531-py3.8.egg-info\n", "running install_scripts\n", - "adding license file \"LICENSE\" (matched pattern \"LICEN[CS]E*\")\n", - "adding license file \"LICENSE.txt\" (matched pattern \"LICEN[CS]E*\")\n", - "creating build/bdist.macosx-10.9-x86_64/wheel/torch_snippets-0.530.dist-info/WHEEL\n", - "creating 'dist/torch_snippets-0.530-py3-none-any.whl' and adding 'build/bdist.macosx-10.9-x86_64/wheel' to it\n", - "adding 'torch_snippets/__init__.py'\n", - "adding 'torch_snippets/_modidx.py'\n", - "adding 'torch_snippets/_nbdev.py'\n", - "adding 'torch_snippets/adapters.py'\n", - "adding 'torch_snippets/bb_utils.py'\n", - "adding 'torch_snippets/bokeh_loader.py'\n", - "adding 'torch_snippets/charts.py'\n", - "adding 'torch_snippets/dates.py'\n", - "adding 'torch_snippets/decorators.py'\n", - "adding 'torch_snippets/fastcores.py'\n", - "adding 'torch_snippets/icecream.py'\n", - "adding 'torch_snippets/imgaug_loader.py'\n", - "adding 'torch_snippets/inspector.py'\n", - "adding 'torch_snippets/interactive_show.py'\n", - "adding 'torch_snippets/ipython.py'\n", - "adding 'torch_snippets/load_defaults.py'\n", - "adding 'torch_snippets/loader.py'\n", - "adding 'torch_snippets/logger.py'\n", - "adding 'torch_snippets/markup.py'\n", - "adding 'torch_snippets/markup2.py'\n", - "adding 'torch_snippets/misc.py'\n", - "adding 'torch_snippets/paths.py'\n", - "adding 'torch_snippets/pdf_loader.py'\n", - "adding 'torch_snippets/registry.py'\n", - "adding 'torch_snippets/sklegos.py'\n", - "adding 'torch_snippets/text_utils.py'\n", - "adding 'torch_snippets/torch_loader.py'\n", - "adding 'torch_snippets/video.py'\n", - "adding 'torch_snippets/thinc_parser/__init__.py'\n", - "adding 'torch_snippets/thinc_parser/parser.py'\n", - "adding 'torch_snippets/trainer/__init__.py'\n", - "adding 'torch_snippets/trainer/capsule.py'\n", - "adding 'torch_snippets/trainer/config.py'\n", - "adding 'torch_snippets/trainer/hooks.py'\n", - "adding 'torch_snippets-0.530.dist-info/LICENSE'\n", - "adding 'torch_snippets-0.530.dist-info/LICENSE.txt'\n", - "adding 'torch_snippets-0.530.dist-info/METADATA'\n", - "adding 'torch_snippets-0.530.dist-info/WHEEL'\n", - "adding 'torch_snippets-0.530.dist-info/top_level.txt'\n", - "adding 'torch_snippets-0.530.dist-info/RECORD'\n", - "removing build/bdist.macosx-10.9-x86_64/wheel\n", "Uploading distributions to https://upload.pypi.org/legacy/\n", - "Uploading torch_snippets-0.530-py3-none-any.whl\n", - "\u001b[2K\u001b[35m100%\u001b[0m \u001b[90mโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\u001b[0m \u001b[32m92.2/92.2 
kB\u001b[0m โข \u001b[33m00:02\u001b[0m โข \u001b[31m43.8 MB/s\u001b[0m\n", - "\u001b[?25h\u001b[33mWARNING \u001b[0m Error during upload. Retry with the --verbose option for more details. \n", - "\u001b[31mERROR \u001b[0m HTTPError: 403 Forbidden from https://upload.pypi.org/legacy/ \n", - " New uploads are temporarily disabled. See \n", - " https://pypi.org/help/#admin-intervention for more information. \n", - "Traceback (most recent call last):\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/bin/nbdev_release_both\", line 8, in \n", - " sys.exit(release_both())\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/script.py\", line 119, in _f\n", - " return tfunc(**merge(args, args_from_prog(func, xtra)))\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/release.py\", line 328, in release_both\n", - " release_conda.__wrapped__(path, do_build=do_build, build_args=build_args, skip_upload=skip_upload, mambabuild=mambabuild, upload_user=upload_user)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/release.py\", line 272, in release_conda\n", - " write_conda_meta(path)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/release.py\", line 239, in write_conda_meta\n", - " _write_yaml(path, *_get_conda_meta())\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/release.py\", line 208, in _get_conda_meta\n", - " pypi = pypi_json(f'{name}/{ver}')\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/release.py\", line 150, in pypi_json\n", - " return urljson(f'{_PYPI_URL}{s}/json')\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/net.py\", line 129, in urljson\n", - " res = urlread(url, data=data, timeout=timeout)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/net.py\", line 119, in urlread\n", - " if 400 <= e.code < 500: raise ExceptionsHTTP[e.code](e.url, e.hdrs, e.fp, msg=e.msg) from None\n", - "fastcore.net.HTTP404NotFoundError: HTTP Error 404: Not Found\n", - "====Error Body====\n", - "{\"message\": \"Not Found\"}\n" + "Uploading torch_snippets-0.531-py3-none-any.whl\n", + "\u001b[2K\u001b[35m100%\u001b[0m \u001b[90mโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\u001b[0m \u001b[32m101.6/101.6 kB\u001b[0m โข \u001b[33m00:00\u001b[0m โข \u001b[31m51.4 MB/s\u001b[0m\n", + "\u001b[?25hUploading torch_snippets-0.531.tar.gz\n", + "\u001b[2K\u001b[35m100%\u001b[0m \u001b[90mโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\u001b[0m \u001b[32m86.9/86.9 kB\u001b[0m โข \u001b[33m00:00\u001b[0m โข \u001b[31m49.7 MB/s\u001b[0m\n", + "\u001b[?25h\n", + "\u001b[32mView at:\u001b[0m\n", + "https://pypi.org/project/torch-snippets/0.531/\n" ] } ], @@ -453,203 +346,14 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/processors.py:186: UserWarning: Found cells containing imports and other code. See FAQ.\n", - "---\n", - "#| echo: false\n", - "#| output: asis\n", - "from nbdev import show_doc\n", - "\n", - "show_doc(AttrDict)\n", - "---\n", - "\n", - " warn(f'Found cells containing imports and other code. 
See FAQ.\\n---\\n{cell.source}\\n---\\n')\n", - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/processors.py:186: UserWarning: Found cells containing imports and other code. See FAQ.\n", - "---\n", - "from torch_snippets.loader import *\n", - "from sklearn.datasets import make_moons\n", - "\n", - "np.random.seed(10)\n", - "x, y = make_moons(1000, noise=0.1)\n", - "df = pd.DataFrame({\"x1\": x[:, 0], \"x2\": x[:, 1], \"y\": y})\n", - "\n", - "Chart(df).mark_circle().encode(x=\"x1:Q\", y=\"x2:Q\", color=\"y:N\").interactive()\n", - "---\n", - "\n", - " warn(f'Found cells containing imports and other code. See FAQ.\\n---\\n{cell.source}\\n---\\n')\n", - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/processors.py:186: UserWarning: Found cells containing imports and other code. See FAQ.\n", - "---\n", - "from sklearn.datasets import load_iris\n", - "from sklearn.model_selection import train_test_split\n", - "\n", - "data = load_iris()\n", - "X, y = data.data, data.target\n", - "X_trn, X_val, y_trn, y_val = train_test_split(X, y, random_state=42)\n", - "---\n", - "\n", - " warn(f'Found cells containing imports and other code. See FAQ.\\n---\\n{cell.source}\\n---\\n')\n", - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/processors.py:186: UserWarning: Found cells containing imports and other code. See FAQ.\n", - "---\n", - "from torch_snippets.inspector import inspect\n", - "from torch.utils.data import TensorDataset\n", - "\n", - "trn_ds = TensorDataset(*[torch.Tensor(i) for i in [X_trn, y_trn]])\n", - "trn_dl = DataLoader(trn_ds, batch_size=32)\n", - "\n", - "val_ds = TensorDataset(*[torch.Tensor(i) for i in [X_val, y_val]])\n", - "val_dl = DataLoader(val_ds, batch_size=32)\n", - "\n", - "inspect(next(iter(val_dl)))\n", - "---\n", - "\n", - " warn(f'Found cells containing imports and other code. See FAQ.\\n---\\n{cell.source}\\n---\\n')\n", - "/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/processors.py:186: UserWarning: Found cells containing imports and other code. 
See FAQ.\n", - "---\n", - "from torch_snippets.trainer.capsule import Capsule, train, validate, predict\n", - "\n", - "\n", - "class IrisModel(Capsule):\n", - " def __init__(self):\n", - " super().__init__()\n", - " self.model = nn.Sequential(\n", - " nn.BatchNorm1d(4),\n", - " nn.Linear(4, 16),\n", - " nn.Dropout(0.2),\n", - " nn.BatchNorm1d(16),\n", - " nn.ReLU(inplace=True),\n", - " nn.Linear(16, 8),\n", - " nn.Dropout(0.2),\n", - " nn.BatchNorm1d(8),\n", - " nn.ReLU(inplace=True),\n", - " nn.Linear(8, 3),\n", - " )\n", - " self.loss_fn = nn.CrossEntropyLoss()\n", - " self.optimizer = optim.Adam(self.parameters())\n", - "\n", - " def forward(self, x):\n", - " return self.model(x)\n", - "\n", - " @train\n", - " def train_batch(self, batch):\n", - " x, y = batch\n", - " _y = self.forward(x)\n", - " loss = self.loss_fn(_y, y.long())\n", - " return {\"loss\": loss}\n", - "\n", - " @validate\n", - " def validate_batch(self, batch=None, dl=None):\n", - " if dl is not None:\n", - " output = []\n", - " for batch in dl:\n", - " output.extend(self.predict(batch=batch)[\"val_acc\"])\n", - " return np.mean(output)\n", - " x, y = batch\n", - " _y = self.forward(x)\n", - " loss = self.loss_fn(_y, y.long())\n", - " acc = (y == _y.max(-1)[1]).float().mean()\n", - " return {\"val_loss\": loss, \"val_acc\": acc}\n", - "\n", - " @predict\n", - " def predict(self, batch=None, dl=None):\n", - " if dl is not None:\n", - " output = []\n", - " for batch in dl:\n", - " output.extend(self.predict(batch=batch))\n", - " return output\n", - " x, y = batch\n", - " _y = self.forward(x)\n", - " o = _y.max(-1)[1].cpu().detach().numpy().tolist()\n", - " return o\n", - "---\n", - "\n", - " warn(f'Found cells containing imports and other code. See FAQ.\\n---\\n{cell.source}\\n---\\n')\n", - "concurrent.futures.process._RemoteTraceback: \n", - "\"\"\"\n", - "Traceback (most recent call last):\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/IPython/core/formatters.py\", line 221, in catch_format_error\n", - " r = method(self, *args, **kwargs)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/IPython/core/formatters.py\", line 342, in __call__\n", - " return method()\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/showdoc.py\", line 168, in _repr_markdown_\n", - " src = NbdevLookup().code(self.fn)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/doclinks.py\", line 200, in __init__\n", - " self.entries = {o.name: _qual_syms(o.resolve()) for o in list(pkg_resources.iter_entry_points(group='nbdev'))\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/doclinks.py\", line 200, in \n", - " self.entries = {o.name: _qual_syms(o.resolve()) for o in list(pkg_resources.iter_entry_points(group='nbdev'))\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 2477, in resolve\n", - " module = __import__(self.module_name, fromlist=['__name__'], level=0)\n", - "ModuleNotFoundError: No module named 'zen_of'\n", - "\n", - "The above exception was the direct cause of the following exception:\n", - "\n", - "Traceback (most recent call last):\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/concurrent/futures/process.py\", line 239, in _process_worker\n", - " r = call_item.fn(*call_item.args, **call_item.kwargs)\n", - " File 
\"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/concurrent/futures/process.py\", line 198, in _process_chunk\n", - " return [fn(*args) for args in chunk]\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/concurrent/futures/process.py\", line 198, in \n", - " return [fn(*args) for args in chunk]\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/parallel.py\", line 46, in _call\n", - " return g(item)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/serve_drv.py\", line 22, in main\n", - " if src.suffix=='.ipynb': exec_nb(src, dst, x)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/serve_drv.py\", line 16, in exec_nb\n", - " cb()(nb)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/processors.py\", line 243, in __call__\n", - " def __call__(self, nb): return self.nb_proc(nb).process()\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/process.py\", line 126, in process\n", - " for proc in self.procs: self._proc(proc)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/process.py\", line 119, in _proc\n", - " for cell in self.nb.cells: self._process_cell(proc, cell)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/process.py\", line 110, in _process_cell\n", - " if callable(proc) and not _is_direc(proc): cell = opt_set(cell, proc(cell))\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/processors.py\", line 205, in __call__\n", - " raise Exception(f\"Error{' in notebook: '+title if title else ''} in cell {cell.idx_} :\\n{cell.source}\") from self.k.exc[1]\n", - "Exception: Error in notebook: Decorator Utilites in cell 5 :\n", - "#| echo: false\n", - "#| output: asis\n", - "show_doc(check_kwargs_not_none)\n", - "\"\"\"\n", - "\n", - "The above exception was the direct cause of the following exception:\n", - "\n", - "Traceback (most recent call last):\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/bin/nbdev_docs\", line 8, in \n", - " sys.exit(nbdev_docs())\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/script.py\", line 119, in _f\n", - " return tfunc(**merge(args, args_from_prog(func, xtra)))\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/quarto.py\", line 245, in nbdev_docs\n", - " cache,cfg,path = _pre_docs(path, n_workers=n_workers, **kwargs)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/quarto.py\", line 174, in _pre_docs\n", - " cache = proc_nbs(path, n_workers=n_workers, **kwargs)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/nbdev/serve.py\", line 77, in proc_nbs\n", - " parallel(nbdev.serve_drv.main, files, n_workers=n_workers, pause=0.01, **kw)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/parallel.py\", line 117, in parallel\n", - " return L(r)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/foundation.py\", line 98, in __call__\n", - " return super().__call__(x, *args, **kwargs)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/foundation.py\", line 106, in __init__\n", - " items = listify(items, *rest, use_list=use_list, match=match)\n", - " File 
\"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/site-packages/fastcore/basics.py\", line 66, in listify\n", - " elif is_iter(o): res = list(o)\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/concurrent/futures/process.py\", line 484, in _chain_from_iterable_of_lists\n", - " for element in iterable:\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/concurrent/futures/_base.py\", line 619, in result_iterator\n", - " yield fs.pop().result()\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/concurrent/futures/_base.py\", line 444, in result\n", - " return self.__get_result()\n", - " File \"/Users/yeshwanth.y/miniconda3/envs/mdm/lib/python3.8/concurrent/futures/_base.py\", line 389, in __get_result\n", - " raise self._exception\n", - "Exception: Error in notebook: Decorator Utilites in cell 5 :\n", - "#| echo: false\n", - "#| output: asis\n", - "show_doc(check_kwargs_not_none)\n" + "^C\n" ] } ], @@ -659,52 +363,9 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fix End of Files.........................................................\u001b[41mFailed\u001b[m\n", - "\u001b[2m- hook id: end-of-file-fixer\u001b[m\n", - "\u001b[2m- exit code: 1\u001b[m\n", - "\u001b[2m- files were modified by this hook\u001b[m\n", - "\n", - "Fixing torch_snippets.egg-info/SOURCES.txt\n", - "\n", - "Trim Trailing Whitespace.................................................\u001b[42mPassed\u001b[m\n", - "black....................................................................\u001b[41mFailed\u001b[m\n", - "\u001b[2m- hook id: black\u001b[m\n", - "\u001b[2m- files were modified by this hook\u001b[m\n", - "\n", - "\u001b[1mreformatted torch_snippets/load_defaults.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/decorators.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/bokeh_loader.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/charts.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/inspector.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/ipython.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/imgaug_loader.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/misc.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/adapters.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/registry.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/pdf_loader.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/logger.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/trainer/config.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/interactive_show.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/trainer/capsule.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/markup.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/bb_utils.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/paths.py\u001b[0m\n", - "\u001b[1mreformatted torch_snippets/_modidx.py\u001b[0m\n", - "\n", - "\u001b[1mAll done! 
โจ ๐ฐ โจ\u001b[0m\n", - "\u001b[34m\u001b[1m19 files \u001b[0m\u001b[1mreformatted\u001b[0m, \u001b[34m4 files \u001b[0mleft unchanged.\n", - "\n", - "Everything up-to-date\n" - ] - } - ], + "outputs": [], "source": [ "!git add .\n", "!git commit -m {VERSION}\n", diff --git a/settings.ini b/settings.ini index 8b34e8f..2b99841 100644 --- a/settings.ini +++ b/settings.ini @@ -8,7 +8,7 @@ author = Yeshwanth Reddy author_email = 1992chinna@gmail.com copyright = sizhky branch = master -version = 0.530 +version = 0.532 min_python = 3.7 audience = Developers language = English diff --git a/torch_snippets.egg-info/PKG-INFO b/torch_snippets.egg-info/PKG-INFO index b88c398..1178319 100644 --- a/torch_snippets.egg-info/PKG-INFO +++ b/torch_snippets.egg-info/PKG-INFO @@ -1,12 +1,13 @@ Metadata-Version: 2.1 Name: torch-snippets -Version: 0.530 +Version: 0.532 Summary: One line functions for common tasks Home-page: https://github.com/sizhky/torch_snippets/tree/master/ Author: Yeshwanth Reddy Author-email: 1992chinna@gmail.com License: Apache Software License 2.0 Keywords: snippets,torch +Platform: UNKNOWN Classifier: Development Status :: 3 - Alpha Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License diff --git a/torch_snippets.egg-info/SOURCES.txt b/torch_snippets.egg-info/SOURCES.txt index 94fb78e..aeebd93 100644 --- a/torch_snippets.egg-info/SOURCES.txt +++ b/torch_snippets.egg-info/SOURCES.txt @@ -29,6 +29,7 @@ torch_snippets/misc.py torch_snippets/paths.py torch_snippets/pdf_loader.py torch_snippets/registry.py +torch_snippets/scp.py torch_snippets/sklegos.py torch_snippets/text_utils.py torch_snippets/torch_loader.py @@ -36,6 +37,7 @@ torch_snippets/video.py torch_snippets.egg-info/PKG-INFO torch_snippets.egg-info/SOURCES.txt torch_snippets.egg-info/dependency_links.txt +torch_snippets.egg-info/entry_points.txt torch_snippets.egg-info/not-zip-safe torch_snippets.egg-info/requires.txt torch_snippets.egg-info/top_level.txt diff --git a/torch_snippets/__init__.py b/torch_snippets/__init__.py index cbd4e91..6c3a99b 100644 --- a/torch_snippets/__init__.py +++ b/torch_snippets/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.530" +__version__ = "0.532" from .loader import * from .paths import * from .markup import * diff --git a/torch_snippets/_modidx.py b/torch_snippets/_modidx.py index 8700be2..f43d8d8 100644 --- a/torch_snippets/_modidx.py +++ b/torch_snippets/_modidx.py @@ -208,6 +208,10 @@ ), }, "torch_snippets.charts": { + "torch_snippets.charts.UpSetAltair": ( + "charts.html#upsetaltair", + "torch_snippets/charts.py", + ), "torch_snippets.charts.confusion_matrix": ( "charts.html#confusion_matrix", "torch_snippets/charts.py", @@ -216,6 +220,10 @@ "charts.html#spider", "torch_snippets/charts.py", ), + "torch_snippets.charts.upsetaltair_top_level_configuration": ( + "charts.html#upsetaltair_top_level_configuration", + "torch_snippets/charts.py", + ), }, "torch_snippets.dates": {}, "torch_snippets.decorators": { @@ -894,6 +902,7 @@ "torch_snippets/registry.py", ), }, + "torch_snippets.scp": {}, "torch_snippets.sklegos": { "torch_snippets.sklegos.Cat2Num": ( "sklegos.html#cat2num", diff --git a/torch_snippets/adapters.py b/torch_snippets/adapters.py index 833b2f9..ea50463 100644 --- a/torch_snippets/adapters.py +++ b/torch_snippets/adapters.py @@ -27,6 +27,14 @@ def np_2_b64(image: np.ndarray) -> str: def b64_2_np(input: str) -> np.ndarray: + """Converts a base64 encoded image to a NumPy array. + + Args: + input (str): The base64 encoded image. 
+ + Returns: + np.ndarray: The NumPy array representation of the image in RGB format. + """ input = bytes(input, "utf-8") input = base64.b64decode(input) img_nparr = np.frombuffer(input, np.uint8) @@ -69,6 +77,17 @@ def file_2_bytes(fpath): def _process( df: pd.DataFrame, label_column="readable_label", default_label="Background" ): + """ + Process the given DataFrame and convert it into a list of records. + + Args: + df (pd.DataFrame): The input DataFrame to be processed. + label_column (str, optional): The column name for the label. Defaults to "readable_label". + default_label (str, optional): The default label value. Defaults to "Background". + + Returns: + list: A list of records, where each record is a dictionary representing a row in the DataFrame. + """ df["@xbr"] = df["X"] df["@xtl"] = df["x"] df["@ybr"] = df["Y"] @@ -118,6 +137,22 @@ def csvs_2_cvat( default_label="Background", extension="jpg", ): + """ + Convert CSV annotations to CVAT XML format. + + Args: + images_folder (str): Path to the folder containing the images. + csvs_folder (str): Path to the folder containing the CSV annotations. + xml_output_file (str): Path to the output XML file. + items (list, optional): List of items to process. If None, all items will be processed. Defaults to None. + parquet (bool, optional): Whether the annotations are stored in Parquet format. Defaults to False. + relative_df (bool, optional): Whether the bounding box coordinates in the CSV are relative to the image size. Defaults to True. + default_label (str, optional): Default label for the bounding boxes. Defaults to "Background". + extension (str, optional): Image file extension. Defaults to "jpg". + + Returns: + None + """ images_folder, csvs_folder = [P(_) for _ in [images_folder, csvs_folder]] data = AttrDict({"annotations": {"image": []}}) if items is None: @@ -144,6 +179,16 @@ def csvs_2_cvat( def _get_attribute_columns(column): + """ + Get attribute columns from a given column. + + Args: + column (pandas.Series): The input column. + + Returns: + set: A set of attribute columns extracted from the input column. + """ + def _get_columns_from_row(item): if item != item: return [] @@ -157,6 +202,16 @@ def _get_columns_from_row(item): def _get_attribute_data(item, column_name): + """ + Retrieves the attribute data for a given item and column name. + + Parameters: + - item: The item to retrieve the attribute data from. + - column_name: The name of the column to retrieve the attribute data for. + + Returns: + - The attribute data for the given item and column name, or np.nan if not found. + """ if item != item: return item if isinstance(item, dict): @@ -173,6 +228,15 @@ def _get_attribute_data(item, column_name): def _cvat_ann_2_csv(ann): + """ + Convert CVAT annotation to a pandas DataFrame in CSV format. + + Args: + ann (dict): CVAT annotation dictionary. + + Returns: + pandas.DataFrame: DataFrame containing the converted annotation data in CSV format. + """ if "box" not in ann: return pd.DataFrame() if isinstance(ann.box, AttrDict): @@ -206,8 +270,21 @@ def _cvat_ann_2_csv(ann): def cvat_2_csvs(xmlfile, csvs_folder): + """ + Convert CVAT XML annotations to CSV files. + + Args: + xmlfile (str): Path to the CVAT XML file. + csvs_folder (str): Path to the folder where the CSV files will be saved. 
+ + Returns: + None + """ data = read_xml(xmlfile) - for item in data.annotations.image: + items = data.annotations.image + if not isinstance(items, list): + items = [items] + for item in items: try: df = _cvat_ann_2_csv(item) save_at = f'{csvs_folder}/{stem(item["@name"])}.csv' diff --git a/torch_snippets/bb_utils.py b/torch_snippets/bb_utils.py index 611371a..dc9f3a9 100644 --- a/torch_snippets/bb_utils.py +++ b/torch_snippets/bb_utils.py @@ -171,6 +171,15 @@ def distances(self, other_bbs, threshold=None, direction=None): # %% ../nbs/bounding_boxes.ipynb 8 def df2bbs(df): + """ + Convert a DataFrame to bounding boxes. + + Parameters: + df (pd.DataFrame): The DataFrame to convert. + + Returns: + list: A list of bounding boxes. + """ if "bb" in df.columns: try: return bbfy(df["bb"].values.tolist()) @@ -180,19 +189,56 @@ def df2bbs(df): def bbs2df(bbs): + """ + Convert bounding boxes to a DataFrame. + + Parameters: + bbs (list): The bounding boxes to convert. + + Returns: + pd.DataFrame: A DataFrame representing the bounding boxes. + """ bbs = [list(bb) for bb in bbs] return pd.DataFrame(bbs, columns=["x", "y", "X", "Y"]) def bbfy(bbs): + """ + Convert bounding boxes to BB objects. + + Parameters: + bbs (list): The bounding boxes to convert. + + Returns: + list: A list of BB objects. + """ return [BB(bb) for bb in bbs] def jitter(bbs, noise): + """ + Add noise to bounding boxes. Useful when you have a lot of overlapping boxes. + + Parameters: + bbs (list): The bounding boxes to add noise to. + noise (float): The amount of noise to add. + + Returns: + list: A list of bounding boxes with added noise. + """ return [BB(bb).jitter(noise) for bb in bbs] def compute_eps(eps): + """ + Compute epsilon values for bounding box manipulation. + + Parameters: + eps (float or tuple): The epsilon value(s) to compute. + + Returns: + tuple: A tuple of epsilon values. + """ if isinstance(eps, tuple): if len(eps) == 4: epsx, epsy, epsX, epsY = eps @@ -205,7 +251,16 @@ def compute_eps(eps): def enlarge_bbs(bbs, eps=0.2): - "enlarge all `bbs` by `eps` fraction (i.e., eps*100 percent)" + """ + Enlarge bounding boxes by a certain fraction. + + Parameters: + bbs (list): The bounding boxes to enlarge. + eps (float, optional): The fraction to enlarge by. Defaults to 0.2. + + Returns: + list: A list of enlarged bounding boxes. + """ bbs = bbfy(bbs) epsx, epsy, epsX, epsY = compute_eps(eps) bbs = bbfy(bbs) @@ -217,7 +272,16 @@ def enlarge_bbs(bbs, eps=0.2): def shrink_bbs(bbs, eps=0.2): - "shrink all `bbs` by `eps` fraction (i.e., eps*100 percent)" + """ + Shrink bounding boxes by a certain fraction. + + Parameters: + bbs (list): The bounding boxes to shrink. + eps (float, optional): The fraction to shrink by. Defaults to 0.2. + + Returns: + list: A list of shrunk bounding boxes. + """ bbs = bbfy(bbs) epsx, epsy, epsX, epsY = compute_eps(eps) bbs = bbfy(bbs) @@ -230,6 +294,17 @@ def shrink_bbs(bbs, eps=0.2): # %% ../nbs/bounding_boxes.ipynb 9 def iou(bboxes1, bboxes2): + """ + Calculates the Intersection over Union (IoU) between two sets of bounding boxes. + + Args: + bboxes1 (list or numpy array): The first set of bounding boxes in the format [x, y, X, Y]. + bboxes2 (list or numpy array): The second set of bounding boxes in the format [x, y, X, Y]. + + Returns: + numpy array: The IoU between each pair of bounding boxes. 
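+
+    Example (illustrative sketch, not taken from the library's tests):
+        # scores[i, j] is the IoU of bboxes1[i] with bboxes2[j]
+        scores = iou([[0, 0, 10, 10]], [[0, 0, 10, 10], [50, 50, 60, 60]])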
+ + """ bboxes1 = np.array(bboxes1) bboxes2 = np.array(bboxes2) x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1) @@ -246,6 +321,17 @@ def iou(bboxes1, bboxes2): def compute_distance_matrix(bboxes1, bboxes2): + """ + Compute the distance matrix between two sets of bounding boxes. + + Parameters: + - bboxes1 (list): List of bounding boxes in the format [x, y, X, Y]. + - bboxes2 (list): List of bounding boxes in the format [x, y, X, Y]. + + Returns: + - distance_matrix (ndarray): 2D array containing the Euclidean distances between all pairs of bounding boxes. + """ + # Convert the bounding box lists to NumPy arrays bboxes1 = np.array(bboxes1) bboxes2 = np.array(bboxes2) @@ -264,7 +350,17 @@ def compute_distance_matrix(bboxes1, bboxes2): def compute_distances(df1, df2, shrink_factors=(1, 1)): - """Return euclidean distance mxn matrix for all boxes from df1 with all boxes from df2""" + """ + Compute the Euclidean distance matrix between bounding boxes in df1 and df2. + + Parameters: + - df1 (DataFrame): The first DataFrame containing bounding boxes. + - df2 (DataFrame): The second DataFrame containing bounding boxes. + - shrink_factors (tuple, optional): The shrink factors to apply to the bounding boxes. Default is (1, 1). + + Returns: + - distances (ndarray): The Euclidean distance matrix between the bounding boxes in df1 and df2. + """ sx, sy = shrink_factors bbs1 = np.array(df2bbs(df1)) / np.array([sx, sy, sx, sy]) bbs2 = np.array(df2bbs(df2)) / np.array([sx, sy, sx, sy]) @@ -275,7 +371,18 @@ def compute_distances(df1, df2, shrink_factors=(1, 1)): # %% ../nbs/bounding_boxes.ipynb 10 def split_bb_to_xyXY(df): - "convert bb column to separate x,y,X,Y columns" + """ + Convert the 'bb' column in the DataFrame to separate 'x', 'y', 'X', 'Y' columns. + + Args: + df (pd.DataFrame): The DataFrame containing the bounding box information. + + Returns: + pd.DataFrame: The DataFrame with separate 'x', 'y', 'X', 'Y' columns. + + Raises: + AssertionError: If the input is not a DataFrame or if the 'bb' column is missing. + """ df = df.copy() assert isinstance(df, pd.DataFrame) if all([item in df.columns for item in "xyXY"]): @@ -294,7 +401,18 @@ def split_bb_to_xyXY(df): def combine_xyXY_to_bb(df): - "combine `x,y,X,Y` to `bb` column" + """ + Combine `x`, `y`, `X`, `Y` columns into a single `bb` column. + + Args: + df (pandas.DataFrame): The input DataFrame containing `x`, `y`, `X`, `Y` columns. + + Returns: + pandas.DataFrame: The modified DataFrame with the `bb` column. + + Raises: + AssertionError: If any of the columns `x`, `y`, `X`, `Y` are missing in the DataFrame. + """ df = df.copy() assert all( [item in df.columns for item in "xyXY"] @@ -305,16 +423,47 @@ def combine_xyXY_to_bb(df): def is_absolute(df): + """ + Check if the bounding boxes in the given DataFrame are absolute. + + Args: + df (pandas.DataFrame): The DataFrame containing bounding box coordinates. + + Returns: + bool: True if the maximum value of the bounding box coordinates is greater than 1.1, False otherwise. + """ bbs = df2bbs(df) bbs = np.array(bbs) return bbs.max() > 1.1 def is_relative(df): + """ + Check if the bounding box coordinates in the DataFrame are relative. + + Args: + df (pandas.DataFrame): The DataFrame containing bounding box coordinates. + + Returns: + bool: True if the bounding box coordinates are relative, False otherwise. + """ return not is_absolute(df) def to_relative(df, height, width, force=False): + """ + Converts bounding box coordinates in a DataFrame to relative coordinates. 
+ + Args: + df (pandas.DataFrame): The DataFrame containing bounding box coordinates. + height (int): The height of the image. + width (int): The width of the image. + force (bool, optional): If True, forces conversion even if the coordinates are already relative. + Defaults to False. + + Returns: + pandas.DataFrame: The DataFrame with bounding box coordinates converted to relative coordinates. + """ if not force and is_relative(df): return df df = df.copy() @@ -333,6 +482,18 @@ def to_relative(df, height, width, force=False): def to_absolute(df, height, width, force=False): + """ + Converts bounding box coordinates from relative to absolute values. + + Args: + df (pandas.DataFrame): The DataFrame containing the bounding box coordinates. + height (int): The height of the image. + width (int): The width of the image. + force (bool, optional): If True, forces the conversion even if the coordinates are already in absolute values. Defaults to False. + + Returns: + pandas.DataFrame: The DataFrame with the bounding box coordinates converted to absolute values. + """ if not force and is_absolute(df): return df df = df.copy() diff --git a/torch_snippets/bokeh_loader.py b/torch_snippets/bokeh_loader.py index 9e882a2..d06add0 100644 --- a/torch_snippets/bokeh_loader.py +++ b/torch_snippets/bokeh_loader.py @@ -25,6 +25,18 @@ # %% ../nbs/bokeh_plotting.ipynb 3 def parse_sz(size): + """ + Parses the size argument and returns a tuple of width and height. + + Args: + size (int or tuple): The size argument to be parsed. + + Returns: + tuple: A tuple of width and height. + + Raises: + NotImplementedError: If the size argument is not an int or a tuple of length 2. + """ if isinstance(size, int): return size, size elif isinstance(size, tuple): @@ -34,6 +46,17 @@ def parse_sz(size): def get_bplot(sz=500, **kwargs): + """ + Create a Bokeh plot with specified size and tools. + + Parameters: + - sz (int): Size of the plot in pixels. + - **kwargs: Additional keyword arguments for customizing the plot. + + Returns: + - plot (bokeh.plotting.Figure): Bokeh plot object. + + """ h, w = parse_sz(sz) output_notebook() plot = figure( diff --git a/torch_snippets/charts.py b/torch_snippets/charts.py index b01de1c..9bc28ea 100644 --- a/torch_snippets/charts.py +++ b/torch_snippets/charts.py @@ -1,7 +1,16 @@ # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/charts.ipynb. # %% auto 0 -__all__ = ["alt", "Chart", "CM", "radar", "confusion_matrix", "spider"] +__all__ = [ + "alt", + "Chart", + "CM", + "radar", + "confusion_matrix", + "spider", + "upsetaltair_top_level_configuration", + "UpSetAltair", +] # %% ../nbs/charts.ipynb 2 import altair as alt @@ -13,6 +22,33 @@ # %% ../nbs/charts.ipynb 5 def confusion_matrix(df=None, truth=None, pred=None, mapping=None, save_to=None): + """ + Generate a confusion matrix chart based on the given DataFrame or truth/prediction arrays. + + Parameters: + - df (DataFrame, optional): The input DataFrame containing the truth and prediction columns. If not provided, the truth and prediction arrays must be provided separately. + - truth (array-like, optional): The array-like object containing the true labels. + - pred (array-like, optional): The array-like object containing the predicted labels. + - mapping (dict, optional): A dictionary mapping the labels to their corresponding names. + - save_to (str, optional): The file path to save the chart in HTML format. + + Returns: + - chart (altair.Chart): The confusion matrix chart. 
+ + Note: + - If `df` is not provided, `truth` and `pred` must be provided separately. + - If `save_to` is provided, the chart will be saved at the specified file path in HTML format. + - If `mapping` is provided, the labels in the chart will be replaced with their corresponding names. + + Example usage: + ``` + # Generate confusion matrix from DataFrame + confusion_matrix(df=my_df, mapping=my_mapping, save_to="confusion_matrix.html") + + # Generate confusion matrix from separate truth and prediction arrays + confusion_matrix(truth=my_truth, pred=my_pred, mapping=my_mapping) + ``` + """ if df is None: df = pd.DataFrame({"truth": truth, "pred": pred}) truth = "truth" @@ -58,20 +94,76 @@ def confusion_matrix(df=None, truth=None, pred=None, mapping=None, save_to=None) CM = confusion_matrix # %% ../nbs/charts.ipynb 12 -def spider(df, *, id_column, title=None, max_values=None, padding=1.25): +def spider( + df, + *, + id_column=None, + title=None, + max_values=None, + padding=1.25, + global_scale=False, + ax=None, + sz=10, +): + """ + Plot a spider chart based on the given dataframe. + + Parameters: + - df: pandas DataFrame + The input dataframe containing the data to be plotted. + - id_column: str, optional + The column name to be used as the identifier for each data point. If not provided, the index of the dataframe will be used. + - title: str, optional + The title of the spider chart. + - max_values: dict, optional + A dictionary specifying the maximum values for each category. If not provided, the maximum values will be calculated based on the data. + - padding: float, optional + The padding factor to be applied when calculating the maximum values. Default is 1.25. + - global_scale: bool or float, optional + If False, each category will have its own maximum value. If True, a single maximum value will be used for all categories. If a float value is provided, it will be used as the maximum value for all categories. + - ax: matplotlib Axes, optional + The axes on which to plot the spider chart. If not provided, a new figure and axes will be created. + - sz: float, optional + The size of the figure (both width and height) in inches. Default is 10. 
+ + Returns: + - None + + Example usage: + spider(df, id_column='model', title='Spider Chart', max_values={'category1': 10, 'category2': 20}, padding=1.5) + """ + if id_column is None: + df = df.copy().reset_index(names="index") + id_column = "index" + df = df.sort_values(id_column, ascending=True) + df = df[sorted(df.columns)] categories = df.dtypes[(df.dtypes == "float") | (df.dtypes == "int")].index.tolist() data = df[categories].to_dict(orient="list") - ids = df[id_column].tolist() + ids = sorted(df[id_column].tolist()) if max_values is None: - max_values = {key: padding * max(value) for key, value in data.items()} + if not global_scale: + max_values = {key: padding * max(value) for key, value in data.items()} + else: + if isinstance(global_scale, bool): + max_value = np.array(list(data.values())).max() + elif isinstance(global_scale, (int, float)): + max_value = global_scale + padding = 1.0 + max_values = {key: padding * max_value for key, _ in data.items()} normalized_data = { key: np.array(value) / max_values[key] for key, value in data.items() } + num_vars = len(data.keys()) tiks = list(data.keys()) tiks += tiks[:1] angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist() + [0] - fig, ax = plt.subplots(figsize=(8, 8), subplot_kw=dict(polar=True)) + + if ax is None: + fig, ax = plt.subplots(figsize=(sz, sz), subplot_kw=dict(polar=True)) + _show = True + else: + _show = False for i, model_name in enumerate(ids): values = [normalized_data[key][i] for key in data.keys()] actual_values = [data[key][i] for key in data.keys()] @@ -89,7 +181,378 @@ def spider(df, *, id_column, title=None, max_values=None, padding=1.25): ax.legend(loc="upper right", bbox_to_anchor=(0.1, 0.1)) if title is not None: plt.suptitle(title) - plt.show() + if _show: + plt.show() radar = spider + +# %% ../nbs/charts.ipynb 14 +# Top-level altair configuration +def upsetaltair_top_level_configuration( + base, legend_orient="top-left", legend_symbol_size=30 +): + """ + Configure the top-level settings for an UpSet plot in Altair. + + Parameters: + - base: The base chart to configure. + - legend_orient: The orientation of the legend. Default is "top-left". + - legend_symbol_size: The size of the legend symbols. Default is 30. + + Returns: + - The configured chart. + + """ + return ( + base.configure_view(stroke=None) + .configure_title( + fontSize=18, fontWeight=400, anchor="start", subtitlePadding=10 + ) + .configure_axis( + labelFontSize=14, + labelFontWeight=300, + titleFontSize=16, + titleFontWeight=400, + titlePadding=10, + ) + .configure_legend( + titleFontSize=16, + titleFontWeight=400, + labelFontSize=14, + labelFontWeight=300, + padding=20, + orient=legend_orient, + symbolType="circle", + symbolSize=legend_symbol_size, + ) + .configure_concat(spacing=0) + ) + + +def UpSetAltair( + data=None, + title="", + subtitle="", + sets=None, + abbre=None, + sort_by="frequency", + sort_order="ascending", + width=1200, + height=700, + height_ratio=0.6, + horizontal_bar_chart_width=300, + color_range=["#55A8DB", "#3070B5", "#30363F", "#F1AD60", "#DF6234", "#BDC6CA"], + highlight_color="#EA4667", + glyph_size=200, + set_label_bg_size=1000, + line_connection_size=2, + horizontal_bar_size=20, + vertical_bar_label_size=16, + vertical_bar_padding=20, +): + """This function generates Altair-based interactive UpSet plots. + + Parameters: + - data (pandas.DataFrame): Tabular data containing the membership of each element (row) in + exclusive intersecting sets (column). 
+ - sets (list): List of set names of interest to show in the UpSet plots. + This list reflects the order of sets to be shown in the plots as well. + - abbre (list): Abbreviated set names. + - sort_by (str): "frequency" or "degree" + - sort_order (str): "ascending" or "descending" + - width (int): Vertical size of the UpSet plot. + - height (int): Horizontal size of the UpSet plot. + - height_ratio (float): Ratio of height between upper and under views, ranges from 0 to 1. + - horizontal_bar_chart_width (int): Width of horizontal bar chart on the bottom-right. + - color_range (list): Color to encode sets. + - highlight_color (str): Color to encode intersecting sets upon mouse hover. + - glyph_size (int): Size of UpSet glyph (โฌค). + - set_label_bg_size (int): Size of label background in the horizontal bar chart. + - line_connection_size (int): width of lines in matrix view. + - horizontal_bar_size (int): Height of bars in the horizontal bar chart. + - vertical_bar_label_size (int): Font size of texts in the vertical bar chart on the top. + - vertical_bar_padding (int): Gap between a pair of bars in the vertical bar charts. + """ + + if (data is None) or (sets is None): + print("No data and/or a list of sets are provided") + return + if (height_ratio < 0) or (1 < height_ratio): + print("height_ratio set to 0.5") + height_ratio = 0.5 + if len(sets) != len(abbre): + abbre = None + print( + "Dropping the `abbre` list because the lengths of `sets` and `abbre` are not identical." + ) + + """ + Data Preprocessing + """ + data["count"] = 0 + data = data[sets + ["count"]] + data = data.groupby(sets).count().reset_index() + + data["intersection_id"] = data.index + data["degree"] = data[sets].sum(axis=1) + data = data.sort_values( + by=["count"], ascending=True if sort_order == "ascending" else False + ) + + data = pd.melt(data, id_vars=["intersection_id", "count", "degree"]) + data = data.rename(columns={"variable": "set", "value": "is_intersect"}) + + if abbre == None: + abbre = sets + + set_to_abbre = pd.DataFrame( + [[sets[i], abbre[i]] for i in range(len(sets))], columns=["set", "set_abbre"] + ) + set_to_order = pd.DataFrame( + [[sets[i], 1 + sets.index(sets[i])] for i in range(len(sets))], + columns=["set", "set_order"], + ) + + degree_calculation = "" + for s in sets: + degree_calculation += f"(isDefined(datum['{s}']) ? 
datum['{s}'] : 0)" + if sets[-1] != s: + degree_calculation += "+" + + """ + Selections + """ + legend_selection = alt.selection_multi(fields=["set"], bind="legend") + color_selection = alt.selection_single(fields=["intersection_id"], on="mouseover") + opacity_selection = alt.selection_single(fields=["intersection_id"]) + + """ + Styles + """ + vertical_bar_chart_height = height * height_ratio + matrix_height = height - vertical_bar_chart_height + matrix_width = width - horizontal_bar_chart_width + + vertical_bar_size = min( + 30, + width / len(data["intersection_id"].unique().tolist()) - vertical_bar_padding, + ) + + main_color = "#3A3A3A" + brush_opacity = alt.condition(~opacity_selection, alt.value(1), alt.value(0.6)) + brush_color = alt.condition( + ~color_selection, alt.value(main_color), alt.value(highlight_color) + ) + + is_show_horizontal_bar_label_bg = len(abbre[0]) <= 2 + horizontal_bar_label_bg_color = ( + "white" if is_show_horizontal_bar_label_bg else "black" + ) + + x_sort = alt.Sort( + field="count" if sort_by == "frequency" else "degree", order=sort_order + ) + tooltip = [ + alt.Tooltip("max(count):Q", title="Cardinality"), + alt.Tooltip("degree:Q", title="Degree"), + ] + + """ + Plots + """ + # To use native interactivity in Altair, we are using the data transformation functions + # supported in Altair. + base = ( + alt.Chart(data) + .transform_filter(legend_selection) + .transform_pivot( + # Right before this operation, columns should be: + # `count`, `set`, `is_intersect`, (`intersection_id`, `degree`, `set_order`, `set_abbre`) + # where (fields with brackets) should be dropped and recalculated later. + "set", + op="max", + groupby=["intersection_id", "count"], + value="is_intersect", + ) + .transform_aggregate( + # count, set1, set2, ... + count="sum(count)", + groupby=sets, + ) + .transform_calculate( + # count, set1, set2, ... + degree=degree_calculation + ) + .transform_filter( + # count, set1, set2, ..., degree + alt.datum["degree"] + != 0 + ) + .transform_window( + # count, set1, set2, ..., degree + intersection_id="row_number()", + frame=[None, None], + ) + .transform_fold( + # count, set1, set2, ..., degree, intersection_id + sets, + as_=["set", "is_intersect"], + ) + .transform_lookup( + # count, set, is_intersect, degree, intersection_id + lookup="set", + from_=alt.LookupData(set_to_abbre, "set", ["set_abbre"]), + ) + .transform_lookup( + # count, set, is_intersect, degree, intersection_id, set_abbre + lookup="set", + from_=alt.LookupData(set_to_order, "set", ["set_order"]), + ) + .transform_filter( + # Make sure to remove the filtered sets. 
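+            # The pivot/fold steps above re-create one row per set, so rows for
+            # sets deselected in the legend have to be filtered out a second time.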
+ legend_selection + ) + .transform_window( + # count, set, is_intersect, degree, intersection_id, set_abbre + set_order="distinct(set)", + frame=[None, 0], + sort=[{"field": "set_order"}], + ) + ) + # Now, we have data in the following format: + # count, set, is_intersect, degree, intersection_id, set_abbre + + # Cardinality by intersecting sets (vertical bar chart) + vertical_bar = ( + base.mark_bar(color=main_color, size=vertical_bar_size) + .encode( + x=alt.X( + "intersection_id:N", + axis=alt.Axis(grid=False, labels=False, ticks=False, domain=True), + sort=x_sort, + title=None, + ), + y=alt.Y( + "max(count):Q", + axis=alt.Axis(grid=False, tickCount=3, orient="right"), + title="Intersection Size", + ), + color=brush_color, + tooltip=tooltip, + ) + .properties(width=matrix_width, height=vertical_bar_chart_height) + ) + + vertical_bar_text = vertical_bar.mark_text( + color=main_color, dy=-10, size=vertical_bar_label_size + ).encode(text=alt.Text("count:Q", format=".0f")) + + vertical_bar_chart = (vertical_bar + vertical_bar_text).add_selection( + color_selection + ) + + # UpSet glyph view (matrix view) + circle_bg = ( + vertical_bar.mark_circle(size=glyph_size, opacity=1) + .encode( + x=alt.X( + "intersection_id:N", + axis=alt.Axis(grid=False, labels=False, ticks=False, domain=False), + sort=x_sort, + title=None, + ), + y=alt.Y( + "set_order:N", + axis=alt.Axis(grid=False, labels=False, ticks=False, domain=False), + title=None, + ), + color=alt.value("#E6E6E6"), + ) + .properties(height=matrix_height) + ) + + rect_bg = ( + circle_bg.mark_rect() + .transform_filter(alt.datum["set_order"] % 2 == 1) + .encode(color=alt.value("#F7F7F7")) + ) + + circle = circle_bg.transform_filter(alt.datum["is_intersect"] == 1).encode( + color=brush_color + ) + + line_connection = ( + vertical_bar.mark_bar(size=line_connection_size, color=main_color) + .transform_filter(alt.datum["is_intersect"] == 1) + .encode(y=alt.Y("min(set_order):N"), y2=alt.Y2("max(set_order):N")) + ) + + matrix_view = ( + circle + rect_bg + circle_bg + line_connection + circle + ).add_selection( + # Duplicate `circle` is to properly show tooltips. 
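+        # `color_selection` fires on mouseover of an `intersection_id`, so hovering a
+        # column highlights it here and in the vertical bar chart via `brush_color`.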
+ color_selection + ) + + # Cardinality by sets (horizontal bar chart) + horizontal_bar_label_bg = base.mark_circle(size=set_label_bg_size).encode( + y=alt.Y( + "set_order:N", + axis=alt.Axis(grid=False, labels=False, ticks=False, domain=False), + title=None, + ), + color=alt.Color( + "set:N", scale=alt.Scale(domain=sets, range=color_range), title=None + ), + opacity=alt.value(1), + ) + horizontal_bar_label = horizontal_bar_label_bg.mark_text( + align=("center" if is_show_horizontal_bar_label_bg else "center") + ).encode( + text=alt.Text("set_abbre:N"), color=alt.value(horizontal_bar_label_bg_color) + ) + horizontal_bar_axis = ( + (horizontal_bar_label_bg + horizontal_bar_label) + if is_show_horizontal_bar_label_bg + else horizontal_bar_label + ) + + horizontal_bar = ( + horizontal_bar_label_bg.mark_bar(size=horizontal_bar_size) + .transform_filter(alt.datum["is_intersect"] == 1) + .encode( + x=alt.X( + "sum(count):Q", axis=alt.Axis(grid=False, tickCount=3), title="Set Size" + ) + ) + .properties(width=horizontal_bar_chart_width) + ) + + # Concat Plots + upsetaltair = alt.vconcat( + vertical_bar_chart, + alt.hconcat( + matrix_view, + horizontal_bar_axis, + horizontal_bar, # horizontal bar chart + spacing=5, + ).resolve_scale(y="shared"), + spacing=20, + ).add_selection(legend_selection) + + # Apply top-level configuration + upsetaltair = upsetaltair_top_level_configuration( + upsetaltair, legend_orient="top", legend_symbol_size=set_label_bg_size / 2.0 + ).properties( + title={ + "text": title, + "subtitle": subtitle, + "fontSize": 20, + "fontWeight": 500, + "subtitleColor": main_color, + "subtitleFontSize": 14, + } + ) + + return upsetaltair diff --git a/torch_snippets/decorators.py b/torch_snippets/decorators.py index 80d217e..376670b 100644 --- a/torch_snippets/decorators.py +++ b/torch_snippets/decorators.py @@ -11,6 +11,24 @@ # %% ../nbs/decorators.ipynb 3 def timeit(func): + """ + A decorator that measures the execution time of a function. + + Args: + func (callable): The function to be timed. + + Returns: + callable: The wrapped function. + + Example: + @timeit + def my_function(): + # code to be timed + pass + + my_function() # prints the execution time of my_function + """ + @wraps(func) def wrapper(*args, **kwargs): start = time.time() @@ -23,6 +41,17 @@ def wrapper(*args, **kwargs): def io(func): + """ + A decorator that inspects the inputs and outputs of a function. + + Args: + func: The function to be decorated. + + Returns: + The decorated function. + + """ + @wraps(func) def wrapper(*args, **kwargs): if len(args) != 0: @@ -37,6 +66,20 @@ def wrapper(*args, **kwargs): def check_kwargs_not_none(func): + """ + A decorator that checks if any keyword argument is None. + Raises a ValueError if any argument is None. + + Args: + func: The function to be decorated. + + Returns: + The decorated function. + + Raises: + ValueError: If any keyword argument is None. + """ + @wraps(func) def wrapper(*args, **kwargs): for key, value in kwargs.items(): diff --git a/torch_snippets/imgaug_loader.py b/torch_snippets/imgaug_loader.py index 9b26a64..916ee9d 100644 --- a/torch_snippets/imgaug_loader.py +++ b/torch_snippets/imgaug_loader.py @@ -26,6 +26,19 @@ # %% ../nbs/imgaug_loader.ipynb 3 def do(img, bbs=None, aug=None, cval=255): + """ + Apply image augmentation to the input image and bounding boxes. + + Args: + img (numpy.ndarray or PIL.Image.Image): The input image. + bbs (pandas.DataFrame or None): The bounding boxes associated with the image. 
+ aug (imgaug.augmenters.Augmenter or None): The image augmentation object. + cval (int): The constant value used for padding. + + Returns: + tuple or PIL.Image.Image: If `bbs` is None, returns the augmented image. + Otherwise, returns a tuple containing the augmented image and the augmented bounding boxes. + """ if isinstance(img, PIL.Image.Image): _Image = True img = np.array(img) @@ -72,16 +85,52 @@ def do(img, bbs=None, aug=None, cval=255): def bw(img, bbs): + """ + Applies grayscale augmentation to the input image. + + Args: + img (numpy.ndarray): The input image. + bbs (list): List of bounding boxes associated with the image. + + Returns: + numpy.ndarray: The augmented image. + + """ aug = iaa.Grayscale() return do(img, bbs, aug) def rotate(img, bbs=None, angle=None, cval=255): + """ + Rotate the input image and bounding boxes (if provided) by a given angle. + + Args: + img (numpy.ndarray): The input image. + bbs (list, optional): List of bounding boxes. Defaults to None. + angle (float, optional): The angle of rotation in degrees. Defaults to None. + cval (int, optional): The constant value used to fill the empty space after rotation. Defaults to 255. + + Returns: + numpy.ndarray: The rotated image. + """ aug = iaa.Rotate(angle, cval=cval, fit_output=True) return do(img, bbs=bbs, aug=aug) def pad(img, bbs, sz=None, deltas=None, cval=0): + """ + Pad an image and its bounding boxes. + + Args: + img (numpy.ndarray or PIL.Image.Image): The input image. + bbs (list): List of bounding boxes. + sz (tuple, optional): The desired size of the output image. If provided, the image will be padded to this size. Defaults to None. + deltas (tuple, optional): The amount of padding to be applied on each side of the image. If provided, sz will be ignored. Defaults to None. + cval (int, optional): The value used for padding. Defaults to 0. + + Returns: + numpy.ndarray: The padded image. + """ if isinstance(img, np.ndarray): h, w = img.shape[:2] else: @@ -95,6 +144,30 @@ def pad(img, bbs, sz=None, deltas=None, cval=0): def get_size(sz, h, w): + """ + Calculate the target size (height and width) based on the input size and resize parameters. + + Args: + sz (tuple, list, float, int): The resize parameters. It can be one of the following: + - (tuple, list): A tuple or list containing a signal and target size (H, W). + The signal can be either 'at-least' or 'at-most'. + The target size represents the desired size of the image. + - float: A float value representing the fraction of the input size. + - int: An integer value representing the target size. + - tuple: A tuple containing the target size (H, W). + The target size can be -1 to maintain the aspect ratio of the input size. + - float: A float value representing the target size as a fraction of the input size. + + h (int): The height of the input size. + w (int): The width of the input size. + + Returns: + tuple: A tuple containing the target size (H, W). + + Raises: + AssertionError: If the resize type is not 'at-least' or 'at-most'. + + """ if isinstance(sz, (tuple, list)) and isinstance(sz[0], str): signal, (H, W) = sz assert signal in "at-least,at-most".split( @@ -129,6 +202,19 @@ def get_size(sz, h, w): def rescale(im, bbs, sz): + """ + Rescales the input image and bounding boxes to the specified size. + + Args: + im (PIL.Image.Image or numpy.ndarray): The input image. + bbs (list): List of bounding boxes. + sz (tuple): The target size (height, width) to resize the image. + + Returns: + PIL.Image.Image: The resized image. 
+ list: The resized bounding boxes. + + """ if isinstance(im, PIL.Image.Image): to_pil = True im = np.array(im) @@ -144,11 +230,33 @@ def rescale(im, bbs, sz): def crop(img, bbs, deltas): + """ + Crop the image and bounding boxes using the specified deltas. + + Args: + img (numpy.ndarray): The input image. + bbs (list): List of bounding boxes. + deltas (tuple or list): The crop deltas in the form of (top, right, bottom, left). + + Returns: + numpy.ndarray: The cropped image. + list: The cropped bounding boxes. + """ aug = iaa.Crop(deltas) return do(img, bbs, aug) def imgaugbbs2bbs(bbs): + """ + Converts a list of imgaug bounding boxes to a list of custom BB objects. + + Args: + bbs (list): A list of imgaug bounding boxes. + + Returns: + list: A list of custom BB objects. + + """ if bbs is None: return None return [ @@ -158,6 +266,16 @@ def imgaugbbs2bbs(bbs): def bbs2imgaugbbs(bbs, img): + """ + Convert a list of bounding boxes to an imgaug BoundingBoxesOnImage object. + + Args: + bbs (list): List of bounding boxes in the format [(x1, y1, x2, y2), ...]. + img (numpy.ndarray): Input image. + + Returns: + imgaug.BoundingBoxesOnImage: BoundingBoxesOnImage object representing the bounding boxes on the image. + """ if bbs is None: return None return BoundingBoxesOnImage( diff --git a/torch_snippets/loader.py b/torch_snippets/loader.py index 2f3392a..f2dd0be 100755 --- a/torch_snippets/loader.py +++ b/torch_snippets/loader.py @@ -366,13 +366,18 @@ def show( try: if isinstance(img, (str, Path)): img = read(str(img), 1) - if isinstance(img, torch.Tensor): - img = img.cpu().detach().numpy().copy() + try: + import torch + + if isinstance(img, torch.Tensor): + img = img.cpu().detach().numpy().copy() + except ModuleNotFoundError: + pass if isinstance(img, PIL.Image.Image): img = np.array(img) - except Exception as e: - print(e) + Warn(e) + if isinstance(img, pd.DataFrame): df = img html_str = "" diff --git a/torch_snippets/markup.py b/torch_snippets/markup.py index 2a1501a..bf8eec0 100644 --- a/torch_snippets/markup.py +++ b/torch_snippets/markup.py @@ -19,11 +19,12 @@ ] # %% ../nbs/markups.ipynb 2 -import json +import json, os from json import JSONEncoder import jsonlines import yaml + from .loader import BB, L, np from .paths import * from .logger import * @@ -174,7 +175,16 @@ def __dir__(self): def __contains__(self, key): key = str(key) - return key in self.__dict__.keys() + if "." 
not in key: + return key in self.__dict__.keys() + else: + d = self + for _k in key.split("."): + try: + d = d[_k] + except AttributeError: + return False + return True def __delitem__(self, key): key = str(key) @@ -240,54 +250,82 @@ def find_address(self, key, current_path=""): addresses.extend(item.find_address(key, f"{new_path}.{i}")) return addresses - def summary(self, current_path="", summary_str="", depth=0, sep="\t"): - tab = sep * depth - for k in self.keys(): - if current_path: - new_path = f"{current_path}.{k}" - else: - new_path = k + def summary(self, current_path="", depth=0, sep=" ", max_items=10): + max_items = int(os.environ.get("AD_MAX_ITEMS", max_items)) + sep = os.environ.get("AD_SEP", sep) - if isinstance(self[k], AttrDict): - summary_str += f"{tab}{k}\n" - summary_str = self[k].summary(new_path, summary_str, depth + 1, sep=sep) - elif isinstance(self[k], (list, tuple, set, frozenset)): - summary_str += f"{tab}{k}\n" - for i, item in enumerate(self[k]): - summary_str += f"{tab}{sep}{i}\n" - if isinstance(item, AttrDict): - summary_str = item.summary( - f"{new_path}.{i}", summary_str, depth + 2, sep=sep - ) - elif isinstance(item, (list, tuple, set, frozenset)): - nested_path = f"{new_path}.{i}" - nested_summary_str = "" - for j, nested_item in enumerate(item): - summary_str += f"{tab}{sep}{sep}{j}\n" - if isinstance(nested_item, AttrDict): - nested_summary_str = nested_item.summary( - f"{nested_path}.{j}", - nested_summary_str, - depth + 3, - sep=sep, - ) - elif isinstance(nested_item, (list, tuple, set, frozenset)): - nested_list_path = f"{nested_path}.{j}" - for idx, nested_list_item in enumerate(nested_item): - if isinstance(nested_list_item, AttrDict): - nested_summary_str = nested_list_item.summary( - f"{nested_list_path}.{idx}", - nested_summary_str, - depth + 4, - sep=sep, - ) - if nested_summary_str: - summary_str += nested_summary_str - else: - summary_str += f"{tab}{sep}{k} - {type(self[k]).__name__}\n" - else: - summary_str += f"{tab}{k} - {type(self[k]).__name__}\n" + def format_path(path, key): + return f"{path}.{key}" if path else key + + def format_item(key, item, path, depth, sep): + import numpy as np + import pandas as pd + try: + import torch + except ModuleNotFoundError: + + class Torch: + Tensor = type(None) + + torch = Torch() + + if isinstance(item, (pd.DataFrame,)): + return f"{sep * depth}{key} - {type(item).__name__} - shape {item.shape} - columns {item.columns} - {hash_pandas_dataframe(item)}\n" + if isinstance(item, AttrDict) or hasattr(item, "keys"): + item = AttrDict(**item) + return f"{sep*depth}{key}\n" + item.summary(path, depth + 1, sep) + elif isinstance(item, (list, tuple, set, frozenset, L)): + return summarize_collection(key, item, path, depth + 1, sep) + elif isinstance(item, (torch.Tensor, np.ndarray)): + is_np = False + if isinstance(item, np.ndarray): + is_np = True + item = torch.tensor(item) + is_np = "๐ฆ" if not is_np else "np." + return f"{sep * depth}{key} - {is_np}{item} - {hash_tensor(item)}\n" + + else: + if isinstance(item, (int, float, complex, str)): + is_multiline = False + if isinstance(item, str): + is_multiline = "\n" in item + _sep = ( + " ...\n...\n...\n...\n... " if is_multiline else "........." 
+ ) + if len(item) > 250: + item = item[:100] + _sep + item[-100:] + if is_multiline: + _item = item.split("\n") + _item = "\n".join([f"{sep*(depth+1)}{l}" for l in _item]) + item = f"โ\n{sep*(depth+1)}```\n{_item}\n{sep*(depth+1)}```" + multiline = "" if not is_multiline else "Multiline " + return f"{sep * depth}{key} - {item} ({multiline}{type(item).__name__})\n" + else: + return f"{sep * depth}{key} - {type(item).__name__}\n" + + def summarize_collection(key, collection, path, d, s): + summary_str = f"{s * (d - 1)}{key}\n" + for i, item in enumerate(collection): + item_path = format_path(path, i) + if i < max_items: + summary_str += format_item(i, item, item_path, d, s) + else: + summary_str += ( + f"{s*d}... {len(collection) - max_items} more items ...\n" + ) + break + return summary_str + + summary_str = "" + for ix, key in enumerate(self.keys()): + if ix >= max_items: + summary_str += ( + f"{sep*depth} ... {len(self.keys()) - max_items} more keys ...\n" + ) + break + new_path = format_path(current_path, key) + summary_str += format_item(key, self[key], new_path, depth, sep) return summary_str def write_summary(self, to, **kwargs): diff --git a/torch_snippets/markup2.py b/torch_snippets/markup2.py index cce47db..3017ed5 100644 --- a/torch_snippets/markup2.py +++ b/torch_snippets/markup2.py @@ -22,6 +22,7 @@ # %% ../nbs/markups.ipynb 2 import json +import os from collections.abc import Mapping from json import JSONEncoder from typing import Union @@ -215,7 +216,16 @@ def __dir__(self): def __contains__(self, key): key = str(key) - return key in self.__dict__.keys() + if "." not in key: + return key in self.__dict__.keys() + else: + d = self + for _k in key.split("."): + try: + d = d[_k] + except AttributeError: + return False + return True def __delitem__(self, key): key = str(key) @@ -282,6 +292,9 @@ def find_address(self, key, current_path=""): return addresses def summary(self, current_path="", depth=0, sep=" ", max_items=10): + max_items = int(os.environ.get("AD_MAX_ITEMS", max_items)) + sep = os.environ.get("AD_SEP", sep) + def format_path(path, key): return f"{path}.{key}" if path else key @@ -305,12 +318,13 @@ class Torch: return f"{sep*depth}{key}\n" + item.summary(path, depth + 1, sep) elif isinstance(item, (list, tuple, set, frozenset, L)): return summarize_collection(key, item, path, depth + 1, sep) - elif torch.Tensor != type(None) and isinstance( - item, (torch.Tensor, np.ndarray) - ): + elif isinstance(item, (torch.Tensor, np.ndarray)): + is_np = False if isinstance(item, np.ndarray): + is_np = True item = torch.tensor(item) - return f"{sep * depth}{key} - {item} - {hash_tensor(item)}\n" + is_np = "๐ฆ" if not is_np else "np." 
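+            # Values that arrived as NumPy arrays are tagged "np."; native torch
+            # tensors get the emoji marker. Either way the hash follows the value.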
+            return f"{sep * depth}{key} - {is_np}{item} - {hash_tensor(item)}\n"
 
         else:
             if isinstance(item, (int, float, complex, str)):
diff --git a/torch_snippets/paths.py b/torch_snippets/paths.py
index 2c346e9..ac14adf 100644
--- a/torch_snippets/paths.py
+++ b/torch_snippets/paths.py
@@ -344,20 +344,21 @@ def folder_summary(thing):
 print_folder_summary = lambda x: print(folder_summary(x))
 
 
-def tree(folder_path, *additional_flags):
+def tree(directory):
     import subprocess
     from builtins import print
 
+    # Construct the shell command
+    shell_command = f"tree \"{directory}\" --filelimit=20 | sed 's/โ/ /g; s/โ/ /g; s/|/ /g; s/`/ /g; s/-/โ/g; s/โ/ /g; s/+/ /g'"
+    # Execute the shell command
     try:
-        # Construct the command by combining "tree" with the folder path and additional flags
-        command = ["tree", folder_path]
-        command.extend(additional_flags)
-
-        # Run the command and capture the output
-        result = subprocess.check_output(command, universal_newlines=True)
-        return print(result)
+        result = subprocess.run(
+            shell_command, shell=True, capture_output=True, text=True
+        )
+        # Print the output
+        print(result.stdout)
     except subprocess.CalledProcessError as e:
-        return f"Error: {e}"
+        print(f"Error executing command: {e}")
 
 
 # %% ../nbs/paths.ipynb 26
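
Since `tree` now shells out to the `tree` and `sed` CLI tools and no longer accepts extra flags, a quick sanity check could look like this. It is only a sketch; the path is a placeholder and both binaries must be available on PATH.

    from torch_snippets.paths import tree

    # Prints the cleaned-up listing to stdout; the function itself returns nothing.
    tree("/tmp")
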
diff --git a/torch_snippets/scp.py b/torch_snippets/scp.py
new file mode 100644
index 0000000..974bede
--- /dev/null
+++ b/torch_snippets/scp.py
@@ -0,0 +1,94 @@
+import os
+from pathlib import Path as P
+
+import paramiko
+from loguru import logger
+from scp import SCPClient as SCP
+
+
+class SCPClient:
+    def __init__(
+        self, hostname, port, username, password=None, private_key=None, logfile=None
+    ):
+        self.hostname = hostname
+        self.port = port
+        self.username = username
+        self.password = password
+        self.private_key = private_key
+        self.client = None
+        logfile = "/tmp/{time}-scp.log" if logfile is None else logfile
+        self.logger = logger
+        self.logger.add(logfile)
+
+    def connect(self):
+        self.client = paramiko.SSHClient()
+        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        if self.private_key:
+            self.client.connect(
+                self.hostname,
+                port=self.port,
+                username=self.username,
+                key_filename=self.private_key,
+            )
+        else:
+            self.client.connect(
+                self.hostname,
+                port=self.port,
+                username=self.username,
+                password=self.password,
+            )
+
+    def close(self):
+        if self.client:
+            self.client.close()
+
+    def upload(self, local_path, remote_path):
+        self.connect()
+        try:
+            with SCP(self.client.get_transport()) as scp:
+                if os.path.isdir(local_path):
+                    scp.put(local_path, recursive=True, remote_path=remote_path)
+                else:
+                    scp.put(local_path, remote_path)
+            self.logger.info(f"Uploaded {local_path} to {remote_path}")
+        except Exception as e:
+            self.logger.warning(f"Error uploading: {e}")
+        finally:
+            self.close()
+
+    def download(self, remote_path, local_path):
+        self.connect()
+        try:
+            with SCP(self.client.get_transport()) as scp:
+                is_remote_dir = "." not in remote_path
+                not_a_dir = "" if is_remote_dir else "not "
+                self.logger.info(f"Assuming {remote_path} is {not_a_dir}a directory")
+                if is_remote_dir:
+                    scp.get(remote_path, recursive=True, local_path=local_path)
+                else:
+                    os.makedirs(P(local_path).parent, exist_ok=True)
+                    scp.get(remote_path, local_path)
+            self.logger.info(f"Downloaded {remote_path} to {local_path}")
+        except Exception as e:
+            self.logger.warning(f"Error downloading: {e}")
+        finally:
+            self.close()
+
+
+# Example usage
+if __name__ == "__main__":
+    hostname = "10.161.141.73"
+    port = 22  # Default port for SSH
+    username = "jioaidev"
+    password = os.environ["JIOAIDEV_PASSWORD"]
+    private_key = None  # Or specify the path to your private key file
+
+    scp_client = SCPClient(hostname, port, username, password, private_key)
+    local_path = "/tmp/tmp.csv"
+    remote_path = "/data/datasets/210-invoices/051--2.5k-invoices-20231027/ToCleanup/vitstr_80k/00010100002900891622023/0.csv"
+
+    # Upload a file/folder
+    # scp_client.upload(local_path, remote_path)
+
+    # Or download a file/folder
+    scp_client.download(remote_path, local_path)
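
A rough usage sketch for the new SCPClient follows; the host, user, and paths are placeholders. Key-based auth is used whenever `private_key` is given, otherwise the password is used.

    from torch_snippets.scp import SCPClient

    client = SCPClient("example.com", 22, "ubuntu", private_key="/path/to/id_rsa")
    client.upload("/tmp/report.csv", "/home/ubuntu/report.csv")  # single file
    # download() treats a remote path without a "." as a directory and fetches it recursively
    client.download("/home/ubuntu/logs", "/tmp/logs")
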
diff --git a/torch_snippets/sklegos.py b/torch_snippets/sklegos.py
index 618c01b..3a4fb81 100644
--- a/torch_snippets/sklegos.py
+++ b/torch_snippets/sklegos.py
@@ -1,8 +1,18 @@
 # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/sklegos.ipynb.
 
 # %% auto 0
-__all__ = ['ColumnSelector', 'GroupedEstimator', 'GroupedPredictor', 'EstimatorTransformer', 'train_test_split', 'MakeFrame',
-           'ImputeMissingValues', 'LambdaTransformer', 'Cat2Num', 'SplitDateColumn']
+__all__ = [
+    "ColumnSelector",
+    "GroupedEstimator",
+    "GroupedPredictor",
+    "EstimatorTransformer",
+    "train_test_split",
+    "MakeFrame",
+    "ImputeMissingValues",
+    "LambdaTransformer",
+    "Cat2Num",
+    "SplitDateColumn",
+]
 
 # %% ../nbs/sklegos.ipynb 3
 from . import *
@@ -19,6 +29,7 @@ def train_test_split(*args, **kwargs):
         outputs = [i.reset_index(drop=True) for i in outputs]
     return outputs
 
+
 # %% ../nbs/sklegos.ipynb 4
 from sklearn.base import BaseEstimator, TransformerMixin, MetaEstimatorMixin
 from sklego.preprocessing import ColumnSelector
@@ -67,6 +78,7 @@ def fit(self, X, y=None):
     def transform(self, X, y=None):
         return pd.DataFrame(X, columns=self.column_names)
 
+
 # %% ../nbs/sklegos.ipynb 8
 class ImputeMissingValues(BaseEstimator, TransformerMixin):
     """DataFrame input - DataFrame output
@@ -114,6 +126,7 @@ def transform(self, X, y=None):
     def fit_transform(self, trn_df, y=None):
         return self.transform(self.fit(trn_df, y))
 
+
 # %% ../nbs/sklegos.ipynb 9
 class LambdaTransformer(BaseEstimator, TransformerMixin):
     def __init__(self, fn):
@@ -134,6 +147,7 @@ def transform(self, X, y=None):
     def fit_transform(self, X, y=None):
         return self.fit(self.transform(X))
 
+
 # %% ../nbs/sklegos.ipynb 10
 class MakeFrame(BaseEstimator, TransformerMixin):
     def __init__(self, column_names):
@@ -151,6 +165,7 @@ def predict_proba(self, X, y=None):
     def predict(self, X, y=None):
         return self.transform(X)
 
+
 # %% ../nbs/sklegos.ipynb 11
 class Cat2Num(BaseEstimator, TransformerMixin):
     def __init__(self):
@@ -173,6 +188,7 @@ def transform(self, df, y=None):
     def fit_transform(self, trn_df, y=None):
         return self.transform(self.fit(trn_df, y))
 
+
 # %% ../nbs/sklegos.ipynb 12
 class SplitDateColumn(BaseEstimator, TransformerMixin):
     def __init__(self, column_names, has_date, has_time, date_format=None):
diff --git a/torch_snippets/trainer/config.py b/torch_snippets/trainer/config.py
index 011f261..5635af6 100644
--- a/torch_snippets/trainer/config.py
+++ b/torch_snippets/trainer/config.py
@@ -10,6 +10,27 @@
 
 # %% ../../nbs/config.ipynb 2
 class DeepLearningConfig:
+    """
+    A configuration class for deep learning models.
+
+    This class provides methods to access and manipulate configuration settings.
+
+    Attributes:
+        input_variables (list): List of input variables defined in the class constructor.
+
+    Methods:
+        keys(): Returns the list of input variables.
+        __getitem__(key): Returns the value of the specified key.
+        __contains__(key): Checks if the specified key is present in the input variables.
+        from_ini_file(filepath, config_root=None): Creates an instance of the class from an INI file.
+        __repr__(): Returns a string representation of the class.
+
+    Example usage:
+        config = DeepLearningConfig.from_ini_file('config.ini')
+        print(config.keys())
+        print(config['learning_rate'])
+    """
+
     def keys(self):
         if not hasattr(self, "input_variables"):
             self.input_variables = inspect_builtin.signature(